From 9e9278d1841312f79bd9ee537ad4801374b37ba2 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 30 Nov 2025 20:33:24 +0000 Subject: [PATCH] feat: add SQL window function and CTE parsing support Add experimental support for SQL window functions (OVER clause) and CTEs (WITH clause) to the parser via a local fork of stackql-parser. Window function features: - OVER clause with optional PARTITION BY and ORDER BY - Frame specifications (ROWS/RANGE BETWEEN) - All standard frame boundary types CTE features: - Simple and multiple CTEs - Recursive CTEs with WITH RECURSIVE - Optional column list specification This is a proof-of-concept implementation for testing feasibility. The changes are in a local fork at internal/stackql-parser-fork/ with a replace directive in go.mod. Includes: - Parser unit tests for both features (all passing) - Documentation of implementation details and next steps --- docs/window-function-cte-implementation.md | 231 + go.mod | 2 + internal/stackql-parser-fork/.codeclimate.yml | 44 + internal/stackql-parser-fork/.dockerignore | 11 + .../stackql-parser-fork/.github/CODEOWNERS | 6 + .../.github/ISSUE_TEMPLATE/bug_report.md | 68 + .../.github/ISSUE_TEMPLATE/feature_request.md | 14 + .../.github/ISSUE_TEMPLATE/question.md | 9 + .../.github/workflows/check_formatting.yml | 21 + .../.github/workflows/check_make_parser.yml | 35 + .../.github/workflows/check_make_visitor.yml | 35 + .../.github/workflows/cluster_endtoend.yml | 47 + .../cluster_initial_sharding_multi.yml | 20 + .../.github/workflows/cluster_vtctl_web.yml | 22 + .../.github/workflows/create_release.yml | 40 + .../.github/workflows/e2e_race.yml | 35 + .../.github/workflows/endtoend.yml | 39 + .../.github/workflows/golangci-linter.yml | 23 + .../workflows/legacy_local_example.yml | 54 + .../.github/workflows/local_example.yml | 54 + .../.github/workflows/misc_test_docker.yml | 20 + .../.github/workflows/region_example.yml | 55 + .../.github/workflows/sonar_analysis.yml | 53 + .../.github/workflows/unit.yml | 90 + .../.github/workflows/unit_race.yml | 39 + internal/stackql-parser-fork/.gitignore | 87 + internal/stackql-parser-fork/.gitmodules | 0 .../stackql-parser-fork/CODE_OF_CONDUCT.md | 3 + internal/stackql-parser-fork/CONTRIBUTING.md | 10 + internal/stackql-parser-fork/LICENSE | 202 + internal/stackql-parser-fork/MAINTAINERS.md | 12 + internal/stackql-parser-fork/README.md | 29 + .../cicd/build_scripts/01_ast_rebuild.sh | 9 + .../config/gomysql.pc.tmpl | 2 + .../stackql-parser-fork/config/init_db.sql | 104 + .../config/mycnf/default-fast.cnf | 22 + .../config/mycnf/default.cnf | 37 + .../config/mycnf/master_mariadb100.cnf | 43 + .../config/mycnf/master_mariadb101.cnf | 42 + .../config/mycnf/master_mariadb102.cnf | 37 + .../config/mycnf/master_mariadb103.cnf | 30 + .../config/mycnf/master_mariadb104.cnf | 30 + .../config/mycnf/master_mysql56.cnf | 42 + .../config/mycnf/master_mysql57.cnf | 35 + .../config/mycnf/master_mysql80.cnf | 31 + .../stackql-parser-fork/config/mycnf/sbr.cnf | 3 + .../config/tablet/default.yaml | 115 + .../config/zk-client-dev.json | 4 + .../stackql-parser-fork/config/zkcfg/zoo.cfg | 9 + internal/stackql-parser-fork/go.mod | 41 + internal/stackql-parser-fork/go.sum | 124 + internal/stackql-parser-fork/go/README.md | 19 + .../stackql-parser-fork/go/bytes2/buffer.go | 65 + .../go/bytes2/buffer_test.go | 38 + .../stackql-parser-fork/go/cache/lru_cache.go | 284 + .../go/cache/lru_cache_test.go | 303 + .../stackql-parser-fork/go/cache/perf_test.go | 40 + internal/stackql-parser-fork/go/exit/exit.go | 103 + .../stackql-parser-fork/go/exit/exit_test.go | 100 + internal/stackql-parser-fork/go/hack/hack.go | 39 + .../stackql-parser-fork/go/hack/hack_test.go | 38 + .../stackql-parser-fork/go/sqlescape/ids.go | 37 + .../go/sqlescape/ids_test.go | 36 + .../go/sqltypes/bind_variables.go | 329 + .../go/sqltypes/bind_variables_test.go | 644 ++ .../go/sqltypes/event_token.go | 42 + .../go/sqltypes/event_token_test.go | 80 + .../go/sqltypes/plan_value.go | 267 + .../go/sqltypes/plan_value_test.go | 314 + .../stackql-parser-fork/go/sqltypes/proto3.go | 218 + .../go/sqltypes/proto3_test.go | 247 + .../go/sqltypes/query_response.go | 44 + .../stackql-parser-fork/go/sqltypes/result.go | 221 + .../go/sqltypes/result_test.go | 298 + .../go/sqltypes/testing.go | 160 + .../stackql-parser-fork/go/sqltypes/type.go | 313 + .../go/sqltypes/type_test.go | 435 + .../stackql-parser-fork/go/sqltypes/value.go | 398 + .../go/sqltypes/value_test.go | 412 + internal/stackql-parser-fork/go/tb/error.go | 138 + .../stackql-parser-fork/go/test/utils/diff.go | 83 + .../stackql-parser-fork/go/test/utils/sort.go | 13 + internal/stackql-parser-fork/go/trace/fake.go | 48 + .../go/trace/opentracing.go | 135 + .../go/trace/opentracing_test.go | 41 + .../go/trace/plugin_datadog.go | 56 + .../go/trace/plugin_jaeger.go | 102 + .../stackql-parser-fork/go/trace/trace.go | 171 + .../go/trace/trace_test.go | 145 + .../stackql-parser-fork/go/trace/utils.go | 33 + internal/stackql-parser-fork/go/vt/env/env.go | 102 + .../stackql-parser-fork/go/vt/env/env_test.go | 45 + .../stackql-parser-fork/go/vt/hook/hook.go | 286 + internal/stackql-parser-fork/go/vt/log/log.go | 78 + .../go/vt/logz/logz_utils.go | 155 + .../go/vt/proto/automation/automation.pb.go | 585 ++ .../automationservice/automationservice.pb.go | 167 + .../go/vt/proto/binlogdata/binlogdata.pb.go | 2136 ++++ .../proto/binlogservice/binlogservice.pb.go | 223 + .../go/vt/proto/logutil/logutil.pb.go | 157 + .../go/vt/proto/mysqlctl/mysqlctl.pb.go | 617 ++ .../go/vt/proto/query/query.pb.go | 4509 ++++++++ .../vt/proto/queryservice/queryservice.pb.go | 1235 +++ .../replicationdata/replicationdata.pb.go | 185 + .../go/vt/proto/tableacl/tableacl.pb.go | 160 + .../tabletmanagerdata/tabletmanagerdata.pb.go | 4529 ++++++++ .../tabletmanagerservice.pb.go | 2117 ++++ .../proto/throttlerdata/throttlerdata.pb.go | 723 ++ .../throttlerservice/throttlerservice.pb.go | 299 + .../go/vt/proto/topodata/topodata.pb.go | 1465 +++ .../go/vt/proto/vschema/vschema.pb.go | 609 ++ .../go/vt/proto/vtctldata/vtctldata.pb.go | 303 + .../vt/proto/vtctlservice/vtctlservice.pb.go | 150 + .../go/vt/proto/vtgate/vtgate.pb.go | 1055 ++ .../proto/vtgateservice/vtgateservice.pb.go | 360 + .../go/vt/proto/vtrpc/vtrpc.pb.go | 471 + .../go/vt/proto/vttest/vttest.pb.go | 254 + .../go/vt/proto/vttime/vttime.pb.go | 89 + .../vt/proto/vtworkerdata/vtworkerdata.pb.go | 125 + .../vtworkerservice/vtworkerservice.pb.go | 154 + .../go/vt/proto/workflow/workflow.pb.go | 388 + .../go/vt/sqlparser/analyzer.go | 387 + .../go/vt/sqlparser/analyzer_test.go | 501 + .../go/vt/sqlparser/ast.go | 2240 ++++ .../go/vt/sqlparser/ast_funcs.go | 893 ++ .../go/vt/sqlparser/ast_test.go | 800 ++ .../go/vt/sqlparser/comments.go | 323 + .../go/vt/sqlparser/comments_test.go | 409 + .../go/vt/sqlparser/constants.go | 204 + .../go/vt/sqlparser/cte_test.go | 51 + .../go/vt/sqlparser/encodable.go | 99 + .../go/vt/sqlparser/encodable_test.go | 73 + .../go/vt/sqlparser/expression_converter.go | 71 + .../go/vt/sqlparser/expression_rewriting.go | 220 + .../vt/sqlparser/expression_rewriting_test.go | 145 + .../go/vt/sqlparser/expressions_test.go | 102 + .../go/vt/sqlparser/external_visitor.go | 125 + .../go/vt/sqlparser/fuzz.go | 25 + .../go/vt/sqlparser/impossible_query.go | 42 + .../go/vt/sqlparser/like_filter.go | 50 + .../go/vt/sqlparser/like_filter_test.go | 90 + .../go/vt/sqlparser/normalizer.go | 244 + .../go/vt/sqlparser/normalizer_test.go | 257 + .../go/vt/sqlparser/parse_next_test.go | 217 + .../go/vt/sqlparser/parse_test.go | 2923 ++++++ .../go/vt/sqlparser/parsed_query.go | 136 + .../go/vt/sqlparser/parsed_query_test.go | 157 + .../go/vt/sqlparser/parser.go | 234 + .../go/vt/sqlparser/precedence.go | 96 + .../go/vt/sqlparser/precedence_test.go | 181 + .../go/vt/sqlparser/random_expr.go | 320 + .../go/vt/sqlparser/redact_query.go | 35 + .../go/vt/sqlparser/redact_query_test.go | 33 + .../go/vt/sqlparser/rewriter.go | 1554 +++ .../go/vt/sqlparser/rewriter_api.go | 91 + .../go/vt/sqlparser/set_normalizer.go | 81 + .../go/vt/sqlparser/set_normalizer_test.go | 84 + .../go/vt/sqlparser/sql.go | 9094 +++++++++++++++++ .../stackql-parser-fork/go/vt/sqlparser/sql.y | 4392 ++++++++ .../sqlparser/test_queries/django_queries.txt | 290 + .../go/vt/sqlparser/token.go | 1103 ++ .../go/vt/sqlparser/token_test.go | 207 + .../go/vt/sqlparser/tracked_buffer.go | 220 + .../go/vt/sqlparser/truncate_query.go | 52 + .../go/vt/sqlparser/visitorgen/ast_walker.go | 130 + .../sqlparser/visitorgen/ast_walker_test.go | 239 + .../go/vt/sqlparser/visitorgen/main/main.go | 164 + .../go/vt/sqlparser/visitorgen/sast.go | 178 + .../sqlparser/visitorgen/struct_producer.go | 253 + .../visitorgen/struct_producer_test.go | 423 + .../go/vt/sqlparser/visitorgen/transformer.go | 95 + .../sqlparser/visitorgen/transformer_test.go | 110 + .../sqlparser/visitorgen/visitor_emitter.go | 76 + .../visitorgen/visitor_emitter_test.go | 92 + .../go/vt/sqlparser/visitorgen/visitorgen.go | 33 + .../go/vt/sqlparser/window_test.go | 66 + .../go/vt/vterrors/LICENSE | 23 + .../go/vt/vterrors/aggregate.go | 106 + .../go/vt/vterrors/aggregate_test.go | 103 + .../go/vt/vterrors/errors_test.go | 307 + .../go/vt/vterrors/grpc.go | 143 + .../go/vt/vterrors/proto3.go | 52 + .../go/vt/vterrors/proto3_test.go | 83 + .../go/vt/vterrors/stack.go | 163 + .../go/vt/vterrors/vterrors.go | 311 + .../go/vt/vtgate/evalengine/arithmetic.go | 820 ++ .../vt/vtgate/evalengine/arithmetic_test.go | 1482 +++ .../go/vt/vtgate/evalengine/expressions.go | 294 + .../vt/vtgate/evalengine/expressions_test.go | 107 + internal/stackql-parser-fork/log/.gitignore | 2 + .../stackql-parser-fork/misc/git/commit-msg | 20 + .../misc/git/commit-msg.bugnumber | 65 + .../misc/git/commit-msg.signoff | 63 + .../misc/git/hooks/checkstyle | 44 + .../stackql-parser-fork/misc/git/hooks/gofmt | 63 + .../misc/git/hooks/goimports | 38 + .../misc/git/hooks/golangci-lint | 36 + .../stackql-parser-fork/misc/git/hooks/golint | 79 + .../stackql-parser-fork/misc/git/hooks/govet | 54 + .../misc/git/hooks/shellcheck | 48 + .../misc/git/hooks/staticcheck | 70 + .../stackql-parser-fork/misc/git/hooks/tslint | 56 + .../misc/git/hooks/visitorgen | 18 + .../stackql-parser-fork/misc/git/pre-commit | 20 + .../misc/git/prepare-commit-msg.bugnumber | 44 + internal/stackql-parser-fork/misc/git/ps1 | 12 + internal/stackql-parser-fork/misc/gofmt-all | 3 + .../stackql-parser-fork/misc/parse_cover.py | 45 + internal/stackql-parser-fork/proto/README.md | 54 + .../proto/automation.proto | 97 + .../proto/automationservice.proto | 33 + .../proto/binlogdata.proto | 422 + .../proto/binlogservice.proto | 37 + .../stackql-parser-fork/proto/logutil.proto | 46 + .../stackql-parser-fork/proto/mysqlctl.proto | 56 + .../stackql-parser-fork/proto/query.proto | 866 ++ .../proto/queryservice.proto | 104 + .../proto/replicationdata.proto | 40 + .../stackql-parser-fork/proto/tableacl.proto | 36 + .../proto/tabletmanagerdata.proto | 520 + .../proto/tabletmanagerservice.proto | 189 + .../proto/throttlerdata.proto | 185 + .../proto/throttlerservice.proto | 52 + .../stackql-parser-fork/proto/topodata.proto | 408 + .../stackql-parser-fork/proto/vschema.proto | 118 + .../stackql-parser-fork/proto/vtctldata.proto | 62 + .../proto/vtctlservice.proto | 30 + .../stackql-parser-fork/proto/vtgate.proto | 256 + .../proto/vtgateservice.proto | 57 + .../stackql-parser-fork/proto/vtrpc.proto | 262 + .../stackql-parser-fork/proto/vttest.proto | 92 + .../stackql-parser-fork/proto/vttime.proto | 30 + .../proto/vtworkerdata.proto | 34 + .../proto/vtworkerservice.proto | 32 + .../stackql-parser-fork/proto/workflow.proto | 112 + .../tools/all_test_for_coverage.sh | 68 + .../tools/bootstrap_web.sh | 57 + .../tools/build_version_flags.sh | 40 + .../tools/check_make_parser.sh | 43 + .../tools/coverage-go/Readme.md | 6 + .../tools/coverage-go/mysqlctl_test.go | 22 + .../tools/coverage-go/vtctl_test.go | 24 + .../tools/coverage-go/vtctlclient_test.go | 7 + .../tools/coverage-go/vtctld_test.go | 22 + .../tools/coverage-go/vtgate_test.go | 22 + .../tools/coverage-go/vttablet_test.go | 25 + .../tools/coverage-go/vtworker_test.go | 22 + .../tools/coverage-go/vtworkerclient_test.go | 22 + .../tools/dependency_check.sh | 34 + .../stackql-parser-fork/tools/e2e_go_test.sh | 4 + .../tools/e2e_test_cluster.sh | 38 + .../tools/e2e_test_race.sh | 55 + .../tools/e2e_test_runner.sh | 69 + .../tools/generate_web_artifacts.sh | 29 + .../tools/make-release-packages.sh | 98 + .../stackql-parser-fork/tools/preinstall.sh | 9 + internal/stackql-parser-fork/tools/pylint.sh | 34 + .../tools/shell_functions.inc | 68 + internal/stackql-parser-fork/tools/statsd.go | 138 + internal/stackql-parser-fork/tools/tools.go | 30 + .../tools/unit_test_race.sh | 63 + .../tools/unit_test_runner.sh | 72 + 262 files changed, 73225 insertions(+) create mode 100644 docs/window-function-cte-implementation.md create mode 100644 internal/stackql-parser-fork/.codeclimate.yml create mode 100644 internal/stackql-parser-fork/.dockerignore create mode 100644 internal/stackql-parser-fork/.github/CODEOWNERS create mode 100644 internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/bug_report.md create mode 100644 internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/feature_request.md create mode 100644 internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/question.md create mode 100644 internal/stackql-parser-fork/.github/workflows/check_formatting.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/check_make_parser.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/check_make_visitor.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/cluster_endtoend.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/cluster_initial_sharding_multi.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/cluster_vtctl_web.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/create_release.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/e2e_race.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/endtoend.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/golangci-linter.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/legacy_local_example.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/local_example.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/misc_test_docker.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/region_example.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/sonar_analysis.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/unit.yml create mode 100644 internal/stackql-parser-fork/.github/workflows/unit_race.yml create mode 100644 internal/stackql-parser-fork/.gitignore create mode 100644 internal/stackql-parser-fork/.gitmodules create mode 100644 internal/stackql-parser-fork/CODE_OF_CONDUCT.md create mode 100644 internal/stackql-parser-fork/CONTRIBUTING.md create mode 100644 internal/stackql-parser-fork/LICENSE create mode 100644 internal/stackql-parser-fork/MAINTAINERS.md create mode 100644 internal/stackql-parser-fork/README.md create mode 100755 internal/stackql-parser-fork/cicd/build_scripts/01_ast_rebuild.sh create mode 100644 internal/stackql-parser-fork/config/gomysql.pc.tmpl create mode 100644 internal/stackql-parser-fork/config/init_db.sql create mode 100644 internal/stackql-parser-fork/config/mycnf/default-fast.cnf create mode 100644 internal/stackql-parser-fork/config/mycnf/default.cnf create mode 100644 internal/stackql-parser-fork/config/mycnf/master_mariadb100.cnf create mode 100644 internal/stackql-parser-fork/config/mycnf/master_mariadb101.cnf create mode 100644 internal/stackql-parser-fork/config/mycnf/master_mariadb102.cnf create mode 100644 internal/stackql-parser-fork/config/mycnf/master_mariadb103.cnf create mode 100644 internal/stackql-parser-fork/config/mycnf/master_mariadb104.cnf create mode 100644 internal/stackql-parser-fork/config/mycnf/master_mysql56.cnf create mode 100644 internal/stackql-parser-fork/config/mycnf/master_mysql57.cnf create mode 100644 internal/stackql-parser-fork/config/mycnf/master_mysql80.cnf create mode 100644 internal/stackql-parser-fork/config/mycnf/sbr.cnf create mode 100644 internal/stackql-parser-fork/config/tablet/default.yaml create mode 100644 internal/stackql-parser-fork/config/zk-client-dev.json create mode 100644 internal/stackql-parser-fork/config/zkcfg/zoo.cfg create mode 100644 internal/stackql-parser-fork/go.mod create mode 100644 internal/stackql-parser-fork/go.sum create mode 100644 internal/stackql-parser-fork/go/README.md create mode 100644 internal/stackql-parser-fork/go/bytes2/buffer.go create mode 100644 internal/stackql-parser-fork/go/bytes2/buffer_test.go create mode 100644 internal/stackql-parser-fork/go/cache/lru_cache.go create mode 100644 internal/stackql-parser-fork/go/cache/lru_cache_test.go create mode 100644 internal/stackql-parser-fork/go/cache/perf_test.go create mode 100644 internal/stackql-parser-fork/go/exit/exit.go create mode 100644 internal/stackql-parser-fork/go/exit/exit_test.go create mode 100644 internal/stackql-parser-fork/go/hack/hack.go create mode 100644 internal/stackql-parser-fork/go/hack/hack_test.go create mode 100644 internal/stackql-parser-fork/go/sqlescape/ids.go create mode 100644 internal/stackql-parser-fork/go/sqlescape/ids_test.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/bind_variables.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/bind_variables_test.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/event_token.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/event_token_test.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/plan_value.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/plan_value_test.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/proto3.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/proto3_test.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/query_response.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/result.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/result_test.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/testing.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/type.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/type_test.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/value.go create mode 100644 internal/stackql-parser-fork/go/sqltypes/value_test.go create mode 100644 internal/stackql-parser-fork/go/tb/error.go create mode 100644 internal/stackql-parser-fork/go/test/utils/diff.go create mode 100644 internal/stackql-parser-fork/go/test/utils/sort.go create mode 100644 internal/stackql-parser-fork/go/trace/fake.go create mode 100644 internal/stackql-parser-fork/go/trace/opentracing.go create mode 100644 internal/stackql-parser-fork/go/trace/opentracing_test.go create mode 100644 internal/stackql-parser-fork/go/trace/plugin_datadog.go create mode 100644 internal/stackql-parser-fork/go/trace/plugin_jaeger.go create mode 100644 internal/stackql-parser-fork/go/trace/trace.go create mode 100644 internal/stackql-parser-fork/go/trace/trace_test.go create mode 100644 internal/stackql-parser-fork/go/trace/utils.go create mode 100644 internal/stackql-parser-fork/go/vt/env/env.go create mode 100644 internal/stackql-parser-fork/go/vt/env/env_test.go create mode 100644 internal/stackql-parser-fork/go/vt/hook/hook.go create mode 100644 internal/stackql-parser-fork/go/vt/log/log.go create mode 100644 internal/stackql-parser-fork/go/vt/logz/logz_utils.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/automation/automation.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/automationservice/automationservice.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/binlogdata/binlogdata.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/binlogservice/binlogservice.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/logutil/logutil.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/mysqlctl/mysqlctl.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/query/query.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/queryservice/queryservice.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/replicationdata/replicationdata.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/tableacl/tableacl.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/throttlerdata/throttlerdata.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/throttlerservice/throttlerservice.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/topodata/topodata.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/vschema/vschema.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/vtctldata/vtctldata.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/vtctlservice/vtctlservice.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/vtgate/vtgate.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/vtgateservice/vtgateservice.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/vtrpc/vtrpc.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/vttest/vttest.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/vttime/vttime.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/vtworkerdata/vtworkerdata.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/vtworkerservice/vtworkerservice.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/proto/workflow/workflow.pb.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/analyzer.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/analyzer_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/ast.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/ast_funcs.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/ast_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/comments.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/comments_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/constants.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/cte_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/encodable.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/encodable_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/expression_converter.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/expression_rewriting.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/expression_rewriting_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/expressions_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/external_visitor.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/fuzz.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/impossible_query.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/like_filter.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/like_filter_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/normalizer.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/normalizer_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/parse_next_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/parse_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/parsed_query.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/parsed_query_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/parser.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/precedence.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/precedence_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/random_expr.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/redact_query.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/redact_query_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/rewriter.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/rewriter_api.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/set_normalizer.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/set_normalizer_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/sql.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/sql.y create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/test_queries/django_queries.txt create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/token.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/token_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/tracked_buffer.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/truncate_query.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/ast_walker.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/ast_walker_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/main/main.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/sast.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/struct_producer.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/struct_producer_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/transformer.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/transformer_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitor_emitter.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitor_emitter_test.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitorgen.go create mode 100644 internal/stackql-parser-fork/go/vt/sqlparser/window_test.go create mode 100644 internal/stackql-parser-fork/go/vt/vterrors/LICENSE create mode 100644 internal/stackql-parser-fork/go/vt/vterrors/aggregate.go create mode 100644 internal/stackql-parser-fork/go/vt/vterrors/aggregate_test.go create mode 100644 internal/stackql-parser-fork/go/vt/vterrors/errors_test.go create mode 100644 internal/stackql-parser-fork/go/vt/vterrors/grpc.go create mode 100644 internal/stackql-parser-fork/go/vt/vterrors/proto3.go create mode 100644 internal/stackql-parser-fork/go/vt/vterrors/proto3_test.go create mode 100644 internal/stackql-parser-fork/go/vt/vterrors/stack.go create mode 100644 internal/stackql-parser-fork/go/vt/vterrors/vterrors.go create mode 100644 internal/stackql-parser-fork/go/vt/vtgate/evalengine/arithmetic.go create mode 100644 internal/stackql-parser-fork/go/vt/vtgate/evalengine/arithmetic_test.go create mode 100644 internal/stackql-parser-fork/go/vt/vtgate/evalengine/expressions.go create mode 100644 internal/stackql-parser-fork/go/vt/vtgate/evalengine/expressions_test.go create mode 100644 internal/stackql-parser-fork/log/.gitignore create mode 100755 internal/stackql-parser-fork/misc/git/commit-msg create mode 100755 internal/stackql-parser-fork/misc/git/commit-msg.bugnumber create mode 100755 internal/stackql-parser-fork/misc/git/commit-msg.signoff create mode 100755 internal/stackql-parser-fork/misc/git/hooks/checkstyle create mode 100755 internal/stackql-parser-fork/misc/git/hooks/gofmt create mode 100755 internal/stackql-parser-fork/misc/git/hooks/goimports create mode 100755 internal/stackql-parser-fork/misc/git/hooks/golangci-lint create mode 100755 internal/stackql-parser-fork/misc/git/hooks/golint create mode 100755 internal/stackql-parser-fork/misc/git/hooks/govet create mode 100755 internal/stackql-parser-fork/misc/git/hooks/shellcheck create mode 100755 internal/stackql-parser-fork/misc/git/hooks/staticcheck create mode 100755 internal/stackql-parser-fork/misc/git/hooks/tslint create mode 100755 internal/stackql-parser-fork/misc/git/hooks/visitorgen create mode 100755 internal/stackql-parser-fork/misc/git/pre-commit create mode 100755 internal/stackql-parser-fork/misc/git/prepare-commit-msg.bugnumber create mode 100755 internal/stackql-parser-fork/misc/git/ps1 create mode 100755 internal/stackql-parser-fork/misc/gofmt-all create mode 100755 internal/stackql-parser-fork/misc/parse_cover.py create mode 100644 internal/stackql-parser-fork/proto/README.md create mode 100644 internal/stackql-parser-fork/proto/automation.proto create mode 100644 internal/stackql-parser-fork/proto/automationservice.proto create mode 100644 internal/stackql-parser-fork/proto/binlogdata.proto create mode 100644 internal/stackql-parser-fork/proto/binlogservice.proto create mode 100644 internal/stackql-parser-fork/proto/logutil.proto create mode 100644 internal/stackql-parser-fork/proto/mysqlctl.proto create mode 100644 internal/stackql-parser-fork/proto/query.proto create mode 100644 internal/stackql-parser-fork/proto/queryservice.proto create mode 100644 internal/stackql-parser-fork/proto/replicationdata.proto create mode 100644 internal/stackql-parser-fork/proto/tableacl.proto create mode 100644 internal/stackql-parser-fork/proto/tabletmanagerdata.proto create mode 100644 internal/stackql-parser-fork/proto/tabletmanagerservice.proto create mode 100644 internal/stackql-parser-fork/proto/throttlerdata.proto create mode 100644 internal/stackql-parser-fork/proto/throttlerservice.proto create mode 100644 internal/stackql-parser-fork/proto/topodata.proto create mode 100644 internal/stackql-parser-fork/proto/vschema.proto create mode 100644 internal/stackql-parser-fork/proto/vtctldata.proto create mode 100644 internal/stackql-parser-fork/proto/vtctlservice.proto create mode 100644 internal/stackql-parser-fork/proto/vtgate.proto create mode 100644 internal/stackql-parser-fork/proto/vtgateservice.proto create mode 100644 internal/stackql-parser-fork/proto/vtrpc.proto create mode 100644 internal/stackql-parser-fork/proto/vttest.proto create mode 100644 internal/stackql-parser-fork/proto/vttime.proto create mode 100644 internal/stackql-parser-fork/proto/vtworkerdata.proto create mode 100644 internal/stackql-parser-fork/proto/vtworkerservice.proto create mode 100644 internal/stackql-parser-fork/proto/workflow.proto create mode 100755 internal/stackql-parser-fork/tools/all_test_for_coverage.sh create mode 100755 internal/stackql-parser-fork/tools/bootstrap_web.sh create mode 100755 internal/stackql-parser-fork/tools/build_version_flags.sh create mode 100755 internal/stackql-parser-fork/tools/check_make_parser.sh create mode 100644 internal/stackql-parser-fork/tools/coverage-go/Readme.md create mode 100644 internal/stackql-parser-fork/tools/coverage-go/mysqlctl_test.go create mode 100644 internal/stackql-parser-fork/tools/coverage-go/vtctl_test.go create mode 100644 internal/stackql-parser-fork/tools/coverage-go/vtctlclient_test.go create mode 100644 internal/stackql-parser-fork/tools/coverage-go/vtctld_test.go create mode 100644 internal/stackql-parser-fork/tools/coverage-go/vtgate_test.go create mode 100644 internal/stackql-parser-fork/tools/coverage-go/vttablet_test.go create mode 100644 internal/stackql-parser-fork/tools/coverage-go/vtworker_test.go create mode 100644 internal/stackql-parser-fork/tools/coverage-go/vtworkerclient_test.go create mode 100755 internal/stackql-parser-fork/tools/dependency_check.sh create mode 100755 internal/stackql-parser-fork/tools/e2e_go_test.sh create mode 100755 internal/stackql-parser-fork/tools/e2e_test_cluster.sh create mode 100755 internal/stackql-parser-fork/tools/e2e_test_race.sh create mode 100755 internal/stackql-parser-fork/tools/e2e_test_runner.sh create mode 100755 internal/stackql-parser-fork/tools/generate_web_artifacts.sh create mode 100755 internal/stackql-parser-fork/tools/make-release-packages.sh create mode 100755 internal/stackql-parser-fork/tools/preinstall.sh create mode 100755 internal/stackql-parser-fork/tools/pylint.sh create mode 100644 internal/stackql-parser-fork/tools/shell_functions.inc create mode 100644 internal/stackql-parser-fork/tools/statsd.go create mode 100644 internal/stackql-parser-fork/tools/tools.go create mode 100755 internal/stackql-parser-fork/tools/unit_test_race.sh create mode 100755 internal/stackql-parser-fork/tools/unit_test_runner.sh diff --git a/docs/window-function-cte-implementation.md b/docs/window-function-cte-implementation.md new file mode 100644 index 00000000..77d000a3 --- /dev/null +++ b/docs/window-function-cte-implementation.md @@ -0,0 +1,231 @@ +# Window Functions and CTE Implementation for StackQL Parser + +## Overview + +This document describes the experimental implementation of SQL window functions and CTEs (Common Table Expressions) in the stackql-parser fork. This is a proof-of-concept implementation to validate the approach before implementing it properly in the main stackql-parser repository. + +## Summary of Changes + +### Files Modified + +The following files in `internal/stackql-parser-fork/go/vt/sqlparser/` were modified: + +1. **ast.go** - Added AST types for window functions and CTEs +2. **sql.y** - Added grammar rules for parsing +3. **token.go** - Added keyword mappings +4. **constants.go** - Added constants for frame types +5. **external_visitor.go** - Added Accept methods for new types + +### New Test Files Created + +- `window_test.go` - Unit tests for window function parsing +- `cte_test.go` - Unit tests for CTE parsing + +## Implementation Details + +### Window Functions + +#### AST Types Added (ast.go) + +```go +// OverClause represents an OVER clause for window functions +OverClause struct { + WindowName ColIdent + WindowSpec *WindowSpec +} + +// WindowSpec represents a window specification +WindowSpec struct { + PartitionBy Exprs + OrderBy OrderBy + Frame *FrameClause +} + +// FrameClause represents a frame clause (ROWS/RANGE) +FrameClause struct { + Unit string // ROWS or RANGE + Start *FramePoint + End *FramePoint +} + +// FramePoint represents a frame boundary +FramePoint struct { + Type string // UNBOUNDED PRECEDING, CURRENT ROW, etc. + Expr Expr // for N PRECEDING or N FOLLOWING +} +``` + +The `FuncExpr` struct was extended with an `Over *OverClause` field. + +#### Grammar Rules Added (sql.y) + +- `over_clause_opt` - Optional OVER clause after function calls +- `window_spec` - Window specification (PARTITION BY, ORDER BY, frame) +- `partition_by_opt` - Optional PARTITION BY clause +- `frame_clause_opt` - Optional frame specification (ROWS/RANGE) +- `frame_point` - Frame boundary points + +#### Tokens Added + +- `OVER`, `ROWS`, `RANGE`, `UNBOUNDED`, `PRECEDING`, `FOLLOWING`, `CURRENT`, `ROW` + +#### Supported Syntax + +```sql +-- Simple window function +SELECT SUM(count) OVER () FROM t + +-- With ORDER BY +SELECT RANK() OVER (ORDER BY count DESC) FROM t + +-- With PARTITION BY +SELECT SUM(count) OVER (PARTITION BY category) FROM t + +-- With PARTITION BY and ORDER BY +SELECT SUM(count) OVER (PARTITION BY category ORDER BY name) FROM t + +-- With frame clause +SELECT SUM(count) OVER (ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM t + +-- Multiple window functions +SELECT SUM(x) OVER (), COUNT(*) OVER (ORDER BY y) FROM t + +-- Window functions with aggregates +SELECT serviceName, COUNT(*) as count, SUM(COUNT(*)) OVER () as total FROM t GROUP BY serviceName +``` + +### CTEs (Common Table Expressions) + +#### AST Types Added (ast.go) + +```go +// With represents a WITH clause (CTE) +With struct { + Recursive bool + CTEs []*CommonTableExpr +} + +// CommonTableExpr represents a single CTE definition +CommonTableExpr struct { + Name TableIdent + Columns Columns + Subquery *Subquery +} +``` + +The `Select` struct was extended with a `With *With` field. + +#### Grammar Rules Added (sql.y) + +- `cte_list` - List of CTE definitions +- `cte` - Single CTE definition +- Extended `base_select` with WITH clause alternatives + +#### Tokens Added + +- `RECURSIVE` + +#### Supported Syntax + +```sql +-- Simple CTE +WITH cte AS (SELECT id FROM t) SELECT * FROM cte + +-- CTE with column list +WITH cte (col1, col2) AS (SELECT id, name FROM t) SELECT * FROM cte + +-- Multiple CTEs +WITH cte1 AS (SELECT id FROM t1), cte2 AS (SELECT id FROM t2) SELECT * FROM cte1 JOIN cte2 + +-- Recursive CTE +WITH RECURSIVE cte AS (SELECT 1 AS n UNION ALL SELECT n + 1 FROM cte WHERE n < 10) SELECT * FROM cte + +-- CTE with window function +WITH sales AS (SELECT product, amount FROM orders) +SELECT product, SUM(amount) OVER (ORDER BY product) FROM sales +``` + +## Key Design Decisions + +### Window Functions + +1. **OVER clause placement**: Added `over_clause_opt` to the `function_call_generic` rule to allow OVER on any generic function call. + +2. **Frame specification**: Supports both ROWS and RANGE frame types with: + - UNBOUNDED PRECEDING + - UNBOUNDED FOLLOWING + - CURRENT ROW + - N PRECEDING + - N FOLLOWING + +3. **Named windows**: The grammar supports `OVER window_name` syntax for referencing named windows (though WINDOW clause definition is not yet implemented). + +### CTEs + +1. **Grammar approach**: Instead of using an optional `with_clause_opt` rule that includes an empty alternative (which caused grammar conflicts), we directly added WITH alternatives to the `base_select` rule. + +2. **Recursive CTEs**: Supported via the `WITH RECURSIVE` syntax. + +3. **Column lists**: Optional column list specification for CTEs is supported. + +## Parser Conflicts + +The implementation increases reduce/reduce conflicts from 461 to 464. This is acceptable for an experimental implementation. + +## Testing + +### Unit Tests + +All parser unit tests pass: +- 8 window function tests +- 5 CTE tests + +### Running Tests + +```bash +cd /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser +go test -run "TestWindowFunctions|TestCTEs" -v +``` + +## Next Steps for Production Implementation + +1. **Upstream the changes**: Apply these changes to the main `stackql-parser` repository. + +2. **Execution layer**: Implement window function and CTE execution in the SQLite backend: + - SQLite already supports window functions and CTEs natively + - Need to ensure the parsed AST is correctly converted to SQL for execution + +3. **Named Windows**: Add support for the `WINDOW` clause to define named windows: + ```sql + SELECT SUM(x) OVER w FROM t WINDOW w AS (PARTITION BY y ORDER BY z) + ``` + +4. **Additional window functions**: The parser supports any function name with OVER. Consider adding specific handling for: + - ROW_NUMBER() + - RANK() + - DENSE_RANK() + - LEAD() + - LAG() + - FIRST_VALUE() + - LAST_VALUE() + - NTH_VALUE() + - NTILE() + +5. **Robot tests**: Add integration tests that verify window functions and CTEs work end-to-end with actual cloud provider data. + +## Known Limitations + +1. **No execution support**: This implementation only adds parsing support. The execution layer still needs to be updated to handle window functions and CTEs. + +2. **Pre-existing test failures**: The parser has some pre-existing test failures unrelated to window functions/CTEs (table name quoting, OR operator rendering). These should be addressed separately. + +3. **Build complexity**: The local fork approach with replace directive in go.mod can cause directory conflicts when building the main stackql binary. For production, the changes should be upstreamed to stackql-parser. + +## Files to Review + +- `internal/stackql-parser-fork/go/vt/sqlparser/ast.go` - AST type definitions +- `internal/stackql-parser-fork/go/vt/sqlparser/sql.y` - Grammar rules (lines ~3074-3410) +- `internal/stackql-parser-fork/go/vt/sqlparser/token.go` - Keyword mappings +- `internal/stackql-parser-fork/go/vt/sqlparser/constants.go` - Frame type constants +- `internal/stackql-parser-fork/go/vt/sqlparser/window_test.go` - Window function tests +- `internal/stackql-parser-fork/go/vt/sqlparser/cte_test.go` - CTE tests diff --git a/go.mod b/go.mod index 917dbd4e..4a09dd1d 100644 --- a/go.mod +++ b/go.mod @@ -142,3 +142,5 @@ require ( replace github.com/chzyer/readline => github.com/stackql/readline v0.0.2-alpha05 replace github.com/mattn/go-sqlite3 => github.com/stackql/stackql-go-sqlite3 v1.0.4-stackql + +replace github.com/stackql/stackql-parser => ./internal/stackql-parser-fork diff --git a/internal/stackql-parser-fork/.codeclimate.yml b/internal/stackql-parser-fork/.codeclimate.yml new file mode 100644 index 00000000..8639c28e --- /dev/null +++ b/internal/stackql-parser-fork/.codeclimate.yml @@ -0,0 +1,44 @@ +engines: + gofmt: + enabled: true + golint: + enabled: true + govet: + enabled: true + shellcheck: + enabled: true + duplication: + enabled: false + +ratings: + paths: + - "**.go" + - "**.sh" + +checks: + argument-count: + enabled: false + complex-logic: + enabled: false + file-lines: + enabled: false + method-complexity: + enabled: false + method-count: + enabled: false + method-lines: + enabled: false + nested-control-flow: + enabled: false + return-statements: + enabled: false + similar-code: + enabled: false + identical-code: + enabled: false + +# Ignore generated code. +exclude_paths: +- "go/vt/proto/" +- "go/vt/sqlparser/sql.go" +- "py/util/grpc_with_metadata.py" diff --git a/internal/stackql-parser-fork/.dockerignore b/internal/stackql-parser-fork/.dockerignore new file mode 100644 index 00000000..98cca97e --- /dev/null +++ b/internal/stackql-parser-fork/.dockerignore @@ -0,0 +1,11 @@ +Godeps/_workspace/pkg +Godeps/_workspace/bin +_test +java/*/target +java/*/bin +php/vendor +releases +/dist/ +/vthook/ +/bin/ +/vtdataroot/ diff --git a/internal/stackql-parser-fork/.github/CODEOWNERS b/internal/stackql-parser-fork/.github/CODEOWNERS new file mode 100644 index 00000000..b4b82473 --- /dev/null +++ b/internal/stackql-parser-fork/.github/CODEOWNERS @@ -0,0 +1,6 @@ +* @sougou + +/docker/ @derekperkins @dkhenry +/helm/ @derekperkins @dkhenry +/config/mycnf/ @morgo +/go/vt/mysqlctl/mysqld.go @morgo diff --git a/internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/bug_report.md b/internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..828f05fc --- /dev/null +++ b/internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,68 @@ +--- +name: Bug Report +about: You're experiencing an issue with Vitess that is different than the documented behavior. +--- + +When filing a bug, please include the following headings if +possible. Any example text in this template can be deleted. + +#### Overview of the Issue + +A paragraph or two about the issue you're experiencing. + +#### Reproduction Steps + +Steps to reproduce this issue, example: + +1. Deploy the following `vschema`: + + ```javascript + { + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + }, + "tables": { + "user": { + "column_vindexes": [ + { + "column": "user_id", + "name": "hash" + } + ] + } + } + } + ``` + +1. Deploy the following `schema`: + + ```sql + create table user(user_id bigint, name varchar(128), primary key(user_id)); + ``` + +1. Run `SELECT...` +1. View error + +#### Binary version +Example: + +```sh +giaquinti@workspace:~$ vtgate --version +Version: a95cf5d (Git branch 'HEAD') built on Fri May 18 16:54:26 PDT 2018 by giaquinti@workspace using go1.10 linux/amd64 +``` + +#### Operating system and Environment details + +OS, Architecture, and any other information you can provide +about the environment. + +- Operating system (output of `cat /etc/os-release`): +- Kernel version (output of `uname -sr`): +- Architecture (output of `uname -m`): + +#### Log Fragments + +Include appropriate log fragments. If the log is longer than a few dozen lines, please +include the URL to the [gist](https://gist.github.com/) of the log instead of posting it in the issue. diff --git a/internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/feature_request.md b/internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..2475a47b --- /dev/null +++ b/internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,14 @@ +--- +name: Feature Request +about: If you have something you think Vitess could improve or add support for. +--- + +Please search the existing issues for relevant feature requests, and use the [reaction feature](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to add upvotes to pre-existing requests. + +#### Feature Description + +A written overview of the feature. + +#### Use Case(s) + +Any relevant use-cases that you see. diff --git a/internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/question.md b/internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 00000000..0d0c7e4f --- /dev/null +++ b/internal/stackql-parser-fork/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,9 @@ +--- +name: Question +about: If you have a question, please check out our other community resources instead of opening an issue. +--- + +Issues on GitHub are intended to be related to bugs or feature requests, so we recommend using our other community resources instead of asking here. + +- [Vitess User Guide](https://vitess.io/user-guide/introduction/) +- Any other questions can be asked in the community [Slack workspace](https://vitess.io/slack) diff --git a/internal/stackql-parser-fork/.github/workflows/check_formatting.yml b/internal/stackql-parser-fork/.github/workflows/check_formatting.yml new file mode 100644 index 00000000..7233082d --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/check_formatting.yml @@ -0,0 +1,21 @@ +name: check_formatting +on: [pull_request] +jobs: + + build: + name: Check Formatting + runs-on: ubuntu-latest + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Run go fmt + run: | + gofmt -l . | grep -vF vendor/ && exit 1 || echo "All files formatted correctly" + diff --git a/internal/stackql-parser-fork/.github/workflows/check_make_parser.yml b/internal/stackql-parser-fork/.github/workflows/check_make_parser.yml new file mode 100644 index 00000000..4f89fe56 --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/check_make_parser.yml @@ -0,0 +1,35 @@ +name: check_make_parser +on: [push, pull_request] +jobs: + + build: + name: Check Make Parser + runs-on: ubuntu-latest + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + - name: Run make minimaltools + run: | + make minimaltools + + - name: check_make_parser + run: | + tools/check_make_parser.sh + diff --git a/internal/stackql-parser-fork/.github/workflows/check_make_visitor.yml b/internal/stackql-parser-fork/.github/workflows/check_make_visitor.yml new file mode 100644 index 00000000..68d8660b --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/check_make_visitor.yml @@ -0,0 +1,35 @@ +name: check_make_visitor +on: [push, pull_request] +jobs: + + build: + name: Check Make Visitor + runs-on: ubuntu-latest + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + - name: Run make minimaltools + run: | + make minimaltools + + - name: check_make_visitor + run: | + misc/git/hooks/visitorgen + diff --git a/internal/stackql-parser-fork/.github/workflows/cluster_endtoend.yml b/internal/stackql-parser-fork/.github/workflows/cluster_endtoend.yml new file mode 100644 index 00000000..401a147e --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/cluster_endtoend.yml @@ -0,0 +1,47 @@ +name: cluster_endtoend +on: [push, pull_request] +jobs: + + build: + runs-on: ubuntu-latest + strategy: + matrix: + name: [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Installing zookeeper and consul + run: | + # Only running for shard 18 and 24 where we need to install consul and zookeeper + if [[ ${{matrix.name}} == 18 || ${{matrix.name}} == 24 ]]; then + make tools + fi + + - name: sharded cluster_endtoend + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard ${{matrix.name}} + diff --git a/internal/stackql-parser-fork/.github/workflows/cluster_initial_sharding_multi.yml b/internal/stackql-parser-fork/.github/workflows/cluster_initial_sharding_multi.yml new file mode 100644 index 00000000..e2f5cf03 --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/cluster_initial_sharding_multi.yml @@ -0,0 +1,20 @@ +name: cluster_initial_sharding_multi +on: [push, pull_request] +jobs: + + build: + name: cluster initial sharding multi + runs-on: ubuntu-latest + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Run initial sharding multi + run: | + go run test.go -print-log initial_sharding_multi \ No newline at end of file diff --git a/internal/stackql-parser-fork/.github/workflows/cluster_vtctl_web.yml b/internal/stackql-parser-fork/.github/workflows/cluster_vtctl_web.yml new file mode 100644 index 00000000..82597559 --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/cluster_vtctl_web.yml @@ -0,0 +1,22 @@ +name: cluster_vtctl_web +on: [push, pull_request] +jobs: + + build: + name: cluster vtctl web + runs-on: ubuntu-latest + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Run vtctl web + run: | + # Running web test inside docker + go run test.go -docker=true -print-log -shard 10 + diff --git a/internal/stackql-parser-fork/.github/workflows/create_release.yml b/internal/stackql-parser-fork/.github/workflows/create_release.yml new file mode 100644 index 00000000..1f8bb410 --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/create_release.yml @@ -0,0 +1,40 @@ +# This creates a {tar.gz,deb,rpm} file and uploads it to a release. +# To trigger this, create a new release.. but make sure that you publish +# it immediately and do not save it as a DRAFT. + +name: Release +on: + release: + types: [created] + +jobs: + build: + name: Create Release + runs-on: ubuntu-latest + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y make ruby ruby-dev + go mod download + sudo gem install fpm + + - name: Make Packages + run: | + ./tools/make-release-packages.sh + + - name: Upload Files + uses: csexton/release-asset-action@master + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + pattern: "releases/*.{tar.gz,rpm,deb}" + diff --git a/internal/stackql-parser-fork/.github/workflows/e2e_race.yml b/internal/stackql-parser-fork/.github/workflows/e2e_race.yml new file mode 100644 index 00000000..5484bf81 --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/e2e_race.yml @@ -0,0 +1,35 @@ +name: e2e_race +on: [push, pull_request] +jobs: + + build: + name: End-to-End Test (Race) + runs-on: ubuntu-latest + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + - name: Run make minimaltools + run: | + make minimaltools + + - name: e2e_race + timeout-minutes: 30 + run: | + make e2e_test_race diff --git a/internal/stackql-parser-fork/.github/workflows/endtoend.yml b/internal/stackql-parser-fork/.github/workflows/endtoend.yml new file mode 100644 index 00000000..5e039acf --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/endtoend.yml @@ -0,0 +1,39 @@ +name: endtoend +on: [push, pull_request] +jobs: + + build: + name: End-to-End Test + runs-on: ubuntu-latest + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + - name: Run make minimaltools + run: | + make minimaltools + + - name: Build + run: | + make build + + - name: endtoend + timeout-minutes: 30 + run: | + tools/e2e_test_runner.sh diff --git a/internal/stackql-parser-fork/.github/workflows/golangci-linter.yml b/internal/stackql-parser-fork/.github/workflows/golangci-linter.yml new file mode 100644 index 00000000..110231f5 --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/golangci-linter.yml @@ -0,0 +1,23 @@ +name: golangci-lint +on: [push,pull_request] +jobs: + + build: + name: Build + runs-on: ubuntu-latest + steps: + + - name: Set up Go 1.13 + uses: actions/setup-go@v1 + with: + go-version: 1.13 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v1 + + - name: Install golangci-lint + run: curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(go env GOPATH)/bin v1.45.2 + + - name: Run golangci-lint + run: $(go env GOPATH)/bin/golangci-lint run --disable=errcheck --timeout=10m go/... diff --git a/internal/stackql-parser-fork/.github/workflows/legacy_local_example.yml b/internal/stackql-parser-fork/.github/workflows/legacy_local_example.yml new file mode 100644 index 00000000..5780f90c --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/legacy_local_example.yml @@ -0,0 +1,54 @@ +name: local_example +on: [push, pull_request] +jobs: + + build: + name: Legacy local example using ${{ matrix.topo }} on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + topo: [etcd,k8s] + + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + if [ ${{matrix.os}} = "ubuntu-latest" ]; then + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + elif [ ${{matrix.os}} = "macos-latest" ]; then + brew install mysql@5.7 make unzip etcd curl git wget + fi + go mod download + + - name: Run make minimaltools + run: | + make minimaltools + + - name: Build + run: | + make build + + - name: local_example + timeout-minutes: 30 + run: | + export TOPO=${{matrix.topo}} + if [ ${{matrix.os}} = "macos-latest" ]; then + export PATH="/usr/local/opt/mysql@5.7/bin:$PATH" + fi + # Make sure that testing is entirely non-reliant on config + mv config config-moved + eatmydata -- test/legacy_local_example.sh diff --git a/internal/stackql-parser-fork/.github/workflows/local_example.yml b/internal/stackql-parser-fork/.github/workflows/local_example.yml new file mode 100644 index 00000000..47d3c8c2 --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/local_example.yml @@ -0,0 +1,54 @@ +name: local_example +on: [push, pull_request] +jobs: + + build: + name: Local example using ${{ matrix.topo }} on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + topo: [etcd,k8s] + + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + if [ ${{matrix.os}} = "ubuntu-latest" ]; then + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + elif [ ${{matrix.os}} = "macos-latest" ]; then + brew install mysql@5.7 make unzip etcd curl git wget + fi + go mod download + + - name: Run make minimaltools + run: | + make minimaltools + + - name: Build + run: | + make build + + - name: local_example + timeout-minutes: 30 + run: | + export TOPO=${{matrix.topo}} + if [ ${{matrix.os}} = "macos-latest" ]; then + export PATH="/usr/local/opt/mysql@5.7/bin:$PATH" + fi + # Make sure that testing is entirely non-reliant on config + mv config config-moved + eatmydata -- test/local_example.sh diff --git a/internal/stackql-parser-fork/.github/workflows/misc_test_docker.yml b/internal/stackql-parser-fork/.github/workflows/misc_test_docker.yml new file mode 100644 index 00000000..fd617e46 --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/misc_test_docker.yml @@ -0,0 +1,20 @@ +name: misc test +on: [push, pull_request] +jobs: + + build: + name: Misc Test + runs-on: ubuntu-latest + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Run Misc test which requires docker + run: | + go run test.go -docker=true -shard 25 \ No newline at end of file diff --git a/internal/stackql-parser-fork/.github/workflows/region_example.yml b/internal/stackql-parser-fork/.github/workflows/region_example.yml new file mode 100644 index 00000000..0da77d8c --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/region_example.yml @@ -0,0 +1,55 @@ +name: region_example +on: [push, pull_request] +jobs: + + build: + name: Region Sharding example using ${{ matrix.topo }} on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + topo: [etcd] + + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + if [ ${{matrix.os}} = "ubuntu-latest" ]; then + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + elif [ ${{matrix.os}} = "macos-latest" ]; then + brew install mysql@5.7 make unzip etcd curl git wget + fi + go mod download + + - name: Run make minimaltools + run: | + make minimaltools + + - name: Build + run: | + make build + + - name: region_example + timeout-minutes: 30 + run: | + export TOPO=${{matrix.topo}} + if [ ${{matrix.os}} = "macos-latest" ]; then + export PATH="/usr/local/opt/mysql@5.7/bin:$PATH" + fi + # Make sure that testing is entirely non-reliant on config + mv config config-moved + sed -i 's/user\/my-vitess/runner\/work\/vitess\/vitess/g' examples/region_sharding/main_vschema.json #set correct path to countries.json + eatmydata -- test/region_example.sh diff --git a/internal/stackql-parser-fork/.github/workflows/sonar_analysis.yml b/internal/stackql-parser-fork/.github/workflows/sonar_analysis.yml new file mode 100644 index 00000000..3381d93b --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/sonar_analysis.yml @@ -0,0 +1,53 @@ +name: sonar_analysis +on: + push: + branches: + - 'sonartest' +jobs: + + build: + runs-on: ubuntu-latest + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + - name: Execute unit test and cluster endtoend test + run: | + eatmydata -- ./tools/all_test_for_coverage.sh + mkdir report + cp /tmp/*.out ./report/. + + - name: Analyse sonar + run: | + export SONAR_SCANNER_VERSION=4.2.0.1873 + export SONAR_SCANNER_HOME=$HOME/.sonar/sonar-scanner-$SONAR_SCANNER_VERSION-linux + curl --create-dirs -sSLo $HOME/.sonar/sonar-scanner.zip https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-$SONAR_SCANNER_VERSION-linux.zip + unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ + export PATH=$SONAR_SCANNER_HOME/bin:$PATH + export SONAR_SCANNER_OPTS="-server" + + sonar-scanner \ + -Dsonar.projectKey=vitessio \ + -Dsonar.organization=vitess \ + -Dsonar.host.url=https://sonarcloud.io \ + -Dsonar.login=${SONAR_TOKEN} \ + -Dsonar.go.coverage.reportPaths=report/*.out + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} diff --git a/internal/stackql-parser-fork/.github/workflows/unit.yml b/internal/stackql-parser-fork/.github/workflows/unit.yml new file mode 100644 index 00000000..5929e7f3 --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/unit.yml @@ -0,0 +1,90 @@ +name: unit +on: [push, pull_request] +jobs: + + build: + runs-on: ubuntu-latest + strategy: + matrix: + name: [percona56, mysql57, mysql80, mariadb101, mariadb102, mariadb103] + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get update + + if [ ${{matrix.name}} = "mysql57" ]; then + sudo apt-get install -y mysql-server mysql-client + else + # Uninstall likely installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + if [ ${{matrix.name}} = "percona56" ]; then + sudo rm -rf /var/lib/mysql + sudo apt install -y gnupg2 + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y percona-server-server-5.6 percona-server-client-5.6 + elif [ ${{matrix.name}} = "mysql80" ]; then + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + elif [ ${{matrix.name}} = "mariadb101" ]; then + sudo apt-get install -y software-properties-common + sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 + sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.1/ubuntu bionic main' + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server + elif [ ${{matrix.name}} = "mariadb102" ]; then + sudo apt-get install -y software-properties-common + sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 + sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.2/ubuntu bionic main' + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server + elif [ ${{matrix.name}} = "mariadb103" ]; then + sudo apt-get install -y software-properties-common + sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 + sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://nyc2.mirrors.digitalocean.com/mariadb/repo/10.3/ubuntu bionic main' + sudo apt update + sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server + fi + fi + + sudo apt-get install -y make unzip g++ curl git wget ant openjdk-8-jdk eatmydata + sudo service mysql stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + mkdir -p dist bin + curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + + go mod download + + - name: Run make tools + run: | + make tools + + - name: unit + timeout-minutes: 30 + run: | + eatmydata -- make unit_test diff --git a/internal/stackql-parser-fork/.github/workflows/unit_race.yml b/internal/stackql-parser-fork/.github/workflows/unit_race.yml new file mode 100644 index 00000000..09f1d996 --- /dev/null +++ b/internal/stackql-parser-fork/.github/workflows/unit_race.yml @@ -0,0 +1,39 @@ +name: unit_race +on: [push, pull_request] +jobs: + + build: + name: Unit Test (Race) + runs-on: ubuntu-latest + steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.13 + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ curl git wget eatmydata + sudo service mysql stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + + mkdir -p dist bin + curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + + go mod download + + - name: Run make tools + run: | + make tools + + - name: unit_race + timeout-minutes: 30 + run: | + eatmydata -- make unit_test_race diff --git a/internal/stackql-parser-fork/.gitignore b/internal/stackql-parser-fork/.gitignore new file mode 100644 index 00000000..041443ee --- /dev/null +++ b/internal/stackql-parser-fork/.gitignore @@ -0,0 +1,87 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.py[cod] + +# For mac users +.DS_Store + +# Produced by yacc +*.output + +# vim files +*.swp +tags + +# emacs +*~ + +# Eclipse files +.classpath +.project +.pydevproject +.settings/ + +# intellij files +*.iml +.idea + +# vscode +.vscode/ + +# C build dirs +**/build + +# generated protobuf files +/go/vt/.proto.tmp + +# Godeps files +/Godeps/_workspace/pkg +/Godeps/_workspace/bin + +# Eclipse Java CheckStyle plugin configuration. +/java/*/.checkstyle +# java target files +/java/*/target/ +/java/*/bin/ +# pom generated file +/java/jdbc/dependency-reduced-pom.xml +# Version backups generated by "mvn versions:set". +/java/pom.xml.versionsBackup +/java/*/pom.xml.versionsBackup + +# php downloaded dependencies +/php/composer.phar +/php/vendor + +# vitess.io preview site +/preview-vitess.io/ + +# vitess.io generated site files +/docs/ + +# test.go output files +_test/ +/test/stats.json + +# Go vendored libs +/vendor/*/ + +# release folder +releases + +# Angular2 Bower Libs +/web/vtctld2/.bowerrc~ +/web/vtctld2/bower.json~ +/web/vtctld2/public/bower_components/ + + +# Vagrant +.vagrant + +/dist/ +/vthook/ +/bin/ +/vtdataroot/ +venv + +.scannerwork +report \ No newline at end of file diff --git a/internal/stackql-parser-fork/.gitmodules b/internal/stackql-parser-fork/.gitmodules new file mode 100644 index 00000000..e69de29b diff --git a/internal/stackql-parser-fork/CODE_OF_CONDUCT.md b/internal/stackql-parser-fork/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..841332f7 --- /dev/null +++ b/internal/stackql-parser-fork/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +We adopt the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/internal/stackql-parser-fork/CONTRIBUTING.md b/internal/stackql-parser-fork/CONTRIBUTING.md new file mode 100644 index 00000000..2ac1a2d5 --- /dev/null +++ b/internal/stackql-parser-fork/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# Contributing to Vitess + +## Workflow + +For all contributors, we recommend the standard [GitHub flow](https://guides.github.com/introduction/flow/) +based on [forking and pull requests](https://guides.github.com/activities/forking/). + +For significant changes, please [create an issue](https://github.com/vitessio/vitess/issues) +to let everyone know what you're planning to work on, and to track progress and design decisions. + diff --git a/internal/stackql-parser-fork/LICENSE b/internal/stackql-parser-fork/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/internal/stackql-parser-fork/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/internal/stackql-parser-fork/MAINTAINERS.md b/internal/stackql-parser-fork/MAINTAINERS.md new file mode 100644 index 00000000..cac7dd84 --- /dev/null +++ b/internal/stackql-parser-fork/MAINTAINERS.md @@ -0,0 +1,12 @@ +This page lists all active maintainers and their areas of expertise. This can be used for routing PRs, questions, etc. to the right place. + +The following is the full list, alphabetically ordered. + +* Jeffrey Aven ([jeffreyaven](https://github.com/jeffreyaven)) javen@stackql.io +* Kieran Rimmer ([general-kroll-4-life](https://github.com/general-kroll-4-life)) krimmer@stackql.io + +## Areas of expertise + +### General + +general-kroll-4-life, jeffreyaven diff --git a/internal/stackql-parser-fork/README.md b/internal/stackql-parser-fork/README.md new file mode 100644 index 00000000..973adcdd --- /dev/null +++ b/internal/stackql-parser-fork/README.md @@ -0,0 +1,29 @@ + + +# StackQL Parser + +This is the `stackql` parser, a forked descendent of [vitess](https://github.com/vitessio/vitess); we are deeply grateful to and fully acknowledge this work. + +There are elements of the original work that are not required, but may take some time to excise. + + +## Rebuilding parser + + +```bash +make -C go/vt/sqlparser +``` + + +After changes to the ast: + +```bash +cicd/build_scripts/01_ast_rebuild.sh +``` + + +## License + +Unless otherwise noted, source files are distributed +under the Apache Version 2.0 license found in the LICENSE file. + diff --git a/internal/stackql-parser-fork/cicd/build_scripts/01_ast_rebuild.sh b/internal/stackql-parser-fork/cicd/build_scripts/01_ast_rebuild.sh new file mode 100755 index 00000000..553a274d --- /dev/null +++ b/internal/stackql-parser-fork/cicd/build_scripts/01_ast_rebuild.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CURDIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + +REPOSITORY_ROOT="$(realpath "${CURDIR}/../..")" + +cd "${REPOSITORY_ROOT}/go/vt/sqlparser" + +go run ./visitorgen/main -input=ast.go -output=rewriter.go diff --git a/internal/stackql-parser-fork/config/gomysql.pc.tmpl b/internal/stackql-parser-fork/config/gomysql.pc.tmpl new file mode 100644 index 00000000..ce603b0e --- /dev/null +++ b/internal/stackql-parser-fork/config/gomysql.pc.tmpl @@ -0,0 +1,2 @@ +Name: GoMysql +Description: Flags for using mysql C client in go diff --git a/internal/stackql-parser-fork/config/init_db.sql b/internal/stackql-parser-fork/config/init_db.sql new file mode 100644 index 00000000..64d2382e --- /dev/null +++ b/internal/stackql-parser-fork/config/init_db.sql @@ -0,0 +1,104 @@ +# This file is executed immediately after mysql_install_db, +# to initialize a fresh data directory. + +############################################################################### +# WARNING: This sql is *NOT* safe for production use, +# as it contains default well-known users and passwords. +# Care should be taken to change these users and passwords +# for production. +############################################################################### + +############################################################################### +# Equivalent of mysql_secure_installation +############################################################################### + +# Changes during the init db should not make it to the binlog. +# They could potentially create errant transactions on replicas. +SET sql_log_bin = 0; +# Remove anonymous users. +DELETE FROM mysql.user WHERE User = ''; + +# Disable remote root access (only allow UNIX socket). +DELETE FROM mysql.user WHERE User = 'root' AND Host != 'localhost'; + +# Remove test database. +DROP DATABASE IF EXISTS test; + +############################################################################### +# Vitess defaults +############################################################################### + +# Vitess-internal database. +CREATE DATABASE IF NOT EXISTS _vt; +# Note that definitions of local_metadata and shard_metadata should be the same +# as in production which is defined in go/vt/mysqlctl/metadata_tables.go. +CREATE TABLE IF NOT EXISTS _vt.local_metadata ( + name VARCHAR(255) NOT NULL, + value VARCHAR(255) NOT NULL, + db_name VARBINARY(255) NOT NULL, + PRIMARY KEY (db_name, name) + ) ENGINE=InnoDB; +CREATE TABLE IF NOT EXISTS _vt.shard_metadata ( + name VARCHAR(255) NOT NULL, + value MEDIUMBLOB NOT NULL, + db_name VARBINARY(255) NOT NULL, + PRIMARY KEY (db_name, name) + ) ENGINE=InnoDB; + +# Admin user with all privileges. +CREATE USER 'vt_dba'@'localhost'; +GRANT ALL ON *.* TO 'vt_dba'@'localhost'; +GRANT GRANT OPTION ON *.* TO 'vt_dba'@'localhost'; + +# User for app traffic, with global read-write access. +CREATE USER 'vt_app'@'localhost'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, + REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, + LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, + SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER + ON *.* TO 'vt_app'@'localhost'; + +# User for app debug traffic, with global read access. +CREATE USER 'vt_appdebug'@'localhost'; +GRANT SELECT, SHOW DATABASES, PROCESS ON *.* TO 'vt_appdebug'@'localhost'; + +# User for administrative operations that need to be executed as non-SUPER. +# Same permissions as vt_app here. +CREATE USER 'vt_allprivs'@'localhost'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, + REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, + LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, + SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER + ON *.* TO 'vt_allprivs'@'localhost'; + +# User for slave replication connections. +CREATE USER 'vt_repl'@'%'; +GRANT REPLICATION SLAVE ON *.* TO 'vt_repl'@'%'; + +# User for Vitess filtered replication (binlog player). +# Same permissions as vt_app. +CREATE USER 'vt_filtered'@'localhost'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, + REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, + LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, + SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER + ON *.* TO 'vt_filtered'@'localhost'; + +# User for general MySQL monitoring. +CREATE USER 'vt_monitoring'@'localhost'; +GRANT SELECT, PROCESS, SUPER, REPLICATION CLIENT, RELOAD + ON *.* TO 'vt_monitoring'@'localhost'; +GRANT SELECT, UPDATE, DELETE, DROP + ON performance_schema.* TO 'vt_monitoring'@'localhost'; + +# User for Orchestrator (https://github.com/openark/orchestrator). +CREATE USER 'orc_client_user'@'%' IDENTIFIED BY 'orc_client_user_password'; +GRANT SUPER, PROCESS, REPLICATION SLAVE, RELOAD + ON *.* TO 'orc_client_user'@'%'; +GRANT SELECT + ON _vt.* TO 'orc_client_user'@'%'; + +FLUSH PRIVILEGES; + +RESET SLAVE ALL; +RESET MASTER; diff --git a/internal/stackql-parser-fork/config/mycnf/default-fast.cnf b/internal/stackql-parser-fork/config/mycnf/default-fast.cnf new file mode 100644 index 00000000..1880704e --- /dev/null +++ b/internal/stackql-parser-fork/config/mycnf/default-fast.cnf @@ -0,0 +1,22 @@ +# This sets some unsafe settings specifically for +# the test-suite which is currently MySQL 5.7 based +# In future it should be renamed testsuite.cnf + +innodb_buffer_pool_size = 32M +innodb_flush_log_at_trx_commit = 0 +innodb_log_buffer_size = 1M +innodb_log_file_size = 5M + +# Native AIO tends to run into aio-max-nr limit during test startup. +innodb_use_native_aio = 0 + +key_buffer_size = 2M +sync_binlog=0 +innodb_doublewrite=0 + +# These two settings are required for the testsuite to pass, +# but enabling them does not spark joy. They should be removed +# in the future. See: +# https://github.com/vitessio/vitess/issues/5396 + +sql_mode = STRICT_TRANS_TABLES diff --git a/internal/stackql-parser-fork/config/mycnf/default.cnf b/internal/stackql-parser-fork/config/mycnf/default.cnf new file mode 100644 index 00000000..8facbbe0 --- /dev/null +++ b/internal/stackql-parser-fork/config/mycnf/default.cnf @@ -0,0 +1,37 @@ +# Global configuration that is auto-included for all MySQL/MariaDB versions + +datadir = {{.DataDir}} +innodb_data_home_dir = {{.InnodbDataHomeDir}} +innodb_log_group_home_dir = {{.InnodbLogGroupHomeDir}} +log-error = {{.ErrorLogPath}} +log-bin = {{.BinLogPath}} +relay-log = {{.RelayLogPath}} +relay-log-index = {{.RelayLogIndexPath}} +pid-file = {{.PidFile}} +port = {{.MysqlPort}} + +# all db instances should start in read-only mode - once the db is started and +# fully functional, we'll push it into read-write mode +read-only +server-id = {{.ServerID}} + +# all db instances should skip the slave startup - that way we can do any +# additional configuration (like enabling semi-sync) before we connect to +# the master. +skip_slave_start +socket = {{.SocketFile}} +tmpdir = {{.TmpDir}} + +slow-query-log-file = {{.SlowLogPath}} + +# These are sensible defaults that apply to all MySQL/MariaDB versions + +long_query_time = 2 +slow-query-log +skip-name-resolve +connect_timeout = 30 +innodb_lock_wait_timeout = 20 +max_allowed_packet = 64M +max_connections = 500 + + diff --git a/internal/stackql-parser-fork/config/mycnf/master_mariadb100.cnf b/internal/stackql-parser-fork/config/mycnf/master_mariadb100.cnf new file mode 100644 index 00000000..4772ab3d --- /dev/null +++ b/internal/stackql-parser-fork/config/mycnf/master_mariadb100.cnf @@ -0,0 +1,43 @@ +# This file is auto-included when MariaDB 10.0 is detected. + +# Semi-sync replication is required for automated unplanned failover +# (when the master goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. +# +# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync +# at the proper time when replication is set up, or when masters are +# promoted or demoted. +plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so + +slave_net_timeout = 60 + +# MariaDB 10.0 is unstrict by default +sql_mode = STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION + +# enable strict mode so it's safe to compare sequence numbers across different server IDs. +gtid_strict_mode = 1 +innodb_stats_persistent = 0 + +# When semi-sync is enabled, don't allow fallback to async +# if you get no ack, or have no slaves. This is necessary to +# prevent alternate futures when doing a failover in response to +# a master that becomes unresponsive. +rpl_semi_sync_master_timeout = 1000000000000000000 +rpl_semi_sync_master_wait_no_slave = 1 + + +character_set_server = utf8 +collation_server = utf8_general_ci + +expire_logs_days = 3 + +sync_binlog = 1 +binlog_format = ROW +log_slave_updates +expire_logs_days = 3 + +# In MariaDB the default charset is latin1 + +character_set_server = utf8 +collation_server = utf8_general_ci + diff --git a/internal/stackql-parser-fork/config/mycnf/master_mariadb101.cnf b/internal/stackql-parser-fork/config/mycnf/master_mariadb101.cnf new file mode 100644 index 00000000..245ffdc4 --- /dev/null +++ b/internal/stackql-parser-fork/config/mycnf/master_mariadb101.cnf @@ -0,0 +1,42 @@ +# This file is auto-included when MariaDB 10.1 is detected. + +# Semi-sync replication is required for automated unplanned failover +# (when the master goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. +# +# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync +# at the proper time when replication is set up, or when masters are +# promoted or demoted. +plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so + +slave_net_timeout = 60 + +# MariaDB 10.1 default is only no-engine-substitution and no-auto-create-user +sql_mode = STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION,NO_AUTO_CREATE_USER + +# enable strict mode so it's safe to compare sequence numbers across different server IDs. +gtid_strict_mode = 1 +innodb_stats_persistent = 0 + +# When semi-sync is enabled, don't allow fallback to async +# if you get no ack, or have no slaves. This is necessary to +# prevent alternate futures when doing a failover in response to +# a master that becomes unresponsive. +rpl_semi_sync_master_timeout = 1000000000000000000 +rpl_semi_sync_master_wait_no_slave = 1 + + +character_set_server = utf8 +collation_server = utf8_general_ci + +expire_logs_days = 3 + +sync_binlog = 1 +binlog_format = ROW +log_slave_updates +expire_logs_days = 3 + +# In MariaDB the default charset is latin1 + +character_set_server = utf8 +collation_server = utf8_general_ci diff --git a/internal/stackql-parser-fork/config/mycnf/master_mariadb102.cnf b/internal/stackql-parser-fork/config/mycnf/master_mariadb102.cnf new file mode 100644 index 00000000..2f04d680 --- /dev/null +++ b/internal/stackql-parser-fork/config/mycnf/master_mariadb102.cnf @@ -0,0 +1,37 @@ +# This file is auto-included when MariaDB 10.2 is detected. + +# Semi-sync replication is required for automated unplanned failover +# (when the master goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. +# +# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync +# at the proper time when replication is set up, or when masters are +# promoted or demoted. +plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so + +# enable strict mode so it's safe to compare sequence numbers across different server IDs. +gtid_strict_mode = 1 +innodb_stats_persistent = 0 + +# When semi-sync is enabled, don't allow fallback to async +# if you get no ack, or have no slaves. This is necessary to +# prevent alternate futures when doing a failover in response to +# a master that becomes unresponsive. +rpl_semi_sync_master_timeout = 1000000000000000000 +rpl_semi_sync_master_wait_no_slave = 1 + + +character_set_server = utf8 +collation_server = utf8_general_ci + +expire_logs_days = 3 + +sync_binlog = 1 +binlog_format = ROW +log_slave_updates +expire_logs_days = 3 + +# In MariaDB the default charset is latin1 + +character_set_server = utf8 +collation_server = utf8_general_ci diff --git a/internal/stackql-parser-fork/config/mycnf/master_mariadb103.cnf b/internal/stackql-parser-fork/config/mycnf/master_mariadb103.cnf new file mode 100644 index 00000000..3181e368 --- /dev/null +++ b/internal/stackql-parser-fork/config/mycnf/master_mariadb103.cnf @@ -0,0 +1,30 @@ +# This file is auto-included when MariaDB 10.3 is detected. + +# enable strict mode so it's safe to compare sequence numbers across different server IDs. +gtid_strict_mode = 1 +innodb_stats_persistent = 0 + +# When semi-sync is enabled, don't allow fallback to async +# if you get no ack, or have no slaves. This is necessary to +# prevent alternate futures when doing a failover in response to +# a master that becomes unresponsive. +rpl_semi_sync_master_timeout = 1000000000000000000 +rpl_semi_sync_master_wait_no_slave = 1 + + +character_set_server = utf8 +collation_server = utf8_general_ci + +expire_logs_days = 3 + +sync_binlog = 1 +binlog_format = ROW +log_slave_updates +expire_logs_days = 3 + +# In MariaDB the default charset is latin1 + +character_set_server = utf8 +collation_server = utf8_general_ci + + diff --git a/internal/stackql-parser-fork/config/mycnf/master_mariadb104.cnf b/internal/stackql-parser-fork/config/mycnf/master_mariadb104.cnf new file mode 100644 index 00000000..a1111d85 --- /dev/null +++ b/internal/stackql-parser-fork/config/mycnf/master_mariadb104.cnf @@ -0,0 +1,30 @@ +# This file is auto-included when MariaDB 10.4 is detected. + +# enable strict mode so it's safe to compare sequence numbers across different server IDs. +gtid_strict_mode = 1 +innodb_stats_persistent = 0 + +# When semi-sync is enabled, don't allow fallback to async +# if you get no ack, or have no slaves. This is necessary to +# prevent alternate futures when doing a failover in response to +# a master that becomes unresponsive. +rpl_semi_sync_master_timeout = 1000000000000000000 +rpl_semi_sync_master_wait_no_slave = 1 + + +character_set_server = utf8 +collation_server = utf8_general_ci + +expire_logs_days = 3 + +sync_binlog = 1 +binlog_format = ROW +log_slave_updates +expire_logs_days = 3 + +# In MariaDB the default charset is latin1 + +character_set_server = utf8 +collation_server = utf8_general_ci + + diff --git a/internal/stackql-parser-fork/config/mycnf/master_mysql56.cnf b/internal/stackql-parser-fork/config/mycnf/master_mysql56.cnf new file mode 100644 index 00000000..f38e1c17 --- /dev/null +++ b/internal/stackql-parser-fork/config/mycnf/master_mysql56.cnf @@ -0,0 +1,42 @@ +# This file is auto-included when MySQL 5.6 is detected. + +# MySQL 5.6 does not enable the binary log by default, and +# the default for sync_binlog is unsafe. The format is TABLE, and +# info repositories also default to file. + +sync_binlog = 1 +gtid_mode = ON +binlog_format = ROW +log_slave_updates +enforce_gtid_consistency +expire_logs_days = 3 +master_info_repository = TABLE +relay_log_info_repository = TABLE +relay_log_purge = 1 +relay_log_recovery = 1 +slave_net_timeout = 60 + +# In MySQL 5.6 the default charset is latin1 + +character_set_server = utf8 +collation_server = utf8_general_ci + +# MySQL 5.6 is unstrict by default +sql_mode = STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION + +# Semi-sync replication is required for automated unplanned failover +# (when the master goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. +# +# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync +# at the proper time when replication is set up, or when masters are +# promoted or demoted. +plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so + +# When semi-sync is enabled, don't allow fallback to async +# if you get no ack, or have no slaves. This is necessary to +# prevent alternate futures when doing a failover in response to +# a master that becomes unresponsive. +rpl_semi_sync_master_timeout = 1000000000000000000 +rpl_semi_sync_master_wait_no_slave = 1 + diff --git a/internal/stackql-parser-fork/config/mycnf/master_mysql57.cnf b/internal/stackql-parser-fork/config/mycnf/master_mysql57.cnf new file mode 100644 index 00000000..4282f788 --- /dev/null +++ b/internal/stackql-parser-fork/config/mycnf/master_mysql57.cnf @@ -0,0 +1,35 @@ +# This file is auto-included when MySQL 5.7 is detected. + +# MySQL 5.7 does not enable the binary log by default, and +# info repositories default to file + +gtid_mode = ON +log_slave_updates +enforce_gtid_consistency +expire_logs_days = 3 +master_info_repository = TABLE +relay_log_info_repository = TABLE +relay_log_purge = 1 +relay_log_recovery = 1 + +# In MySQL 5.7 the default charset is latin1 + +character_set_server = utf8 +collation_server = utf8_general_ci + +# Semi-sync replication is required for automated unplanned failover +# (when the master goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. +# +# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync +# at the proper time when replication is set up, or when masters are +# promoted or demoted. +plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so + +# When semi-sync is enabled, don't allow fallback to async +# if you get no ack, or have no slaves. This is necessary to +# prevent alternate futures when doing a failover in response to +# a master that becomes unresponsive. +rpl_semi_sync_master_timeout = 1000000000000000000 +rpl_semi_sync_master_wait_no_slave = 1 + diff --git a/internal/stackql-parser-fork/config/mycnf/master_mysql80.cnf b/internal/stackql-parser-fork/config/mycnf/master_mysql80.cnf new file mode 100644 index 00000000..6c3d77d5 --- /dev/null +++ b/internal/stackql-parser-fork/config/mycnf/master_mysql80.cnf @@ -0,0 +1,31 @@ +# This file is auto-included when MySQL 8.0 is detected. + +# MySQL 8.0 enables binlog by default with sync_binlog and TABLE info repositories +# It does not enable GTIDs or enforced GTID consistency + +gtid_mode = ON +enforce_gtid_consistency +relay_log_recovery = 1 +binlog_expire_logs_seconds = 259200 + +# disable mysqlx +mysqlx = 0 + +# 8.0 changes the default auth-plugin to caching_sha2_password +default_authentication_plugin = mysql_native_password + +# Semi-sync replication is required for automated unplanned failover +# (when the master goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. +# +# If the -enable_semi_sync flag is used, VTTablet will enable semi-sync +# at the proper time when replication is set up, or when masters are +# promoted or demoted. +plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so + +# MySQL 8.0 will not load plugins during --initialize +# which makes these options unknown. Prefixing with --loose +# tells the server it's fine if they are not understood. +loose_rpl_semi_sync_master_timeout = 1000000000000000000 +loose_rpl_semi_sync_master_wait_no_slave = 1 + diff --git a/internal/stackql-parser-fork/config/mycnf/sbr.cnf b/internal/stackql-parser-fork/config/mycnf/sbr.cnf new file mode 100644 index 00000000..5808e043 --- /dev/null +++ b/internal/stackql-parser-fork/config/mycnf/sbr.cnf @@ -0,0 +1,3 @@ +# This file is used to allow legacy tests to pass +# In theory it should not be required +binlog_format=statement diff --git a/internal/stackql-parser-fork/config/tablet/default.yaml b/internal/stackql-parser-fork/config/tablet/default.yaml new file mode 100644 index 00000000..68de6106 --- /dev/null +++ b/internal/stackql-parser-fork/config/tablet/default.yaml @@ -0,0 +1,115 @@ +tabletID: zone-1234 + +init: + dbName: # init_db_name_override + keyspace: # init_keyspace + shard: # init_shard + tabletType: # init_tablet_type + timeoutSeconds: 60 # init_timeout + +db: + socket: # db_socket + host: # db_host + port: 0 # db_port + charSet: # db_charset + flags: 0 # db_flags + flavor: # db_flavor + sslCa: # db_ssl_ca + sslCaPath: # db_ssl_ca_path + sslCert: # db_ssl_cert + sslKey: # db_ssl_key + serverName: # db_server_name + connectTimeoutMilliseconds: 0 # db_connect_timeout_ms + app: + user: vt_app # db_app_user + password: # db_app_password + useSsl: true # db_app_use_ssl + preferTcp: false + dba: + user: vt_dba # db_dba_user + password: # db_dba_password + useSsl: true # db_dba_use_ssl + preferTcp: false + filtered: + user: vt_filtered # db_filtered_user + password: # db_filtered_password + useSsl: true # db_filtered_use_ssl + preferTcp: false + repl: + user: vt_repl # db_repl_user + password: # db_repl_password + useSsl: true # db_repl_use_ssl + preferTcp: false + appdebug: + user: vt_appdebug # db_appdebug_user + password: # db_appdebug_password + useSsl: true # db_appdebug_use_ssl + preferTcp: false + allprivs: + user: vt_allprivs # db_allprivs_user + password: # db_allprivs_password + useSsl: true # db_allprivs_use_ssl + preferTcp: false + +oltpReadPool: + size: 16 # queryserver-config-pool-size + timeoutSeconds: 0 # queryserver-config-query-pool-timeout + idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout + prefillParallelism: 0 # queryserver-config-pool-prefill-parallelism + maxWaiters: 50000 # queryserver-config-query-pool-waiter-cap + +olapReadPool: + size: 200 # queryserver-config-stream-pool-size + timeoutSeconds: 0 # queryserver-config-query-pool-timeout + idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout + prefillParallelism: 0 # queryserver-config-stream-pool-prefill-parallelism + maxWaiters: 0 + +txPool: + size: 20 # queryserver-config-transaction-cap + timeoutSeconds: 1 # queryserver-config-txpool-timeout + idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout + prefillParallelism: 0 # queryserver-config-transaction-prefill-parallelism + maxWaiters: 50000 # queryserver-config-txpool-waiter-cap + +oltp: + queryTimeoutSeconds: 30 # queryserver-config-query-timeout + txTimeoutSeconds: 30 # queryserver-config-transaction-timeout + maxRows: 10000 # queryserver-config-max-result-size + warnRows: 0 # queryserver-config-warn-result-size + +hotRowProtection: + mode: disable|dryRun|enable # enable_hot_row_protection, enable_hot_row_protection_dry_run + # Default value is same as txPool.size. + maxQueueSize: 20 # hot_row_protection_max_queue_size + maxGlobalQueueSize: 1000 # hot_row_protection_max_global_queue_size + maxConcurrency: 5 # hot_row_protection_concurrent_transactions + +consolidator: enable|disable|notOnMaster # enable-consolidator, enable-consolidator-replicas +heartbeatIntervalMilliseconds: 0 # heartbeat_enable, heartbeat_interval +shutdownGracePeriodSeconds: 0 # transaction_shutdown_grace_period +passthroughDML: false # queryserver-config-passthrough-dmls +streamBufferSize: 32768 # queryserver-config-stream-buffer-size +queryCacheSize: 5000 # queryserver-config-query-cache-size +schemaReloadIntervalSeconds: 1800 # queryserver-config-schema-reload-time +watchReplication: false # watch_replication_stream +terseErrors: false # queryserver-config-terse-errors +messagePostponeParallelism: 4 # queryserver-config-message-postpone-cap +cacheResultFields: true # enable-query-plan-field-caching + + +# The following flags are currently not supported. +# enforce_strict_trans_tables +# queryserver-config-strict-table-acl +# queryserver-config-enable-table-acl-dry-run +# queryserver-config-acl-exempt-acl +# enable-tx-throttler +# tx-throttler-config +# tx-throttler-healthcheck-cells +# enable_transaction_limit +# enable_transaction_limit_dry_run +# transaction_limit_per_user +# transaction_limit_by_username +# transaction_limit_by_principal +# transaction_limit_by_component +# transaction_limit_by_subcomponent diff --git a/internal/stackql-parser-fork/config/zk-client-dev.json b/internal/stackql-parser-fork/config/zk-client-dev.json new file mode 100644 index 00000000..e0df7c3d --- /dev/null +++ b/internal/stackql-parser-fork/config/zk-client-dev.json @@ -0,0 +1,4 @@ +{ + "local": "localhost:3863", + "global": "localhost:3963" +} diff --git a/internal/stackql-parser-fork/config/zkcfg/zoo.cfg b/internal/stackql-parser-fork/config/zkcfg/zoo.cfg new file mode 100644 index 00000000..8b1b0099 --- /dev/null +++ b/internal/stackql-parser-fork/config/zkcfg/zoo.cfg @@ -0,0 +1,9 @@ +tickTime=2000 +dataDir={{.DataDir}} +clientPort={{.ClientPort}} +initLimit=5 +syncLimit=2 +maxClientCnxns=0 +{{range .Servers}} +server.{{.ServerId}}={{.Hostname}}:{{.LeaderPort}}:{{.ElectionPort}} +{{end}} diff --git a/internal/stackql-parser-fork/go.mod b/internal/stackql-parser-fork/go.mod new file mode 100644 index 00000000..5888a234 --- /dev/null +++ b/internal/stackql-parser-fork/go.mod @@ -0,0 +1,41 @@ +module github.com/stackql/stackql-parser + +go 1.19 + +require ( + github.com/Masterminds/semver v1.4.2 + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b + github.com/golang/mock v1.3.1 + github.com/golang/protobuf v1.3.2 + github.com/google/go-cmp v0.4.0 + github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 + github.com/opentracing/opentracing-go v1.1.0 + github.com/stretchr/testify v1.4.0 + github.com/uber/jaeger-client-go v2.16.0+incompatible + golang.org/x/lint v0.0.0-20190409202823-959b441ac422 + golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 + golang.org/x/tools v0.1.1 + google.golang.org/grpc v1.24.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 + honnef.co/go/tools v0.0.1-2019.2.3 +) + +require ( + github.com/BurntSushi/toml v0.3.1 // indirect + github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/kr/pretty v0.2.0 // indirect + github.com/philhofer/fwd v1.0.0 // indirect + github.com/pkg/errors v0.8.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/tinylib/msgp v1.1.1 // indirect + github.com/uber-go/atomic v1.4.0 // indirect + github.com/uber/jaeger-lib v2.0.0+incompatible // indirect + golang.org/x/mod v0.4.2 // indirect + golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect + golang.org/x/text v0.3.3 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/genproto v0.0.0-20190926190326-7ee9db18f195 // indirect + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect +) diff --git a/internal/stackql-parser-fork/go.sum b/internal/stackql-parser-fork/go.sum new file mode 100644 index 00000000..76cdcbd1 --- /dev/null +++ b/internal/stackql-parser-fork/go.sum @@ -0,0 +1,124 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 h1:0R5mDLI66Qw13qN80TRz85zthQ2nf2+uDyiV23w6c3Q= +github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tinylib/msgp v1.1.1 h1:TnCZ3FIuKeaIy+F45+Cnp+caqdXGy4z74HvwXN+570Y= +github.com/tinylib/msgp v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= +github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= +github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY= +github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw= +github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190926190326-7ee9db18f195 h1:dWzgMaXfaHsnkRKZ1l3iJLDmTEB40JMl/dqRbJX4D/o= +google.golang.org/genproto v0.0.0-20190926190326-7ee9db18f195/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 h1:j9vAp9Re9bbtA/QFehkJpNba/6W2IbJtNuXZophCa54= +gopkg.in/DataDog/dd-trace-go.v1 v1.17.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/internal/stackql-parser-fork/go/README.md b/internal/stackql-parser-fork/go/README.md new file mode 100644 index 00000000..fc6efdde --- /dev/null +++ b/internal/stackql-parser-fork/go/README.md @@ -0,0 +1,19 @@ +This directory contains all the Go code for Vitess. + +Most of the packages at the top level are general-purpose and are suitable +for use outside Vitess. Packages that are specific to Vitess are in the *vt* +subdirectory. Binaries are in the *cmd* subdirectory. + +Please see [GoDoc](https://godoc.org/vitess.io/vitess/go) for +a listing of the packages and their purposes. + +vt/proto contains the compiled protos for go, one per each directory. +When importing these protos (for instance XXX.proto), we rename them on +import to XXXpb. For instance: + +```go +import ( + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) +``` + diff --git a/internal/stackql-parser-fork/go/bytes2/buffer.go b/internal/stackql-parser-fork/go/bytes2/buffer.go new file mode 100644 index 00000000..a7dacf2d --- /dev/null +++ b/internal/stackql-parser-fork/go/bytes2/buffer.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bytes2 + +// Buffer implements a subset of the write portion of +// bytes.Buffer, but more efficiently. This is meant to +// be used in very high QPS operations, especially for +// WriteByte, and without abstracting it as a Writer. +// Function signatures contain errors for compatibility, +// but they do not return errors. +type Buffer struct { + bytes []byte +} + +// NewBuffer is equivalent to bytes.NewBuffer. +func NewBuffer(b []byte) *Buffer { + return &Buffer{bytes: b} +} + +// Write is equivalent to bytes.Buffer.Write. +func (buf *Buffer) Write(b []byte) (int, error) { + buf.bytes = append(buf.bytes, b...) + return len(b), nil +} + +// WriteString is equivalent to bytes.Buffer.WriteString. +func (buf *Buffer) WriteString(s string) (int, error) { + buf.bytes = append(buf.bytes, s...) + return len(s), nil +} + +// WriteByte is equivalent to bytes.Buffer.WriteByte. +func (buf *Buffer) WriteByte(b byte) error { + buf.bytes = append(buf.bytes, b) + return nil +} + +// Bytes is equivalent to bytes.Buffer.Bytes. +func (buf *Buffer) Bytes() []byte { + return buf.bytes +} + +// Strings is equivalent to bytes.Buffer.Strings. +func (buf *Buffer) String() string { + return string(buf.bytes) +} + +// Len is equivalent to bytes.Buffer.Len. +func (buf *Buffer) Len() int { + return len(buf.bytes) +} diff --git a/internal/stackql-parser-fork/go/bytes2/buffer_test.go b/internal/stackql-parser-fork/go/bytes2/buffer_test.go new file mode 100644 index 00000000..83cdb346 --- /dev/null +++ b/internal/stackql-parser-fork/go/bytes2/buffer_test.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bytes2 + +import ( + "testing" +) + +func TestBuffer(t *testing.T) { + b := NewBuffer(nil) + b.Write([]byte("ab")) + b.WriteString("cd") + b.WriteByte('e') + want := "abcde" + if got := string(b.Bytes()); got != want { + t.Errorf("b.Bytes(): %s, want %s", got, want) + } + if got := b.String(); got != want { + t.Errorf("b.String(): %s, want %s", got, want) + } + if got := b.Len(); got != 5 { + t.Errorf("b.Len(): %d, want 5", got) + } +} diff --git a/internal/stackql-parser-fork/go/cache/lru_cache.go b/internal/stackql-parser-fork/go/cache/lru_cache.go new file mode 100644 index 00000000..cf332356 --- /dev/null +++ b/internal/stackql-parser-fork/go/cache/lru_cache.go @@ -0,0 +1,284 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache implements a LRU cache. +// +// The implementation borrows heavily from SmallLRUCache +// (originally by Nathan Schrenk). The object maintains a doubly-linked list of +// elements. When an element is accessed, it is promoted to the head of the +// list. When space is needed, the element at the tail of the list +// (the least recently used element) is evicted. +package cache + +import ( + "container/list" + "fmt" + "sync" + "time" +) + +// LRUCache is a typical LRU cache implementation. If the cache +// reaches the capacity, the least recently used item is deleted from +// the cache. Note the capacity is not the number of items, but the +// total sum of the Size() of each item. +type LRUCache struct { + mu sync.Mutex + + // list & table contain *entry objects. + list *list.List + table map[string]*list.Element + + size int64 + capacity int64 + evictions int64 +} + +// Value is the interface values that go into LRUCache need to satisfy +type Value interface { + // Size returns how big this value is. If you want to just track + // the cache by number of objects, you may return the size as 1. + Size() int +} + +// Item is what is stored in the cache +type Item struct { + Key string + Value Value +} + +type entry struct { + key string + value Value + size int64 + timeAccessed time.Time +} + +// NewLRUCache creates a new empty cache with the given capacity. +func NewLRUCache(capacity int64) *LRUCache { + return &LRUCache{ + list: list.New(), + table: make(map[string]*list.Element), + capacity: capacity, + } +} + +// Get returns a value from the cache, and marks the entry as most +// recently used. +func (lru *LRUCache) Get(key string) (v Value, ok bool) { + lru.mu.Lock() + defer lru.mu.Unlock() + + element := lru.table[key] + if element == nil { + return nil, false + } + lru.moveToFront(element) + return element.Value.(*entry).value, true +} + +// Peek returns a value from the cache without changing the LRU order. +func (lru *LRUCache) Peek(key string) (v Value, ok bool) { + lru.mu.Lock() + defer lru.mu.Unlock() + + element := lru.table[key] + if element == nil { + return nil, false + } + return element.Value.(*entry).value, true +} + +// Set sets a value in the cache. +func (lru *LRUCache) Set(key string, value Value) { + lru.mu.Lock() + defer lru.mu.Unlock() + + if element := lru.table[key]; element != nil { + lru.updateInplace(element, value) + } else { + lru.addNew(key, value) + } +} + +// SetIfAbsent will set the value in the cache if not present. If the +// value exists in the cache, we don't set it. +func (lru *LRUCache) SetIfAbsent(key string, value Value) { + lru.mu.Lock() + defer lru.mu.Unlock() + + if element := lru.table[key]; element != nil { + lru.moveToFront(element) + } else { + lru.addNew(key, value) + } +} + +// Delete removes an entry from the cache, and returns if the entry existed. +func (lru *LRUCache) Delete(key string) bool { + lru.mu.Lock() + defer lru.mu.Unlock() + + element := lru.table[key] + if element == nil { + return false + } + + lru.list.Remove(element) + delete(lru.table, key) + lru.size -= element.Value.(*entry).size + return true +} + +// Clear will clear the entire cache. +func (lru *LRUCache) Clear() { + lru.mu.Lock() + defer lru.mu.Unlock() + + lru.list.Init() + lru.table = make(map[string]*list.Element) + lru.size = 0 +} + +// SetCapacity will set the capacity of the cache. If the capacity is +// smaller, and the current cache size exceed that capacity, the cache +// will be shrank. +func (lru *LRUCache) SetCapacity(capacity int64) { + lru.mu.Lock() + defer lru.mu.Unlock() + + lru.capacity = capacity + lru.checkCapacity() +} + +// Stats returns a few stats on the cache. +func (lru *LRUCache) Stats() (length, size, capacity, evictions int64, oldest time.Time) { + lru.mu.Lock() + defer lru.mu.Unlock() + if lastElem := lru.list.Back(); lastElem != nil { + oldest = lastElem.Value.(*entry).timeAccessed + } + return int64(lru.list.Len()), lru.size, lru.capacity, lru.evictions, oldest +} + +// StatsJSON returns stats as a JSON object in a string. +func (lru *LRUCache) StatsJSON() string { + if lru == nil { + return "{}" + } + l, s, c, e, o := lru.Stats() + return fmt.Sprintf("{\"Length\": %v, \"Size\": %v, \"Capacity\": %v, \"Evictions\": %v, \"OldestAccess\": \"%v\"}", l, s, c, e, o) +} + +// Length returns how many elements are in the cache +func (lru *LRUCache) Length() int64 { + lru.mu.Lock() + defer lru.mu.Unlock() + return int64(lru.list.Len()) +} + +// Size returns the sum of the objects' Size() method. +func (lru *LRUCache) Size() int64 { + lru.mu.Lock() + defer lru.mu.Unlock() + return lru.size +} + +// Capacity returns the cache maximum capacity. +func (lru *LRUCache) Capacity() int64 { + lru.mu.Lock() + defer lru.mu.Unlock() + return lru.capacity +} + +// Evictions returns the eviction count. +func (lru *LRUCache) Evictions() int64 { + lru.mu.Lock() + defer lru.mu.Unlock() + return lru.evictions +} + +// Oldest returns the insertion time of the oldest element in the cache, +// or a IsZero() time if cache is empty. +func (lru *LRUCache) Oldest() (oldest time.Time) { + lru.mu.Lock() + defer lru.mu.Unlock() + if lastElem := lru.list.Back(); lastElem != nil { + oldest = lastElem.Value.(*entry).timeAccessed + } + return +} + +// Keys returns all the keys for the cache, ordered from most recently +// used to least recently used. +func (lru *LRUCache) Keys() []string { + lru.mu.Lock() + defer lru.mu.Unlock() + + keys := make([]string, 0, lru.list.Len()) + for e := lru.list.Front(); e != nil; e = e.Next() { + keys = append(keys, e.Value.(*entry).key) + } + return keys +} + +// Items returns all the values for the cache, ordered from most recently +// used to least recently used. +func (lru *LRUCache) Items() []Item { + lru.mu.Lock() + defer lru.mu.Unlock() + + items := make([]Item, 0, lru.list.Len()) + for e := lru.list.Front(); e != nil; e = e.Next() { + v := e.Value.(*entry) + items = append(items, Item{Key: v.key, Value: v.value}) + } + return items +} + +func (lru *LRUCache) updateInplace(element *list.Element, value Value) { + valueSize := int64(value.Size()) + sizeDiff := valueSize - element.Value.(*entry).size + element.Value.(*entry).value = value + element.Value.(*entry).size = valueSize + lru.size += sizeDiff + lru.moveToFront(element) + lru.checkCapacity() +} + +func (lru *LRUCache) moveToFront(element *list.Element) { + lru.list.MoveToFront(element) + element.Value.(*entry).timeAccessed = time.Now() +} + +func (lru *LRUCache) addNew(key string, value Value) { + newEntry := &entry{key, value, int64(value.Size()), time.Now()} + element := lru.list.PushFront(newEntry) + lru.table[key] = element + lru.size += newEntry.size + lru.checkCapacity() +} + +func (lru *LRUCache) checkCapacity() { + // Partially duplicated from Delete + for lru.size > lru.capacity { + delElem := lru.list.Back() + delValue := delElem.Value.(*entry) + lru.list.Remove(delElem) + delete(lru.table, delValue.key) + lru.size -= delValue.size + lru.evictions++ + } +} diff --git a/internal/stackql-parser-fork/go/cache/lru_cache_test.go b/internal/stackql-parser-fork/go/cache/lru_cache_test.go new file mode 100644 index 00000000..9a7f0923 --- /dev/null +++ b/internal/stackql-parser-fork/go/cache/lru_cache_test.go @@ -0,0 +1,303 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "encoding/json" + "testing" + "time" +) + +type CacheValue struct { + size int +} + +func (cv *CacheValue) Size() int { + return cv.size +} + +func TestInitialState(t *testing.T) { + cache := NewLRUCache(5) + l, sz, c, e, _ := cache.Stats() + if l != 0 { + t.Errorf("length = %v, want 0", l) + } + if sz != 0 { + t.Errorf("size = %v, want 0", sz) + } + if c != 5 { + t.Errorf("capacity = %v, want 5", c) + } + if e != 0 { + t.Errorf("evictions = %v, want 0", c) + } +} + +func TestSetInsertsValue(t *testing.T) { + cache := NewLRUCache(100) + data := &CacheValue{0} + key := "key" + cache.Set(key, data) + + v, ok := cache.Get(key) + if !ok || v.(*CacheValue) != data { + t.Errorf("Cache has incorrect value: %v != %v", data, v) + } + + k := cache.Keys() + if len(k) != 1 || k[0] != key { + t.Errorf("Cache.Keys() returned incorrect values: %v", k) + } + values := cache.Items() + if len(values) != 1 || values[0].Key != key { + t.Errorf("Cache.Values() returned incorrect values: %v", values) + } +} + +func TestSetIfAbsent(t *testing.T) { + cache := NewLRUCache(100) + data := &CacheValue{0} + key := "key" + cache.SetIfAbsent(key, data) + + v, ok := cache.Get(key) + if !ok || v.(*CacheValue) != data { + t.Errorf("Cache has incorrect value: %v != %v", data, v) + } + + cache.SetIfAbsent(key, &CacheValue{1}) + + v, ok = cache.Get(key) + if !ok || v.(*CacheValue) != data { + t.Errorf("Cache has incorrect value: %v != %v", data, v) + } +} + +func TestGetValueWithMultipleTypes(t *testing.T) { + cache := NewLRUCache(100) + data := &CacheValue{0} + key := "key" + cache.Set(key, data) + + v, ok := cache.Get("key") + if !ok || v.(*CacheValue) != data { + t.Errorf("Cache has incorrect value for \"key\": %v != %v", data, v) + } + + v, ok = cache.Get(string([]byte{'k', 'e', 'y'})) + if !ok || v.(*CacheValue) != data { + t.Errorf("Cache has incorrect value for []byte {'k','e','y'}: %v != %v", data, v) + } +} + +func TestSetUpdatesSize(t *testing.T) { + cache := NewLRUCache(100) + emptyValue := &CacheValue{0} + key := "key1" + cache.Set(key, emptyValue) + if _, sz, _, _, _ := cache.Stats(); sz != 0 { + t.Errorf("cache.Size() = %v, expected 0", sz) + } + someValue := &CacheValue{20} + key = "key2" + cache.Set(key, someValue) + if _, sz, _, _, _ := cache.Stats(); sz != 20 { + t.Errorf("cache.Size() = %v, expected 20", sz) + } +} + +func TestSetWithOldKeyUpdatesValue(t *testing.T) { + cache := NewLRUCache(100) + emptyValue := &CacheValue{0} + key := "key1" + cache.Set(key, emptyValue) + someValue := &CacheValue{20} + cache.Set(key, someValue) + + v, ok := cache.Get(key) + if !ok || v.(*CacheValue) != someValue { + t.Errorf("Cache has incorrect value: %v != %v", someValue, v) + } +} + +func TestSetWithOldKeyUpdatesSize(t *testing.T) { + cache := NewLRUCache(100) + emptyValue := &CacheValue{0} + key := "key1" + cache.Set(key, emptyValue) + + if _, sz, _, _, _ := cache.Stats(); sz != 0 { + t.Errorf("cache.Size() = %v, expected %v", sz, 0) + } + + someValue := &CacheValue{20} + cache.Set(key, someValue) + expected := int64(someValue.size) + if _, sz, _, _, _ := cache.Stats(); sz != expected { + t.Errorf("cache.Size() = %v, expected %v", sz, expected) + } +} + +func TestGetNonExistent(t *testing.T) { + cache := NewLRUCache(100) + + if _, ok := cache.Get("notthere"); ok { + t.Error("Cache returned a notthere value after no inserts.") + } +} + +func TestPeek(t *testing.T) { + cache := NewLRUCache(2) + val1 := &CacheValue{1} + cache.Set("key1", val1) + val2 := &CacheValue{1} + cache.Set("key2", val2) + // Make key1 the most recent. + cache.Get("key1") + // Peek key2. + if v, ok := cache.Peek("key2"); ok && v.(*CacheValue) != val2 { + t.Errorf("key2 received: %v, want %v", v, val2) + } + // Push key2 out + cache.Set("key3", &CacheValue{1}) + if v, ok := cache.Peek("key2"); ok { + t.Errorf("key2 received: %v, want absent", v) + } +} + +func TestDelete(t *testing.T) { + cache := NewLRUCache(100) + value := &CacheValue{1} + key := "key" + + if cache.Delete(key) { + t.Error("Item unexpectedly already in cache.") + } + + cache.Set(key, value) + + if !cache.Delete(key) { + t.Error("Expected item to be in cache.") + } + + if _, sz, _, _, _ := cache.Stats(); sz != 0 { + t.Errorf("cache.Size() = %v, expected 0", sz) + } + + if _, ok := cache.Get(key); ok { + t.Error("Cache returned a value after deletion.") + } +} + +func TestClear(t *testing.T) { + cache := NewLRUCache(100) + value := &CacheValue{1} + key := "key" + + cache.Set(key, value) + cache.Clear() + + if _, sz, _, _, _ := cache.Stats(); sz != 0 { + t.Errorf("cache.Size() = %v, expected 0 after Clear()", sz) + } +} + +func TestCapacityIsObeyed(t *testing.T) { + size := int64(3) + cache := NewLRUCache(100) + cache.SetCapacity(size) + value := &CacheValue{1} + + // Insert up to the cache's capacity. + cache.Set("key1", value) + cache.Set("key2", value) + cache.Set("key3", value) + if _, sz, _, _, _ := cache.Stats(); sz != size { + t.Errorf("cache.Size() = %v, expected %v", sz, size) + } + // Insert one more; something should be evicted to make room. + cache.Set("key4", value) + _, sz, _, evictions, _ := cache.Stats() + if sz != size { + t.Errorf("post-evict cache.Size() = %v, expected %v", sz, size) + } + if evictions != 1 { + t.Errorf("post-evict cache.evictions = %v, expected 1", evictions) + } + + // Check json stats + data := cache.StatsJSON() + m := make(map[string]interface{}) + if err := json.Unmarshal([]byte(data), &m); err != nil { + t.Errorf("cache.StatsJSON() returned bad json data: %v %v", data, err) + } + if m["Size"].(float64) != float64(size) { + t.Errorf("cache.StatsJSON() returned bad size: %v", m) + } + + // Check various other stats + if l := cache.Length(); l != size { + t.Errorf("cache.StatsJSON() returned bad length: %v", l) + } + if s := cache.Size(); s != size { + t.Errorf("cache.StatsJSON() returned bad size: %v", s) + } + if c := cache.Capacity(); c != size { + t.Errorf("cache.StatsJSON() returned bad length: %v", c) + } + + // checks StatsJSON on nil + cache = nil + if s := cache.StatsJSON(); s != "{}" { + t.Errorf("cache.StatsJSON() on nil object returned %v", s) + } +} + +func TestLRUIsEvicted(t *testing.T) { + size := int64(3) + cache := NewLRUCache(size) + + cache.Set("key1", &CacheValue{1}) + cache.Set("key2", &CacheValue{1}) + cache.Set("key3", &CacheValue{1}) + // lru: [key3, key2, key1] + + // Look up the elements. This will rearrange the LRU ordering. + cache.Get("key3") + beforeKey2 := time.Now() + cache.Get("key2") + afterKey2 := time.Now() + cache.Get("key1") + // lru: [key1, key2, key3] + + cache.Set("key0", &CacheValue{1}) + // lru: [key0, key1, key2] + + // The least recently used one should have been evicted. + if _, ok := cache.Get("key3"); ok { + t.Error("Least recently used element was not evicted.") + } + + // Check oldest + if o := cache.Oldest(); o.Before(beforeKey2) || o.After(afterKey2) { + t.Errorf("cache.Oldest returned an unexpected value: got %v, expected a value between %v and %v", o, beforeKey2, afterKey2) + } + + if e, want := cache.Evictions(), int64(1); e != want { + t.Errorf("evictions: %d, want: %d", e, want) + } +} diff --git a/internal/stackql-parser-fork/go/cache/perf_test.go b/internal/stackql-parser-fork/go/cache/perf_test.go new file mode 100644 index 00000000..b5c9a1a8 --- /dev/null +++ b/internal/stackql-parser-fork/go/cache/perf_test.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "testing" +) + +type MyValue []byte + +func (mv MyValue) Size() int { + return cap(mv) +} + +func BenchmarkGet(b *testing.B) { + cache := NewLRUCache(64 * 1024 * 1024) + value := make(MyValue, 1000) + cache.Set("stuff", value) + for i := 0; i < b.N; i++ { + val, ok := cache.Get("stuff") + if !ok { + panic("error") + } + _ = val + } +} diff --git a/internal/stackql-parser-fork/go/exit/exit.go b/internal/stackql-parser-fork/go/exit/exit.go new file mode 100644 index 00000000..83eb0eff --- /dev/null +++ b/internal/stackql-parser-fork/go/exit/exit.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package exit provides an alternative to os.Exit(int). + +Unlike os.Exit(int), exit.Return(int) will run deferred functions before +terminating. It's effectively like a return from main(), except you can specify +the exit code. + +Defer a call to exit.Recover() or exit.RecoverAll() at the beginning of main(). +Use exit.Return(int) to initiate an exit. + + func main() { + defer exit.Recover() + defer cleanup() + ... + if err != nil { + // Return from main() with a non-zero exit code, + // making sure to run deferred cleanup. + exit.Return(1) + } + ... + } + +All functions deferred *after* defer exit.Recover()/RecoverAll() will be +executed before the exit. This is why the defer for this package should +be the first statement in main(). + +NOTE: This mechanism only works if exit.Return() is called from the same +goroutine that deferred exit.Recover(). Usually this means Return() should +only be used from within main(), or within functions that are only ever +called from main(). See Recover() and Return() for more details. +*/ +package exit + +import ( + "os" + + "github.com/stackql/stackql-parser/go/tb" + "github.com/stackql/stackql-parser/go/vt/log" +) + +type exitCode int + +var ( + exitFunc = os.Exit // can be faked out for testing +) + +// Recover should be deferred as the first line of main(). It recovers the +// panic initiated by Return and converts it to a call to os.Exit. Any +// functions deferred after Recover in the main goroutine will be executed +// prior to exiting. Recover will re-panic anything other than the panic it +// expects from Return. +func Recover() { + doRecover(recover(), false) +} + +// RecoverAll can be deferred instead of Recover as the first line of main(). +// Instead of re-panicking, RecoverAll will absorb any panic and convert it to +// an error log entry with a stack trace, followed by a call to os.Exit(255). +func RecoverAll() { + doRecover(recover(), true) +} + +func doRecover(err interface{}, recoverAll bool) { + if err == nil { + return + } + + switch code := err.(type) { + case exitCode: + exitFunc(int(code)) + default: + if recoverAll { + log.Errorf("panic: %v", tb.Errorf("%v", err)) + exitFunc(255) + } else { + panic(err) + } + } +} + +// Return initiates a panic that sends the return code to the deferred Recover, +// executing other deferred functions along the way. When the panic reaches +// Recover, the return code will be passed to os.Exit. This should only be +// called from the main goroutine. +func Return(code int) { + panic(exitCode(code)) +} diff --git a/internal/stackql-parser-fork/go/exit/exit_test.go b/internal/stackql-parser-fork/go/exit/exit_test.go new file mode 100644 index 00000000..e51a0938 --- /dev/null +++ b/internal/stackql-parser-fork/go/exit/exit_test.go @@ -0,0 +1,100 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exit + +import ( + "testing" +) + +type repanicType int + +func TestReturn(t *testing.T) { + defer func() { + err := recover() + if err == nil { + t.Errorf("Return() did not panic with exit code") + } + + switch code := err.(type) { + case exitCode: + if code != 152 { + t.Errorf("got %v, want %v", code, 152) + } + default: + panic(err) + } + }() + + Return(152) +} + +func TestRecover(t *testing.T) { + var code int + + exitFunc = func(c int) { + code = c + } + + func() { + defer Recover() + Return(8235) + }() + + if code != 8235 { + t.Errorf("got %v, want %v", code, 8235) + } +} + +func TestRecoverRepanic(t *testing.T) { + defer func() { + err := recover() + + if err == nil { + t.Errorf("Recover() didn't re-panic an error other than exitCode") + return + } + + if _, ok := err.(repanicType); !ok { + panic(err) // something unexpected went wrong + } + }() + + defer Recover() + + panic(repanicType(1)) +} + +func TestRecoverAll(t *testing.T) { + exitFunc = func(int) {} + + defer func() { + err := recover() + + if err != nil { + t.Errorf("RecoverAll() didn't absorb all panics") + } + }() + + defer RecoverAll() + + panic(repanicType(1)) +} + +// TestRecoverNil checks that Recover() does nothing when there is no panic. +func TestRecoverNil(t *testing.T) { + defer Recover() +} diff --git a/internal/stackql-parser-fork/go/hack/hack.go b/internal/stackql-parser-fork/go/hack/hack.go new file mode 100644 index 00000000..0ae22575 --- /dev/null +++ b/internal/stackql-parser-fork/go/hack/hack.go @@ -0,0 +1,39 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package hack gives you some efficient functionality at the cost of +// breaking some Go rules. +package hack + +import ( + "reflect" + "unsafe" +) + +// String force casts a []byte to a string. +// USE AT YOUR OWN RISK +func String(b []byte) (s string) { + if len(b) == 0 { + return "" + } + return *(*string)(unsafe.Pointer(&b)) +} + +// StringPointer returns &s[0], which is not allowed in go +func StringPointer(s string) unsafe.Pointer { + pstring := (*reflect.StringHeader)(unsafe.Pointer(&s)) + return unsafe.Pointer(pstring.Data) +} diff --git a/internal/stackql-parser-fork/go/hack/hack_test.go b/internal/stackql-parser-fork/go/hack/hack_test.go new file mode 100644 index 00000000..862d6bbd --- /dev/null +++ b/internal/stackql-parser-fork/go/hack/hack_test.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hack + +import ( + "testing" +) + +func TestByteToString(t *testing.T) { + v1 := []byte("1234") + if s := String(v1); s != "1234" { + t.Errorf("String(\"1234\"): %q, want 1234", s) + } + + v1 = []byte("") + if s := String(v1); s != "" { + t.Errorf("String(\"\"): %q, want empty", s) + } + + v1 = nil + if s := String(v1); s != "" { + t.Errorf("String(\"\"): %q, want empty", s) + } +} diff --git a/internal/stackql-parser-fork/go/sqlescape/ids.go b/internal/stackql-parser-fork/go/sqlescape/ids.go new file mode 100644 index 00000000..79244b04 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqlescape/ids.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Vitess Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlescape + +import ( + "bytes" +) + +// EscapeID returns a backticked identifier given an input string. +func EscapeID(in string) string { + var buf bytes.Buffer + WriteEscapeID(&buf, in) + return buf.String() +} + +// WriteEscapeID writes a backticked identifier from an input string into buf. +func WriteEscapeID(buf *bytes.Buffer, in string) { + buf.WriteByte('`') + for _, c := range in { + buf.WriteRune(c) + if c == '`' { + buf.WriteByte('`') + } + } + buf.WriteByte('`') +} diff --git a/internal/stackql-parser-fork/go/sqlescape/ids_test.go b/internal/stackql-parser-fork/go/sqlescape/ids_test.go new file mode 100644 index 00000000..cc62b553 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqlescape/ids_test.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Vitess Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlescape + +import ( + "testing" +) + +func TestEscapeID(t *testing.T) { + testcases := []struct { + in, out string + }{{ + in: "aa", + out: "`aa`", + }, { + in: "a`a", + out: "`a``a`", + }} + for _, tc := range testcases { + out := EscapeID(tc.in) + if out != tc.out { + t.Errorf("EscapeID(%s): %s, want %s", tc.in, out, tc.out) + } + } +} diff --git a/internal/stackql-parser-fork/go/sqltypes/bind_variables.go b/internal/stackql-parser-fork/go/sqltypes/bind_variables.go new file mode 100644 index 00000000..9e17b14e --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/bind_variables.go @@ -0,0 +1,329 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "bytes" + "errors" + "fmt" + "strconv" + + "github.com/golang/protobuf/proto" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +// NullBindVariable is a bindvar with NULL value. +var NullBindVariable = &querypb.BindVariable{Type: querypb.Type_NULL_TYPE} + +// ValueToProto converts Value to a *querypb.Value. +func ValueToProto(v Value) *querypb.Value { + return &querypb.Value{Type: v.typ, Value: v.val} +} + +// ProtoToValue converts a *querypb.Value to a Value. +func ProtoToValue(v *querypb.Value) Value { + return MakeTrusted(v.Type, v.Value) +} + +// BuildBindVariables builds a map[string]*querypb.BindVariable from a map[string]interface{}. +func BuildBindVariables(in map[string]interface{}) (map[string]*querypb.BindVariable, error) { + if len(in) == 0 { + return nil, nil + } + + out := make(map[string]*querypb.BindVariable, len(in)) + for k, v := range in { + bv, err := BuildBindVariable(v) + if err != nil { + return nil, fmt.Errorf("%s: %v", k, err) + } + out[k] = bv + } + return out, nil +} + +// Int8BindVariable converts an int8 to a bind var. +func Int8BindVariable(v int8) *querypb.BindVariable { + return ValueBindVariable(NewInt8(v)) +} + +// Int32BindVariable converts an int32 to a bind var. +func Int32BindVariable(v int32) *querypb.BindVariable { + return ValueBindVariable(NewInt32(v)) +} + +// Int64BindVariable converts an int64 to a bind var. +func Int64BindVariable(v int64) *querypb.BindVariable { + return ValueBindVariable(NewInt64(v)) +} + +// Uint64BindVariable converts a uint64 to a bind var. +func Uint64BindVariable(v uint64) *querypb.BindVariable { + return ValueBindVariable(NewUint64(v)) +} + +// Float64BindVariable converts a float64 to a bind var. +func Float64BindVariable(v float64) *querypb.BindVariable { + return ValueBindVariable(NewFloat64(v)) +} + +// StringBindVariable converts a string to a bind var. +func StringBindVariable(v string) *querypb.BindVariable { + return ValueBindVariable(NewVarBinary(v)) +} + +// BytesBindVariable converts a []byte to a bind var. +func BytesBindVariable(v []byte) *querypb.BindVariable { + return &querypb.BindVariable{Type: VarBinary, Value: v} +} + +// ValueBindVariable converts a Value to a bind var. +func ValueBindVariable(v Value) *querypb.BindVariable { + return &querypb.BindVariable{Type: v.typ, Value: v.val} +} + +// BuildBindVariable builds a *querypb.BindVariable from a valid input type. +func BuildBindVariable(v interface{}) (*querypb.BindVariable, error) { + switch v := v.(type) { + case string: + return BytesBindVariable([]byte(v)), nil + case []byte: + return BytesBindVariable(v), nil + case bool: + if v { + return Int8BindVariable(1), nil + } + return Int8BindVariable(0), nil + case int: + return &querypb.BindVariable{ + Type: querypb.Type_INT64, + Value: strconv.AppendInt(nil, int64(v), 10), + }, nil + case int64: + return Int64BindVariable(v), nil + case uint64: + return Uint64BindVariable(v), nil + case float64: + return Float64BindVariable(v), nil + case nil: + return NullBindVariable, nil + case Value: + return ValueBindVariable(v), nil + case *querypb.BindVariable: + return v, nil + case []interface{}: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + lbv, err := BuildBindVariable(lv) + if err != nil { + return nil, err + } + values[i].Type = lbv.Type + values[i].Value = lbv.Value + bv.Values[i] = &values[i] + } + return bv, nil + case []string: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_VARBINARY + values[i].Value = []byte(lv) + bv.Values[i] = &values[i] + } + return bv, nil + case [][]byte: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_VARBINARY + values[i].Value = lv + bv.Values[i] = &values[i] + } + return bv, nil + case []int: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_INT64 + values[i].Value = strconv.AppendInt(nil, int64(lv), 10) + bv.Values[i] = &values[i] + } + return bv, nil + case []int64: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_INT64 + values[i].Value = strconv.AppendInt(nil, lv, 10) + bv.Values[i] = &values[i] + } + return bv, nil + case []uint64: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_UINT64 + values[i].Value = strconv.AppendUint(nil, lv, 10) + bv.Values[i] = &values[i] + } + return bv, nil + case []float64: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_FLOAT64 + values[i].Value = strconv.AppendFloat(nil, lv, 'g', -1, 64) + bv.Values[i] = &values[i] + } + return bv, nil + } + return nil, fmt.Errorf("type %T not supported as bind var: %v", v, v) +} + +// ValidateBindVariables validates a map[string]*querypb.BindVariable. +func ValidateBindVariables(bv map[string]*querypb.BindVariable) error { + for k, v := range bv { + if err := ValidateBindVariable(v); err != nil { + return fmt.Errorf("%s: %v", k, err) + } + } + return nil +} + +// ValidateBindVariable returns an error if the bind variable has inconsistent +// fields. +func ValidateBindVariable(bv *querypb.BindVariable) error { + if bv == nil { + return errors.New("bind variable is nil") + } + + if bv.Type == querypb.Type_TUPLE { + if len(bv.Values) == 0 { + return errors.New("empty tuple is not allowed") + } + for _, val := range bv.Values { + if val.Type == querypb.Type_TUPLE { + return errors.New("tuple not allowed inside another tuple") + } + if err := ValidateBindVariable(&querypb.BindVariable{Type: val.Type, Value: val.Value}); err != nil { + return err + } + } + return nil + } + + // If NewValue succeeds, the value is valid. + _, err := NewValue(bv.Type, bv.Value) + return err +} + +// BindVariableToValue converts a bind var into a Value. +func BindVariableToValue(bv *querypb.BindVariable) (Value, error) { + if bv.Type == querypb.Type_TUPLE { + return NULL, errors.New("cannot convert a TUPLE bind var into a value") + } + return MakeTrusted(bv.Type, bv.Value), nil +} + +// BindVariablesEqual compares two maps of bind variables. +// For protobuf messages we have to use "proto.Equal". +func BindVariablesEqual(x, y map[string]*querypb.BindVariable) bool { + return proto.Equal(&querypb.BoundQuery{BindVariables: x}, &querypb.BoundQuery{BindVariables: y}) +} + +// CopyBindVariables returns a shallow-copy of the given bindVariables map. +func CopyBindVariables(bindVariables map[string]*querypb.BindVariable) map[string]*querypb.BindVariable { + result := make(map[string]*querypb.BindVariable, len(bindVariables)) + for key, value := range bindVariables { + result[key] = value + } + return result +} + +// FormatBindVariables returns a string representation of the +// bind variables. +// +// If full is false, then large string or tuple values are truncated +// to only print the lengths. +// +// If asJson is true, then the resulting string is a valid JSON +// representation, otherwise it is the golang printed map representation. +func FormatBindVariables(bindVariables map[string]*querypb.BindVariable, full, asJSON bool) string { + var out map[string]*querypb.BindVariable + if full { + out = bindVariables + } else { + // NOTE(szopa): I am getting rid of potentially large bind + // variables. + out = make(map[string]*querypb.BindVariable) + for k, v := range bindVariables { + if IsIntegral(v.Type) || IsFloat(v.Type) { + out[k] = v + } else if v.Type == querypb.Type_TUPLE { + out[k] = StringBindVariable(fmt.Sprintf("%v items", len(v.Values))) + } else { + out[k] = StringBindVariable(fmt.Sprintf("%v bytes", len(v.Value))) + } + } + } + + if asJSON { + var buf bytes.Buffer + buf.WriteString("{") + first := true + for k, v := range out { + if !first { + buf.WriteString(", ") + } else { + first = false + } + if IsIntegral(v.Type) || IsFloat(v.Type) { + fmt.Fprintf(&buf, "%q: {\"type\": %q, \"value\": %v}", k, v.Type, string(v.Value)) + } else { + fmt.Fprintf(&buf, "%q: {\"type\": %q, \"value\": %q}", k, v.Type, string(v.Value)) + } + } + buf.WriteString("}") + return buf.String() + } + + return fmt.Sprintf("%v", out) +} diff --git a/internal/stackql-parser-fork/go/sqltypes/bind_variables_test.go b/internal/stackql-parser-fork/go/sqltypes/bind_variables_test.go new file mode 100644 index 00000000..a7cb921e --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/bind_variables_test.go @@ -0,0 +1,644 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "github.com/stackql/stackql-parser/go/test/utils" + + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/require" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +func TestProtoConversions(t *testing.T) { + v := TestValue(Int64, "1") + got := ValueToProto(v) + want := &querypb.Value{Type: Int64, Value: []byte("1")} + if !proto.Equal(got, want) { + t.Errorf("ValueToProto: %v, want %v", got, want) + } + gotback := ProtoToValue(got) + if !reflect.DeepEqual(gotback, v) { + t.Errorf("ProtoToValue: %v, want %v", gotback, v) + } +} + +func TestBuildBindVariables(t *testing.T) { + tcases := []struct { + in map[string]interface{} + out map[string]*querypb.BindVariable + err string + }{{ + in: nil, + out: nil, + }, { + in: map[string]interface{}{ + "k": int64(1), + }, + out: map[string]*querypb.BindVariable{ + "k": Int64BindVariable(1), + }, + }, { + in: map[string]interface{}{ + "k": byte(1), + }, + err: "k: type uint8 not supported as bind var: 1", + }} + for _, tcase := range tcases { + bindVars, err := BuildBindVariables(tcase.in) + if err != nil { + if err.Error() != tcase.err { + t.Errorf("MapToBindVars(%v) error: %v, want %s", tcase.in, err, tcase.err) + } + continue + } + if tcase.err != "" { + t.Errorf("MapToBindVars(%v) error: nil, want %s", tcase.in, tcase.err) + continue + } + if !BindVariablesEqual(bindVars, tcase.out) { + t.Errorf("MapToBindVars(%v): %v, want %s", tcase.in, bindVars, tcase.out) + } + } +} + +func TestBuildBindVariable(t *testing.T) { + tcases := []struct { + in interface{} + out *querypb.BindVariable + err string + }{{ + in: "aa", + out: &querypb.BindVariable{ + Type: querypb.Type_VARBINARY, + Value: []byte("aa"), + }, + }, { + in: []byte("aa"), + out: &querypb.BindVariable{ + Type: querypb.Type_VARBINARY, + Value: []byte("aa"), + }, + }, { + in: true, + out: &querypb.BindVariable{ + Type: querypb.Type_INT8, + Value: []byte("1"), + }, + }, { + in: false, + out: &querypb.BindVariable{ + Type: querypb.Type_INT8, + Value: []byte("0"), + }, + }, { + in: int(1), + out: &querypb.BindVariable{ + Type: querypb.Type_INT64, + Value: []byte("1"), + }, + }, { + in: int64(1), + out: &querypb.BindVariable{ + Type: querypb.Type_INT64, + Value: []byte("1"), + }, + }, { + in: uint64(1), + out: &querypb.BindVariable{ + Type: querypb.Type_UINT64, + Value: []byte("1"), + }, + }, { + in: float64(1), + out: &querypb.BindVariable{ + Type: querypb.Type_FLOAT64, + Value: []byte("1"), + }, + }, { + in: nil, + out: NullBindVariable, + }, { + in: MakeTrusted(Int64, []byte("1")), + out: &querypb.BindVariable{ + Type: querypb.Type_INT64, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_INT64, + Value: []byte("1"), + }, + out: &querypb.BindVariable{ + Type: querypb.Type_INT64, + Value: []byte("1"), + }, + }, { + in: []interface{}{"aa", int64(1)}, + out: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{ + Type: querypb.Type_VARBINARY, + Value: []byte("aa"), + }, { + Type: querypb.Type_INT64, + Value: []byte("1"), + }}, + }, + }, { + in: []string{"aa", "bb"}, + out: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{ + Type: querypb.Type_VARBINARY, + Value: []byte("aa"), + }, { + Type: querypb.Type_VARBINARY, + Value: []byte("bb"), + }}, + }, + }, { + in: [][]byte{[]byte("aa"), []byte("bb")}, + out: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{ + Type: querypb.Type_VARBINARY, + Value: []byte("aa"), + }, { + Type: querypb.Type_VARBINARY, + Value: []byte("bb"), + }}, + }, + }, { + in: []int{1, 2}, + out: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{ + Type: querypb.Type_INT64, + Value: []byte("1"), + }, { + Type: querypb.Type_INT64, + Value: []byte("2"), + }}, + }, + }, { + in: []int64{1, 2}, + out: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{ + Type: querypb.Type_INT64, + Value: []byte("1"), + }, { + Type: querypb.Type_INT64, + Value: []byte("2"), + }}, + }, + }, { + in: []uint64{1, 2}, + out: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{ + Type: querypb.Type_UINT64, + Value: []byte("1"), + }, { + Type: querypb.Type_UINT64, + Value: []byte("2"), + }}, + }, + }, { + in: []float64{1, 2}, + out: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{ + Type: querypb.Type_FLOAT64, + Value: []byte("1"), + }, { + Type: querypb.Type_FLOAT64, + Value: []byte("2"), + }}, + }, + }, { + in: byte(1), + err: "type uint8 not supported as bind var: 1", + }, { + in: []interface{}{1, byte(1)}, + err: "type uint8 not supported as bind var: 1", + }} + for _, tcase := range tcases { + t.Run(fmt.Sprintf("%v", tcase.in), func(t *testing.T) { + bv, err := BuildBindVariable(tcase.in) + if tcase.err != "" { + require.EqualError(t, err, tcase.err) + } else { + utils.MustMatch(t, tcase.out, bv, "binvar output did not match") + } + }) + } +} + +func TestValidateBindVarables(t *testing.T) { + tcases := []struct { + in map[string]*querypb.BindVariable + err string + }{{ + in: map[string]*querypb.BindVariable{ + "v": { + Type: querypb.Type_INT64, + Value: []byte("1"), + }, + }, + err: "", + }, { + in: map[string]*querypb.BindVariable{ + "v": { + Type: querypb.Type_INT64, + Value: []byte("a"), + }, + }, + err: `v: strconv.ParseInt: parsing "a": invalid syntax`, + }, { + in: map[string]*querypb.BindVariable{ + "v": { + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{ + Type: Int64, + Value: []byte("a"), + }}, + }, + }, + err: `v: strconv.ParseInt: parsing "a": invalid syntax`, + }} + for _, tcase := range tcases { + err := ValidateBindVariables(tcase.in) + if tcase.err != "" { + if err == nil || err.Error() != tcase.err { + t.Errorf("ValidateBindVars(%v): %v, want %s", tcase.in, err, tcase.err) + } + continue + } + if err != nil { + t.Errorf("ValidateBindVars(%v): %v, want nil", tcase.in, err) + } + } +} + +func TestValidateBindVariable(t *testing.T) { + testcases := []struct { + in *querypb.BindVariable + err string + }{{ + in: &querypb.BindVariable{ + Type: querypb.Type_INT8, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_INT16, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_INT24, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_INT32, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_INT64, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_UINT8, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_UINT16, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_UINT24, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_UINT32, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_UINT64, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_FLOAT32, + Value: []byte("1.00"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_FLOAT64, + Value: []byte("1.00"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_DECIMAL, + Value: []byte("1.00"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_TIMESTAMP, + Value: []byte("2012-02-24 23:19:43"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_DATE, + Value: []byte("2012-02-24"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_TIME, + Value: []byte("23:19:43"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_DATETIME, + Value: []byte("2012-02-24 23:19:43"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_YEAR, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_TEXT, + Value: []byte("a"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_BLOB, + Value: []byte("a"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_VARCHAR, + Value: []byte("a"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_BINARY, + Value: []byte("a"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_CHAR, + Value: []byte("a"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_BIT, + Value: []byte("1"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_ENUM, + Value: []byte("a"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_SET, + Value: []byte("a"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_VARBINARY, + Value: []byte("a"), + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_INT64, + Value: []byte(InvalidNeg), + }, + err: "out of range", + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_INT64, + Value: []byte(InvalidPos), + }, + err: "out of range", + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_UINT64, + Value: []byte("-1"), + }, + err: "invalid syntax", + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_UINT64, + Value: []byte(InvalidPos), + }, + err: "out of range", + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_FLOAT64, + Value: []byte("a"), + }, + err: "invalid syntax", + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_EXPRESSION, + Value: []byte("a"), + }, + err: "invalid type specified for MakeValue: EXPRESSION", + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{ + Type: querypb.Type_INT64, + Value: []byte("1"), + }}, + }, + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + }, + err: "empty tuple is not allowed", + }, { + in: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{ + Type: querypb.Type_TUPLE, + }}, + }, + err: "tuple not allowed inside another tuple", + }} + for _, tcase := range testcases { + err := ValidateBindVariable(tcase.in) + if tcase.err != "" { + if err == nil || !strings.Contains(err.Error(), tcase.err) { + t.Errorf("ValidateBindVar(%v) error: %v, must contain %v", tcase.in, err, tcase.err) + } + continue + } + if err != nil { + t.Errorf("ValidateBindVar(%v) error: %v", tcase.in, err) + } + } + + // Special case: nil bind var. + err := ValidateBindVariable(nil) + want := "bind variable is nil" + if err == nil || err.Error() != want { + t.Errorf("ValidateBindVar(nil) error: %v, want %s", err, want) + } +} + +func TestBindVariableToValue(t *testing.T) { + v, err := BindVariableToValue(Int64BindVariable(1)) + require.NoError(t, err) + want := MakeTrusted(querypb.Type_INT64, []byte("1")) + if !reflect.DeepEqual(v, want) { + t.Errorf("BindVarToValue(1): %v, want %v", v, want) + } + + v, err = BindVariableToValue(&querypb.BindVariable{Type: querypb.Type_TUPLE}) + wantErr := "cannot convert a TUPLE bind var into a value" + if err == nil || err.Error() != wantErr { + t.Errorf(" BindVarToValue(TUPLE): (%v, %v), want %s", v, err, wantErr) + } +} + +func TestBindVariablesEqual(t *testing.T) { + bv1 := map[string]*querypb.BindVariable{ + "k": { + Type: querypb.Type_INT64, + Value: []byte("1"), + }, + } + bv2 := map[string]*querypb.BindVariable{ + "k": { + Type: querypb.Type_INT64, + Value: []byte("1"), + }, + } + bv3 := map[string]*querypb.BindVariable{ + "k": { + Type: querypb.Type_INT64, + Value: []byte("1"), + }, + } + if !BindVariablesEqual(bv1, bv2) { + t.Errorf("%v != %v, want equal", bv1, bv2) + } + if !BindVariablesEqual(bv1, bv3) { + t.Errorf("%v = %v, want not equal", bv1, bv3) + } +} + +func TestBindVariablesFormat(t *testing.T) { + tupleBindVar, err := BuildBindVariable([]int64{1, 2}) + if err != nil { + t.Fatalf("failed to create a tuple bind var: %v", err) + } + + bindVariables := map[string]*querypb.BindVariable{ + "key_1": StringBindVariable("val_1"), + "key_2": Int64BindVariable(789), + "key_3": BytesBindVariable([]byte("val_3")), + "key_4": tupleBindVar, + } + + formattedStr := FormatBindVariables(bindVariables, true /* full */, false /* asJSON */) + if !strings.Contains(formattedStr, "key_1") || + !strings.Contains(formattedStr, "val_1") { + t.Fatalf("bind variable 'key_1': 'val_1' is not formatted") + } + if !strings.Contains(formattedStr, "key_2") || + !strings.Contains(formattedStr, "789") { + t.Fatalf("bind variable 'key_2': '789' is not formatted") + } + if !strings.Contains(formattedStr, "key_3") || !strings.Contains(formattedStr, "val_3") { + t.Fatalf("bind variable 'key_3': 'val_3' is not formatted") + } + if !strings.Contains(formattedStr, "key_4") || + !strings.Contains(formattedStr, "values: values:") { + t.Fatalf("bind variable 'key_4': (1, 2) is not formatted") + } + + formattedStr = FormatBindVariables(bindVariables, false /* full */, false /* asJSON */) + if !strings.Contains(formattedStr, "key_1") { + t.Fatalf("bind variable 'key_1' is not formatted") + } + if !strings.Contains(formattedStr, "key_2") || + !strings.Contains(formattedStr, "789") { + t.Fatalf("bind variable 'key_2': '789' is not formatted") + } + if !strings.Contains(formattedStr, "key_3") || !strings.Contains(formattedStr, "5 bytes") { + t.Fatalf("bind variable 'key_3' is not formatted") + } + if !strings.Contains(formattedStr, "key_4") || !strings.Contains(formattedStr, "2 items") { + t.Fatalf("bind variable 'key_4' is not formatted") + } + + formattedStr = FormatBindVariables(bindVariables, true /* full */, true /* asJSON */) + t.Logf("%q", formattedStr) + if !strings.Contains(formattedStr, "\"key_1\": {\"type\": \"VARBINARY\", \"value\": \"val_1\"}") { + t.Fatalf("bind variable 'key_1' is not formatted") + } + + if !strings.Contains(formattedStr, "\"key_2\": {\"type\": \"INT64\", \"value\": 789}") { + t.Fatalf("bind variable 'key_2' is not formatted") + } + + if !strings.Contains(formattedStr, "\"key_3\": {\"type\": \"VARBINARY\", \"value\": \"val_3\"}") { + t.Fatalf("bind variable 'key_3' is not formatted") + } + + if !strings.Contains(formattedStr, "\"key_4\": {\"type\": \"TUPLE\", \"value\": \"\"}") { + t.Fatalf("bind variable 'key_4' is not formatted") + } + + formattedStr = FormatBindVariables(bindVariables, false /* full */, true /* asJSON */) + if !strings.Contains(formattedStr, "\"key_1\": {\"type\": \"VARBINARY\", \"value\": \"5 bytes\"}") { + t.Fatalf("bind variable 'key_1' is not formatted") + } + + if !strings.Contains(formattedStr, "\"key_2\": {\"type\": \"INT64\", \"value\": 789}") { + t.Fatalf("bind variable 'key_2' is not formatted") + } + + if !strings.Contains(formattedStr, "\"key_3\": {\"type\": \"VARBINARY\", \"value\": \"5 bytes\"}") { + t.Fatalf("bind variable 'key_3' is not formatted") + } + + if !strings.Contains(formattedStr, "\"key_4\": {\"type\": \"VARBINARY\", \"value\": \"2 items\"}") { + t.Fatalf("bind variable 'key_4' is not formatted") + } +} diff --git a/internal/stackql-parser-fork/go/sqltypes/event_token.go b/internal/stackql-parser-fork/go/sqltypes/event_token.go new file mode 100644 index 00000000..0a2d1f59 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/event_token.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +// EventTokenMinimum returns an event token that is guaranteed to +// happen before both provided EventToken objects. Note it doesn't +// parse the position, but rather only uses the timestamp. This is +// meant to be used for EventToken objects coming from different +// source shard. +func EventTokenMinimum(ev1, ev2 *querypb.EventToken) *querypb.EventToken { + if ev1 == nil || ev2 == nil { + // One or the other is not set, we can't do anything. + return nil + } + + if ev1.Timestamp < ev2.Timestamp { + return &querypb.EventToken{ + Timestamp: ev1.Timestamp, + } + } + return &querypb.EventToken{ + Timestamp: ev2.Timestamp, + } +} diff --git a/internal/stackql-parser-fork/go/sqltypes/event_token_test.go b/internal/stackql-parser-fork/go/sqltypes/event_token_test.go new file mode 100644 index 00000000..f7ecc3e8 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/event_token_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "testing" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" + + "github.com/golang/protobuf/proto" +) + +func TestEventTokenMinimum(t *testing.T) { + testcases := []struct { + ev1 *querypb.EventToken + ev2 *querypb.EventToken + expected *querypb.EventToken + }{{ + ev1: nil, + ev2: nil, + expected: nil, + }, { + ev1: &querypb.EventToken{ + Timestamp: 123, + }, + ev2: nil, + expected: nil, + }, { + ev1: nil, + ev2: &querypb.EventToken{ + Timestamp: 123, + }, + expected: nil, + }, { + ev1: &querypb.EventToken{ + Timestamp: 123, + }, + ev2: &querypb.EventToken{ + Timestamp: 456, + }, + expected: &querypb.EventToken{ + Timestamp: 123, + }, + }, { + ev1: &querypb.EventToken{ + Timestamp: 456, + }, + ev2: &querypb.EventToken{ + Timestamp: 123, + }, + expected: &querypb.EventToken{ + Timestamp: 123, + }, + }} + + for _, tcase := range testcases { + got := EventTokenMinimum(tcase.ev1, tcase.ev2) + if tcase.expected == nil && got != nil { + t.Errorf("expected nil result for Minimum(%v, %v) but got: %v", tcase.ev1, tcase.ev2, got) + continue + } + if !proto.Equal(got, tcase.expected) { + t.Errorf("got %v but expected %v for Minimum(%v, %v)", got, tcase.expected, tcase.ev1, tcase.ev2) + } + } +} diff --git a/internal/stackql-parser-fork/go/sqltypes/plan_value.go b/internal/stackql-parser-fork/go/sqltypes/plan_value.go new file mode 100644 index 00000000..5e38d651 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/plan_value.go @@ -0,0 +1,267 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "encoding/json" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" + "github.com/stackql/stackql-parser/go/vt/vterrors" +) + +// PlanValue represents a value or a list of values for +// a column that will later be resolved using bind vars and used +// to perform plan actions like generating the final query or +// deciding on a route. +// +// Plan values are typically used as a slice ([]planValue) +// where each entry is for one column. For situations where +// the required output is a list of rows (like in the case +// of multi-value inserts), the representation is pivoted. +// For example, a statement like this: +// +// INSERT INTO t VALUES (1, 2), (3, 4) +// +// will be represented as follows: +// +// []PlanValue{ +// Values: {1, 3}, +// Values: {2, 4}, +// } +// +// For WHERE clause items that contain a combination of +// equality expressions and IN clauses like this: +// +// WHERE pk1 = 1 AND pk2 IN (2, 3, 4) +// +// The plan values will be represented as follows: +// +// []PlanValue{ +// Value: 1, +// Values: {2, 3, 4}, +// } +// +// When converted into rows, columns with single values +// are replicated as the same for all rows: +// +// [][]Value{ +// {1, 2}, +// {1, 3}, +// {1, 4}, +// } +type PlanValue struct { + Key string + Value Value + ListKey string + Values []PlanValue +} + +// IsNull returns true if the PlanValue is NULL. +func (pv PlanValue) IsNull() bool { + return pv.Key == "" && pv.Value.IsNull() && pv.ListKey == "" && pv.Values == nil +} + +// IsList returns true if the PlanValue is a list. +func (pv PlanValue) IsList() bool { + return pv.ListKey != "" || pv.Values != nil +} + +// ResolveValue resolves a PlanValue as a single value based on the supplied bindvars. +func (pv PlanValue) ResolveValue(bindVars map[string]*querypb.BindVariable) (Value, error) { + switch { + case pv.Key != "": + bv, err := pv.lookupValue(bindVars) + if err != nil { + return NULL, err + } + return MakeTrusted(bv.Type, bv.Value), nil + case !pv.Value.IsNull(): + return pv.Value, nil + case pv.ListKey != "" || pv.Values != nil: + // This code is unreachable because the parser does not allow + // multi-value constructs where a single value is expected. + return NULL, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "a list was supplied where a single value was expected") + } + return NULL, nil +} + +func (pv PlanValue) lookupValue(bindVars map[string]*querypb.BindVariable) (*querypb.BindVariable, error) { + bv, ok := bindVars[pv.Key] + if !ok { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "missing bind var %s", pv.Key) + } + if bv.Type == querypb.Type_TUPLE { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "TUPLE was supplied for single value bind var %s", pv.ListKey) + } + return bv, nil +} + +// ResolveList resolves a PlanValue as a list of values based on the supplied bindvars. +func (pv PlanValue) ResolveList(bindVars map[string]*querypb.BindVariable) ([]Value, error) { + switch { + case pv.ListKey != "": + bv, err := pv.lookupList(bindVars) + if err != nil { + return nil, err + } + values := make([]Value, 0, len(bv.Values)) + for _, val := range bv.Values { + values = append(values, MakeTrusted(val.Type, val.Value)) + } + return values, nil + case pv.Values != nil: + values := make([]Value, 0, len(pv.Values)) + for _, val := range pv.Values { + v, err := val.ResolveValue(bindVars) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil + } + // This code is unreachable because the parser does not allow + // single value constructs where multiple values are expected. + return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "a single value was supplied where a list was expected") +} + +func (pv PlanValue) lookupList(bindVars map[string]*querypb.BindVariable) (*querypb.BindVariable, error) { + bv, ok := bindVars[pv.ListKey] + if !ok { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "missing bind var %s", pv.ListKey) + } + if bv.Type != querypb.Type_TUPLE { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "single value was supplied for TUPLE bind var %s", pv.ListKey) + } + return bv, nil +} + +// MarshalJSON should be used only for testing. +func (pv PlanValue) MarshalJSON() ([]byte, error) { + switch { + case pv.Key != "": + return json.Marshal(":" + pv.Key) + case !pv.Value.IsNull(): + if pv.Value.IsIntegral() { + return pv.Value.ToBytes(), nil + } + return json.Marshal(pv.Value.ToString()) + case pv.ListKey != "": + return json.Marshal("::" + pv.ListKey) + case pv.Values != nil: + return json.Marshal(pv.Values) + } + return []byte("null"), nil +} + +func rowCount(pvs []PlanValue, bindVars map[string]*querypb.BindVariable) (int, error) { + count := -1 + setCount := func(l int) error { + switch count { + case -1: + count = l + return nil + case l: + return nil + default: + return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "mismatch in number of column values") + } + } + + for _, pv := range pvs { + switch { + case pv.Key != "" || !pv.Value.IsNull(): + continue + case pv.Values != nil: + if err := setCount(len(pv.Values)); err != nil { + return 0, err + } + case pv.ListKey != "": + bv, err := pv.lookupList(bindVars) + if err != nil { + return 0, err + } + if err := setCount(len(bv.Values)); err != nil { + return 0, err + } + } + } + + if count == -1 { + // If there were no lists inside, it was a single row. + // Note that count can never be 0 because there is enough + // protection at the top level: list bind vars must have + // at least one value (enforced by vtgate), and AST lists + // must have at least one value (enforced by the parser). + // Also lists created internally after vtgate validation + // ensure at least one value. + // TODO(sougou): verify and change API to enforce this. + return 1, nil + } + return count, nil +} + +// ResolveRows resolves a []PlanValue as rows based on the supplied bindvars. +func ResolveRows(pvs []PlanValue, bindVars map[string]*querypb.BindVariable) ([][]Value, error) { + count, err := rowCount(pvs, bindVars) + if err != nil { + return nil, err + } + + // Allocate the rows. + rows := make([][]Value, count) + for i := range rows { + rows[i] = make([]Value, len(pvs)) + } + + // Using j because we're resolving by columns. + for j, pv := range pvs { + switch { + case pv.Key != "": + bv, err := pv.lookupValue(bindVars) + if err != nil { + return nil, err + } + for i := range rows { + rows[i][j] = MakeTrusted(bv.Type, bv.Value) + } + case !pv.Value.IsNull(): + for i := range rows { + rows[i][j] = pv.Value + } + case pv.ListKey != "": + bv, err := pv.lookupList(bindVars) + if err != nil { + // This code is unreachable because pvRowCount already checks this. + return nil, err + } + for i := range rows { + rows[i][j] = MakeTrusted(bv.Values[i].Type, bv.Values[i].Value) + } + case pv.Values != nil: + for i := range rows { + rows[i][j], err = pv.Values[i].ResolveValue(bindVars) + if err != nil { + return nil, err + } + } + // default case is a NULL value, which the row values are already initialized to. + } + } + return rows, nil +} diff --git a/internal/stackql-parser-fork/go/sqltypes/plan_value_test.go b/internal/stackql-parser-fork/go/sqltypes/plan_value_test.go new file mode 100644 index 00000000..dbe05513 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/plan_value_test.go @@ -0,0 +1,314 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "fmt" + "reflect" + "strings" + "testing" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" + + "github.com/stretchr/testify/assert" +) + +func TestPlanValueIsNull(t *testing.T) { + tcases := []struct { + in PlanValue + out bool + }{{ + in: PlanValue{}, + out: true, + }, { + in: PlanValue{Key: "aa"}, + out: false, + }, { + in: PlanValue{Value: NewVarBinary("aa")}, + out: false, + }, { + in: PlanValue{ListKey: "aa"}, + out: false, + }, { + in: PlanValue{Values: []PlanValue{}}, + out: false, + }} + for _, tc := range tcases { + got := tc.in.IsNull() + if got != tc.out { + t.Errorf("IsNull(%v): %v, want %v", tc.in, got, tc.out) + } + } +} + +func TestPlanValueIsList(t *testing.T) { + tcases := []struct { + in PlanValue + out bool + }{{ + in: PlanValue{}, + out: false, + }, { + in: PlanValue{Key: "aa"}, + out: false, + }, { + in: PlanValue{Value: NewVarBinary("aa")}, + out: false, + }, { + in: PlanValue{ListKey: "aa"}, + out: true, + }, { + in: PlanValue{Values: []PlanValue{}}, + out: true, + }} + for _, tc := range tcases { + got := tc.in.IsList() + if got != tc.out { + t.Errorf("IsList(%v): %v, want %v", tc.in, got, tc.out) + } + } +} + +func TestResolveRows(t *testing.T) { + testBindVars := map[string]*querypb.BindVariable{ + "int": Int64BindVariable(10), + "intstr": TestBindVariable([]interface{}{10, "aa"}), + } + intValue := MakeTrusted(Int64, []byte("10")) + strValue := MakeTrusted(VarBinary, []byte("aa")) + tcases := []struct { + in []PlanValue + out [][]Value + err string + }{{ + // Simple cases. + in: []PlanValue{ + {Key: "int"}, + }, + out: [][]Value{ + {intValue}, + }, + }, { + in: []PlanValue{ + {Value: intValue}, + }, + out: [][]Value{ + {intValue}, + }, + }, { + in: []PlanValue{ + {ListKey: "intstr"}, + }, + out: [][]Value{ + {intValue}, + {strValue}, + }, + }, { + in: []PlanValue{ + {Values: []PlanValue{{Value: intValue}, {Value: strValue}}}, + }, + out: [][]Value{ + {intValue}, + {strValue}, + }, + }, { + in: []PlanValue{ + {Values: []PlanValue{{Key: "int"}, {Value: strValue}}}, + }, + out: [][]Value{ + {intValue}, + {strValue}, + }, + }, { + in: []PlanValue{{}}, + out: [][]Value{ + {NULL}, + }, + }, { + // Cases with varying rowcounts. + // All types of input.. + in: []PlanValue{ + {Key: "int"}, + {Value: strValue}, + {ListKey: "intstr"}, + {Values: []PlanValue{{Value: strValue}, {Value: intValue}}}, + }, + out: [][]Value{ + {intValue, strValue, intValue, strValue}, + {intValue, strValue, strValue, intValue}, + }, + }, { + // list, val, list. + in: []PlanValue{ + {Value: strValue}, + {Key: "int"}, + {Values: []PlanValue{{Value: strValue}, {Value: intValue}}}, + }, + out: [][]Value{ + {strValue, intValue, strValue}, + {strValue, intValue, intValue}, + }, + }, { + // list, list + in: []PlanValue{ + {ListKey: "intstr"}, + {Values: []PlanValue{{Value: strValue}, {Value: intValue}}}, + }, + out: [][]Value{ + {intValue, strValue}, + {strValue, intValue}, + }, + }, { + // Error cases + in: []PlanValue{ + {ListKey: "intstr"}, + {Values: []PlanValue{{Value: strValue}}}, + }, + err: "mismatch in number of column values", + }, { + // This is a different code path for a similar validation. + in: []PlanValue{ + {Values: []PlanValue{{Value: strValue}}}, + {ListKey: "intstr"}, + }, + err: "mismatch in number of column values", + }, { + in: []PlanValue{ + {Key: "absent"}, + }, + err: "missing bind var absent", + }, { + in: []PlanValue{ + {ListKey: "absent"}, + }, + err: "missing bind var absent", + }, { + in: []PlanValue{ + {Values: []PlanValue{{Key: "absent"}}}, + }, + err: "missing bind var absent", + }} + + for _, tc := range tcases { + t.Run(fmt.Sprintf("%v", tc.in), func(t *testing.T) { + got, err := ResolveRows(tc.in, testBindVars) + if tc.err != "" { + assert.EqualError(t, err, tc.err) + } else { + if !reflect.DeepEqual(got, tc.out) { + t.Errorf("ResolveRows(%v): %v, want %v", tc.in, got, tc.out) + } + } + }) + } +} + +func TestResolveList(t *testing.T) { + testBindVars := map[string]*querypb.BindVariable{ + "int": Int64BindVariable(10), + "intstr": TestBindVariable([]interface{}{10, "aa"}), + } + intValue := MakeTrusted(Int64, []byte("10")) + strValue := MakeTrusted(VarBinary, []byte("aa")) + tcases := []struct { + in PlanValue + out []Value + err string + }{{ + in: PlanValue{ListKey: "intstr"}, + out: []Value{intValue, strValue}, + }, { + in: PlanValue{Values: []PlanValue{{Value: intValue}, {Value: strValue}}}, + out: []Value{intValue, strValue}, + }, { + in: PlanValue{Values: []PlanValue{{Key: "int"}, {Value: strValue}}}, + out: []Value{intValue, strValue}, + }, { + in: PlanValue{ListKey: "absent"}, + err: "missing bind var absent", + }, { + in: PlanValue{Values: []PlanValue{{Key: "absent"}, {Value: strValue}}}, + err: "missing bind var absent", + }, { + in: PlanValue{ListKey: "int"}, + err: "single value was supplied for TUPLE bind var int", + }, { + in: PlanValue{Key: "int"}, + err: "a single value was supplied where a list was expected", + }} + + for _, tc := range tcases { + t.Run(fmt.Sprintf("%v", tc.in), func(t *testing.T) { + got, err := tc.in.ResolveList(testBindVars) + if tc.err != "" { + assert.EqualError(t, err, tc.err) + } else { + if !reflect.DeepEqual(got, tc.out) { + t.Errorf("ResolveList(%v): %v, want %v", tc.in, got, tc.out) + } + } + }) + } +} + +func TestResolveValue(t *testing.T) { + testBindVars := map[string]*querypb.BindVariable{ + "int": Int64BindVariable(10), + "intstr": TestBindVariable([]interface{}{10, "aa"}), + } + intValue := MakeTrusted(Int64, []byte("10")) + tcases := []struct { + in PlanValue + out Value + err string + }{{ + in: PlanValue{Key: "int"}, + out: intValue, + }, { + in: PlanValue{Value: intValue}, + out: intValue, + }, { + in: PlanValue{}, + out: NULL, + }, { + in: PlanValue{Key: "absent"}, + err: "missing bind var", + }, { + in: PlanValue{Key: "intstr"}, + err: "TUPLE was supplied for single value bind var", + }, { + in: PlanValue{ListKey: "intstr"}, + err: "a list was supplied where a single value was expected", + }} + + for _, tc := range tcases { + got, err := tc.in.ResolveValue(testBindVars) + if err != nil { + if !strings.Contains(err.Error(), tc.err) { + t.Errorf("ResolveValue(%v) error: %v, want '%s'", tc.in, err, tc.err) + } + continue + } + if tc.err != "" { + t.Errorf("ResolveValue(%v) error: nil, want '%s'", tc.in, tc.err) + continue + } + if !reflect.DeepEqual(got, tc.out) { + t.Errorf("ResolveValue(%v): %v, want %v", tc.in, got, tc.out) + } + } +} diff --git a/internal/stackql-parser-fork/go/sqltypes/proto3.go b/internal/stackql-parser-fork/go/sqltypes/proto3.go new file mode 100644 index 00000000..747a85c4 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/proto3.go @@ -0,0 +1,218 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "github.com/golang/protobuf/proto" + + "github.com/stackql/stackql-parser/go/vt/vterrors" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +// This file contains the proto3 conversion functions for the structures +// defined here. + +// RowToProto3 converts []Value to proto3. +func RowToProto3(row []Value) *querypb.Row { + result := &querypb.Row{} + result.Lengths = make([]int64, 0, len(row)) + total := 0 + for _, c := range row { + if c.IsNull() { + result.Lengths = append(result.Lengths, -1) + continue + } + length := c.Len() + result.Lengths = append(result.Lengths, int64(length)) + total += length + } + result.Values = make([]byte, 0, total) + for _, c := range row { + if c.IsNull() { + continue + } + result.Values = append(result.Values, c.Raw()...) + } + return result +} + +// RowsToProto3 converts [][]Value to proto3. +func RowsToProto3(rows [][]Value) []*querypb.Row { + if len(rows) == 0 { + return nil + } + + result := make([]*querypb.Row, len(rows)) + for i, r := range rows { + result[i] = RowToProto3(r) + } + return result +} + +// proto3ToRows converts a proto3 rows to [][]Value. The function is private +// because it uses the trusted API. +func proto3ToRows(fields []*querypb.Field, rows []*querypb.Row) [][]Value { + if len(rows) == 0 { + // TODO(sougou): This is needed for backward compatibility. + // Remove when it's not needed any more. + return [][]Value{} + } + + result := make([][]Value, len(rows)) + for i, r := range rows { + result[i] = MakeRowTrusted(fields, r) + } + return result +} + +// ResultToProto3 converts Result to proto3. +func ResultToProto3(qr *Result) *querypb.QueryResult { + if qr == nil { + return nil + } + return &querypb.QueryResult{ + Fields: qr.Fields, + RowsAffected: qr.RowsAffected, + InsertId: qr.InsertID, + Rows: RowsToProto3(qr.Rows), + } +} + +// Proto3ToResult converts a proto3 Result to an internal data structure. This function +// should be used only if the field info is populated in qr. +func Proto3ToResult(qr *querypb.QueryResult) *Result { + if qr == nil { + return nil + } + return &Result{ + Fields: qr.Fields, + RowsAffected: qr.RowsAffected, + InsertID: qr.InsertId, + Rows: proto3ToRows(qr.Fields, qr.Rows), + } +} + +// CustomProto3ToResult converts a proto3 Result to an internal data structure. This function +// takes a separate fields input because not all QueryResults contain the field info. +// In particular, only the first packet of streaming queries contain the field info. +func CustomProto3ToResult(fields []*querypb.Field, qr *querypb.QueryResult) *Result { + if qr == nil { + return nil + } + return &Result{ + Fields: qr.Fields, + RowsAffected: qr.RowsAffected, + InsertID: qr.InsertId, + Rows: proto3ToRows(fields, qr.Rows), + } +} + +// ResultsToProto3 converts []Result to proto3. +func ResultsToProto3(qr []Result) []*querypb.QueryResult { + if len(qr) == 0 { + return nil + } + result := make([]*querypb.QueryResult, len(qr)) + for i, q := range qr { + result[i] = ResultToProto3(&q) + } + return result +} + +// Proto3ToResults converts proto3 results to []Result. +func Proto3ToResults(qr []*querypb.QueryResult) []Result { + if len(qr) == 0 { + return nil + } + result := make([]Result, len(qr)) + for i, q := range qr { + result[i] = *Proto3ToResult(q) + } + return result +} + +// QueryResponsesToProto3 converts []QueryResponse to proto3. +func QueryResponsesToProto3(qr []QueryResponse) []*querypb.ResultWithError { + if len(qr) == 0 { + return nil + } + result := make([]*querypb.ResultWithError, len(qr)) + for i, q := range qr { + result[i] = &querypb.ResultWithError{ + Result: ResultToProto3(q.QueryResult), + Error: vterrors.ToVTRPC(q.QueryError), + } + } + return result +} + +// Proto3ToQueryReponses converts proto3 queryResponse to []QueryResponse. +func Proto3ToQueryReponses(qr []*querypb.ResultWithError) []QueryResponse { + if len(qr) == 0 { + return nil + } + result := make([]QueryResponse, len(qr)) + for i, q := range qr { + result[i] = QueryResponse{ + QueryResult: Proto3ToResult(q.Result), + QueryError: vterrors.FromVTRPC(q.Error), + } + } + return result +} + +// Proto3ResultsEqual compares two arrays of proto3 Result. +// reflect.DeepEqual shouldn't be used because of the protos. +func Proto3ResultsEqual(r1, r2 []*querypb.QueryResult) bool { + if len(r1) != len(r2) { + return false + } + for i, r := range r1 { + if !proto.Equal(r, r2[i]) { + return false + } + } + return true +} + +// Proto3QueryResponsesEqual compares two arrays of proto3 QueryResponse. +// reflect.DeepEqual shouldn't be used because of the protos. +func Proto3QueryResponsesEqual(r1, r2 []*querypb.ResultWithError) bool { + if len(r1) != len(r2) { + return false + } + for i, r := range r1 { + if !proto.Equal(r, r2[i]) { + return false + } + } + return true +} + +// Proto3ValuesEqual compares two arrays of proto3 Value. +func Proto3ValuesEqual(v1, v2 []*querypb.Value) bool { + if len(v1) != len(v2) { + return false + } + for i, v := range v1 { + if !proto.Equal(v, v2[i]) { + return false + } + } + return true +} diff --git a/internal/stackql-parser-fork/go/sqltypes/proto3_test.go b/internal/stackql-parser-fork/go/sqltypes/proto3_test.go new file mode 100644 index 00000000..98bfabc4 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/proto3_test.go @@ -0,0 +1,247 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "testing" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" + "github.com/stackql/stackql-parser/go/vt/vterrors" + + "github.com/golang/protobuf/proto" +) + +func TestResult(t *testing.T) { + fields := []*querypb.Field{{ + Name: "col1", + Type: VarChar, + }, { + Name: "col2", + Type: Int64, + }, { + Name: "col3", + Type: Float64, + }} + sqlResult := &Result{ + Fields: fields, + InsertID: 1, + RowsAffected: 2, + Rows: [][]Value{{ + TestValue(VarChar, "aa"), + TestValue(Int64, "1"), + TestValue(Float64, "2"), + }, { + MakeTrusted(VarChar, []byte("bb")), + NULL, + NULL, + }}, + } + p3Result := &querypb.QueryResult{ + Fields: fields, + InsertId: 1, + RowsAffected: 2, + Rows: []*querypb.Row{{ + Lengths: []int64{2, 1, 1}, + Values: []byte("aa12"), + }, { + Lengths: []int64{2, -1, -1}, + Values: []byte("bb"), + }}, + } + p3converted := ResultToProto3(sqlResult) + if !proto.Equal(p3converted, p3Result) { + t.Errorf("P3:\n%v, want\n%v", p3converted, p3Result) + } + + reverse := Proto3ToResult(p3Result) + if !reverse.Equal(sqlResult) { + t.Errorf("reverse:\n%#v, want\n%#v", reverse, sqlResult) + } + + // Test custom fields. + fields[1].Type = VarBinary + sqlResult.Rows[0][1] = TestValue(VarBinary, "1") + reverse = CustomProto3ToResult(fields, p3Result) + if !reverse.Equal(sqlResult) { + t.Errorf("reverse:\n%#v, want\n%#v", reverse, sqlResult) + } +} + +func TestResults(t *testing.T) { + fields1 := []*querypb.Field{{ + Name: "col1", + Type: VarChar, + }, { + Name: "col2", + Type: Int64, + }, { + Name: "col3", + Type: Float64, + }} + fields2 := []*querypb.Field{{ + Name: "col11", + Type: VarChar, + }, { + Name: "col12", + Type: Int64, + }, { + Name: "col13", + Type: Float64, + }} + sqlResults := []Result{{ + Fields: fields1, + InsertID: 1, + RowsAffected: 2, + Rows: [][]Value{{ + TestValue(VarChar, "aa"), + TestValue(Int64, "1"), + TestValue(Float64, "2"), + }}, + }, { + Fields: fields2, + InsertID: 3, + RowsAffected: 4, + Rows: [][]Value{{ + TestValue(VarChar, "bb"), + TestValue(Int64, "3"), + TestValue(Float64, "4"), + }}, + }} + p3Results := []*querypb.QueryResult{{ + Fields: fields1, + InsertId: 1, + RowsAffected: 2, + Rows: []*querypb.Row{{ + Lengths: []int64{2, 1, 1}, + Values: []byte("aa12"), + }}, + }, { + Fields: fields2, + InsertId: 3, + RowsAffected: 4, + Rows: []*querypb.Row{{ + Lengths: []int64{2, 1, 1}, + Values: []byte("bb34"), + }}, + }} + p3converted := ResultsToProto3(sqlResults) + if !Proto3ResultsEqual(p3converted, p3Results) { + t.Errorf("P3:\n%v, want\n%v", p3converted, p3Results) + } + + reverse := Proto3ToResults(p3Results) + if !ResultsEqual(reverse, sqlResults) { + t.Errorf("reverse:\n%#v, want\n%#v", reverse, sqlResults) + } +} + +func TestQueryReponses(t *testing.T) { + fields1 := []*querypb.Field{{ + Name: "col1", + Type: VarChar, + }, { + Name: "col2", + Type: Int64, + }, { + Name: "col3", + Type: Float64, + }} + fields2 := []*querypb.Field{{ + Name: "col11", + Type: VarChar, + }, { + Name: "col12", + Type: Int64, + }, { + Name: "col13", + Type: Float64, + }} + + queryResponses := []QueryResponse{ + { + QueryResult: &Result{ + Fields: fields1, + InsertID: 1, + RowsAffected: 2, + Rows: [][]Value{{ + TestValue(VarChar, "aa"), + TestValue(Int64, "1"), + TestValue(Float64, "2"), + }}, + }, + QueryError: nil, + }, { + QueryResult: &Result{ + Fields: fields2, + InsertID: 3, + RowsAffected: 4, + Rows: [][]Value{{ + TestValue(VarChar, "bb"), + TestValue(Int64, "3"), + TestValue(Float64, "4"), + }}, + }, + QueryError: nil, + }, { + QueryResult: nil, + QueryError: vterrors.New(vtrpcpb.Code_DEADLINE_EXCEEDED, "deadline exceeded"), + }, + } + + p3ResultWithError := []*querypb.ResultWithError{ + { + Error: nil, + Result: &querypb.QueryResult{ + Fields: fields1, + InsertId: 1, + RowsAffected: 2, + Rows: []*querypb.Row{{ + Lengths: []int64{2, 1, 1}, + Values: []byte("aa12"), + }}, + }, + }, { + Error: nil, + Result: &querypb.QueryResult{ + Fields: fields2, + InsertId: 3, + RowsAffected: 4, + Rows: []*querypb.Row{{ + Lengths: []int64{2, 1, 1}, + Values: []byte("bb34"), + }}, + }, + }, { + Error: &vtrpcpb.RPCError{ + LegacyCode: vtrpcpb.LegacyErrorCode_DEADLINE_EXCEEDED_LEGACY, + Message: "deadline exceeded", + Code: vtrpcpb.Code_DEADLINE_EXCEEDED, + }, + Result: nil, + }, + } + p3converted := QueryResponsesToProto3(queryResponses) + if !Proto3QueryResponsesEqual(p3converted, p3ResultWithError) { + t.Errorf("P3:\n%v, want\n%v", p3converted, p3ResultWithError) + } + + reverse := Proto3ToQueryReponses(p3ResultWithError) + if !QueryResponsesEqual(reverse, queryResponses) { + t.Errorf("reverse:\n%#v, want\n%#v", reverse, queryResponses) + } +} diff --git a/internal/stackql-parser-fork/go/sqltypes/query_response.go b/internal/stackql-parser-fork/go/sqltypes/query_response.go new file mode 100644 index 00000000..04c6d229 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/query_response.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "github.com/stackql/stackql-parser/go/vt/vterrors" +) + +// QueryResponse represents a query response for ExecuteBatch. +type QueryResponse struct { + QueryResult *Result + QueryError error +} + +// QueryResponsesEqual compares two arrays of QueryResponse. +// They contain protos, so we cannot use reflect.DeepEqual. +func QueryResponsesEqual(r1, r2 []QueryResponse) bool { + if len(r1) != len(r2) { + return false + } + for i, r := range r1 { + if !r.QueryResult.Equal(r2[i].QueryResult) { + return false + } + if !vterrors.Equals(r.QueryError, r2[i].QueryError) { + return false + } + } + return true +} diff --git a/internal/stackql-parser-fork/go/sqltypes/result.go b/internal/stackql-parser-fork/go/sqltypes/result.go new file mode 100644 index 00000000..999f20dc --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/result.go @@ -0,0 +1,221 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "reflect" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" + + "github.com/golang/protobuf/proto" +) + +// Result represents a query result. +type Result struct { + Fields []*querypb.Field `json:"fields"` + RowsAffected uint64 `json:"rows_affected"` + InsertID uint64 `json:"insert_id"` + Rows [][]Value `json:"rows"` +} + +// ResultStream is an interface for receiving Result. It is used for +// RPC interfaces. +type ResultStream interface { + // Recv returns the next result on the stream. + // It will return io.EOF if the stream ended. + Recv() (*Result, error) +} + +// Repair fixes the type info in the rows +// to conform to the supplied field types. +func (result *Result) Repair(fields []*querypb.Field) { + // Usage of j is intentional. + for j, f := range fields { + for _, r := range result.Rows { + if r[j].typ != Null { + r[j].typ = f.Type + } + } + } +} + +// Copy creates a deep copy of Result. +func (result *Result) Copy() *Result { + out := &Result{ + InsertID: result.InsertID, + RowsAffected: result.RowsAffected, + } + if result.Fields != nil { + fieldsp := make([]*querypb.Field, len(result.Fields)) + fields := make([]querypb.Field, len(result.Fields)) + for i, f := range result.Fields { + fields[i] = *f + fieldsp[i] = &fields[i] + } + out.Fields = fieldsp + } + if result.Rows != nil { + out.Rows = make([][]Value, 0, len(result.Rows)) + for _, r := range result.Rows { + out.Rows = append(out.Rows, CopyRow(r)) + } + } + return out +} + +// CopyRow makes a copy of the row. +func CopyRow(r []Value) []Value { + // The raw bytes of the values are supposed to be treated as read-only. + // So, there's no need to copy them. + out := make([]Value, len(r)) + copy(out, r) + return out +} + +// Truncate returns a new Result with all the rows truncated +// to the specified number of columns. +func (result *Result) Truncate(l int) *Result { + if l == 0 { + return result + } + + out := &Result{ + InsertID: result.InsertID, + RowsAffected: result.RowsAffected, + } + if result.Fields != nil { + out.Fields = result.Fields[:l] + } + if result.Rows != nil { + out.Rows = make([][]Value, 0, len(result.Rows)) + for _, r := range result.Rows { + out.Rows = append(out.Rows, r[:l]) + } + } + return out +} + +// FieldsEqual compares two arrays of fields. +// reflect.DeepEqual shouldn't be used because of the protos. +func FieldsEqual(f1, f2 []*querypb.Field) bool { + if len(f1) != len(f2) { + return false + } + for i, f := range f1 { + if !proto.Equal(f, f2[i]) { + return false + } + } + return true +} + +// Equal compares the Result with another one. +// reflect.DeepEqual shouldn't be used because of the protos. +func (result *Result) Equal(other *Result) bool { + // Check for nil cases + if result == nil { + return other == nil + } + if other == nil { + return false + } + + // Compare Fields, RowsAffected, InsertID, Rows. + return FieldsEqual(result.Fields, other.Fields) && + result.RowsAffected == other.RowsAffected && + result.InsertID == other.InsertID && + reflect.DeepEqual(result.Rows, other.Rows) +} + +// ResultsEqual compares two arrays of Result. +// reflect.DeepEqual shouldn't be used because of the protos. +func ResultsEqual(r1, r2 []Result) bool { + if len(r1) != len(r2) { + return false + } + for i, r := range r1 { + if !r.Equal(&r2[i]) { + return false + } + } + return true +} + +// MakeRowTrusted converts a *querypb.Row to []Value based on the types +// in fields. It does not sanity check the values against the type. +// Every place this function is called, a comment is needed that explains +// why it's justified. +func MakeRowTrusted(fields []*querypb.Field, row *querypb.Row) []Value { + sqlRow := make([]Value, len(row.Lengths)) + var offset int64 + for i, length := range row.Lengths { + if length < 0 { + continue + } + sqlRow[i] = MakeTrusted(fields[i].Type, row.Values[offset:offset+length]) + offset += length + } + return sqlRow +} + +// IncludeFieldsOrDefault normalizes the passed Execution Options. +// It returns the default value if options is nil. +func IncludeFieldsOrDefault(options *querypb.ExecuteOptions) querypb.ExecuteOptions_IncludedFields { + if options == nil { + return querypb.ExecuteOptions_TYPE_AND_NAME + } + + return options.IncludedFields +} + +// StripMetadata will return a new Result that has the same Rows, +// but the Field objects will have their non-critical metadata emptied. Note we don't +// proto.Copy each Field for performance reasons, but we only copy the +// individual fields. +func (result *Result) StripMetadata(incl querypb.ExecuteOptions_IncludedFields) *Result { + if incl == querypb.ExecuteOptions_ALL || len(result.Fields) == 0 { + return result + } + r := *result + r.Fields = make([]*querypb.Field, len(result.Fields)) + newFieldsArray := make([]querypb.Field, len(result.Fields)) + for i, f := range result.Fields { + r.Fields[i] = &newFieldsArray[i] + newFieldsArray[i].Type = f.Type + if incl == querypb.ExecuteOptions_TYPE_AND_NAME { + newFieldsArray[i].Name = f.Name + } + } + return &r +} + +// AppendResult will combine the Results Objects of one result +// to another result.Note currently it doesn't handle cases like +// if two results have different fields.We will enhance this function. +func (result *Result) AppendResult(src *Result) { + if src.RowsAffected == 0 && len(src.Fields) == 0 { + return + } + if result.Fields == nil { + result.Fields = src.Fields + } + result.RowsAffected += src.RowsAffected + if src.InsertID != 0 { + result.InsertID = src.InsertID + } + result.Rows = append(result.Rows, src.Rows...) +} diff --git a/internal/stackql-parser-fork/go/sqltypes/result_test.go b/internal/stackql-parser-fork/go/sqltypes/result_test.go new file mode 100644 index 00000000..aa7f74d3 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/result_test.go @@ -0,0 +1,298 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "reflect" + "testing" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +func TestRepair(t *testing.T) { + fields := []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }} + in := Result{ + Rows: [][]Value{ + {TestValue(VarBinary, "1"), TestValue(VarBinary, "aa")}, + {TestValue(VarBinary, "2"), TestValue(VarBinary, "bb")}, + }, + } + want := Result{ + Rows: [][]Value{ + {TestValue(Int64, "1"), TestValue(VarChar, "aa")}, + {TestValue(Int64, "2"), TestValue(VarChar, "bb")}, + }, + } + in.Repair(fields) + if !reflect.DeepEqual(in, want) { + t.Errorf("Repair:\n%#v, want\n%#v", in, want) + } +} + +func TestCopy(t *testing.T) { + in := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }}, + InsertID: 1, + RowsAffected: 2, + Rows: [][]Value{ + {TestValue(Int64, "1"), MakeTrusted(Null, nil)}, + {TestValue(Int64, "2"), MakeTrusted(VarChar, nil)}, + {TestValue(Int64, "3"), TestValue(VarChar, "")}, + }, + } + out := in.Copy() + if !reflect.DeepEqual(out, in) { + t.Errorf("Copy:\n%v, want\n%v", out, in) + } +} + +func TestTruncate(t *testing.T) { + in := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }}, + InsertID: 1, + RowsAffected: 2, + Rows: [][]Value{ + {TestValue(Int64, "1"), MakeTrusted(Null, nil)}, + {TestValue(Int64, "2"), MakeTrusted(VarChar, nil)}, + {TestValue(Int64, "3"), TestValue(VarChar, "")}, + }, + } + + out := in.Truncate(0) + if !reflect.DeepEqual(out, in) { + t.Errorf("Truncate(0):\n%v, want\n%v", out, in) + } + + out = in.Truncate(1) + want := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }}, + InsertID: 1, + RowsAffected: 2, + Rows: [][]Value{ + {TestValue(Int64, "1")}, + {TestValue(Int64, "2")}, + {TestValue(Int64, "3")}, + }, + } + if !reflect.DeepEqual(out, want) { + t.Errorf("Truncate(1):\n%v, want\n%v", out, want) + } +} + +func TestStripMetaData(t *testing.T) { + testcases := []struct { + name string + in *Result + expected *Result + includedFields querypb.ExecuteOptions_IncludedFields + }{{ + name: "no fields", + in: &Result{}, + expected: &Result{}, + }, { + name: "empty fields", + in: &Result{ + Fields: []*querypb.Field{}, + }, + expected: &Result{ + Fields: []*querypb.Field{}, + }, + }, { + name: "no name", + includedFields: querypb.ExecuteOptions_TYPE_ONLY, + in: &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }}, + }, + expected: &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }}, + }, + }, { + name: "names", + includedFields: querypb.ExecuteOptions_TYPE_ONLY, + in: &Result{ + Fields: []*querypb.Field{{ + Name: "field1", + Type: Int64, + }, { + Name: "field2", + Type: VarChar, + }}, + }, + expected: &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }}, + }, + }, { + name: "all fields - strip to type", + includedFields: querypb.ExecuteOptions_TYPE_ONLY, + in: &Result{ + Fields: []*querypb.Field{{ + Name: "field1", + Table: "table1", + OrgTable: "orgtable1", + OrgName: "orgname1", + ColumnLength: 5, + Charset: 63, + Decimals: 0, + Flags: 2, + Type: Int64, + }, { + Name: "field2", + Table: "table2", + OrgTable: "orgtable2", + OrgName: "orgname2", + ColumnLength: 5, + Charset: 63, + Decimals: 0, + Flags: 2, + Type: VarChar, + }}, + }, + expected: &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }}, + }, + }, { + name: "all fields - not stripped", + includedFields: querypb.ExecuteOptions_ALL, + in: &Result{ + Fields: []*querypb.Field{{ + Name: "field1", + Table: "table1", + OrgTable: "orgtable1", + OrgName: "orgname1", + ColumnLength: 5, + Charset: 63, + Decimals: 0, + Flags: 2, + Type: Int64, + }, { + Name: "field2", + Table: "table2", + OrgTable: "orgtable2", + OrgName: "orgname2", + ColumnLength: 5, + Charset: 63, + Decimals: 0, + Flags: 2, + Type: VarChar, + }}, + }, + expected: &Result{ + Fields: []*querypb.Field{{ + Name: "field1", + Table: "table1", + OrgTable: "orgtable1", + OrgName: "orgname1", + ColumnLength: 5, + Charset: 63, + Decimals: 0, + Flags: 2, + Type: Int64, + }, { + Name: "field2", + Table: "table2", + OrgTable: "orgtable2", + OrgName: "orgname2", + ColumnLength: 5, + Charset: 63, + Decimals: 0, + Flags: 2, + Type: VarChar, + }}, + }, + }, { + name: "all fields - strip to type and name", + in: &Result{ + Fields: []*querypb.Field{{ + Name: "field1", + Table: "table1", + OrgTable: "orgtable1", + OrgName: "orgname1", + ColumnLength: 5, + Charset: 63, + Decimals: 0, + Flags: 2, + Type: Int64, + }, { + Name: "field2", + Table: "table2", + OrgTable: "orgtable2", + OrgName: "orgname2", + ColumnLength: 5, + Charset: 63, + Decimals: 0, + Flags: 2, + Type: VarChar, + }}, + }, + expected: &Result{ + Fields: []*querypb.Field{{ + Name: "field1", + Type: Int64, + }, { + Name: "field2", + Type: VarChar, + }}, + }, + }} + for _, tcase := range testcases { + inCopy := tcase.in.Copy() + out := inCopy.StripMetadata(tcase.includedFields) + if !reflect.DeepEqual(out, tcase.expected) { + t.Errorf("StripMetaData unexpected result for %v: %v", tcase.name, out) + } + if len(tcase.in.Fields) > 0 { + // check the out array is different than the in array. + if out.Fields[0] == inCopy.Fields[0] && tcase.includedFields != querypb.ExecuteOptions_ALL { + t.Errorf("StripMetaData modified original Field for %v", tcase.name) + } + } + // check we didn't change the original result. + if !reflect.DeepEqual(tcase.in, inCopy) { + t.Error("StripMetaData modified original result") + } + } +} diff --git a/internal/stackql-parser-fork/go/sqltypes/testing.go b/internal/stackql-parser-fork/go/sqltypes/testing.go new file mode 100644 index 00000000..f3de549f --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/testing.go @@ -0,0 +1,160 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "bytes" + "fmt" + "strings" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +// Functions in this file should only be used for testing. +// This is an experiment to see if test code bloat can be +// reduced and readability improved. + +// MakeTestFields builds a []*querypb.Field for testing. +// +// fields := sqltypes.MakeTestFields( +// "a|b", +// "int64|varchar", +// ) +// +// The field types are as defined in querypb and are case +// insensitive. Column delimiters must be used only to sepearate +// strings and not at the beginning or the end. +func MakeTestFields(names, types string) []*querypb.Field { + n := split(names) + t := split(types) + var fields []*querypb.Field + for i := range n { + fields = append(fields, &querypb.Field{ + Name: n[i], + Type: querypb.Type(querypb.Type_value[strings.ToUpper(t[i])]), + }) + } + return fields +} + +// MakeTestResult builds a *sqltypes.Result object for testing. +// +// result := sqltypes.MakeTestResult( +// fields, +// " 1|a", +// "10|abcd", +// ) +// +// The field type values are set as the types for the rows built. +// Spaces are trimmed from row values. "null" is treated as NULL. +func MakeTestResult(fields []*querypb.Field, rows ...string) *Result { + result := &Result{ + Fields: fields, + } + if len(rows) > 0 { + result.Rows = make([][]Value, len(rows)) + } + for i, row := range rows { + result.Rows[i] = make([]Value, len(fields)) + for j, col := range split(row) { + if col == "null" { + continue + } + result.Rows[i][j] = MakeTrusted(fields[j].Type, []byte(col)) + } + } + result.RowsAffected = uint64(len(result.Rows)) + return result +} + +// MakeTestStreamingResults builds a list of results for streaming. +// +// results := sqltypes.MakeStreamingResults( +// fields, +// "1|a", +// "2|b", +// "---", +// "c|c", +// ) +// +// The first result contains only the fields. Subsequent results +// are built using the field types. Every input that starts with a "-" +// is treated as streaming delimiter for one result. A final +// delimiter must not be supplied. +func MakeTestStreamingResults(fields []*querypb.Field, rows ...string) []*Result { + var results []*Result + results = append(results, &Result{Fields: fields}) + start := 0 + cur := 0 + // Add a final streaming delimiter to simplify the loop below. + rows = append(rows, "-") + for cur < len(rows) { + if rows[cur][0] != '-' { + cur++ + continue + } + result := MakeTestResult(fields, rows[start:cur]...) + result.Fields = nil + result.RowsAffected = 0 + results = append(results, result) + start = cur + 1 + cur = start + } + return results +} + +// TestBindVariable makes a *querypb.BindVariable from +// an interface{}.It panics on invalid input. +// This function should only be used for testing. +func TestBindVariable(v interface{}) *querypb.BindVariable { + if v == nil { + return NullBindVariable + } + bv, err := BuildBindVariable(v) + if err != nil { + panic(err) + } + return bv +} + +// TestValue builds a Value from typ and val. +// This function should only be used for testing. +func TestValue(typ querypb.Type, val string) Value { + return MakeTrusted(typ, []byte(val)) +} + +// PrintResults prints []*Results into a string. +// This function should only be used for testing. +func PrintResults(results []*Result) string { + b := new(bytes.Buffer) + for i, r := range results { + if i == 0 { + fmt.Fprintf(b, "%v", r) + continue + } + fmt.Fprintf(b, ", %v", r) + } + return b.String() +} + +func split(str string) []string { + splits := strings.Split(str, "|") + for i, v := range splits { + splits[i] = strings.TrimSpace(v) + } + return splits +} diff --git a/internal/stackql-parser-fork/go/sqltypes/type.go b/internal/stackql-parser-fork/go/sqltypes/type.go new file mode 100644 index 00000000..b63be31a --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/type.go @@ -0,0 +1,313 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "fmt" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +// This file provides wrappers and support +// functions for querypb.Type. + +// These bit flags can be used to query on the +// common properties of types. +const ( + flagIsIntegral = int(querypb.Flag_ISINTEGRAL) + flagIsUnsigned = int(querypb.Flag_ISUNSIGNED) + flagIsFloat = int(querypb.Flag_ISFLOAT) + flagIsQuoted = int(querypb.Flag_ISQUOTED) + flagIsText = int(querypb.Flag_ISTEXT) + flagIsBinary = int(querypb.Flag_ISBINARY) +) + +// IsIntegral returns true if querypb.Type is an integral +// (signed/unsigned) that can be represented using +// up to 64 binary bits. +// If you have a Value object, use its member function. +func IsIntegral(t querypb.Type) bool { + return int(t)&flagIsIntegral == flagIsIntegral +} + +// IsSigned returns true if querypb.Type is a signed integral. +// If you have a Value object, use its member function. +func IsSigned(t querypb.Type) bool { + return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral +} + +// IsUnsigned returns true if querypb.Type is an unsigned integral. +// Caution: this is not the same as !IsSigned. +// If you have a Value object, use its member function. +func IsUnsigned(t querypb.Type) bool { + return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral|flagIsUnsigned +} + +// IsFloat returns true is querypb.Type is a floating point. +// If you have a Value object, use its member function. +func IsFloat(t querypb.Type) bool { + return int(t)&flagIsFloat == flagIsFloat +} + +// IsQuoted returns true if querypb.Type is a quoted text or binary. +// If you have a Value object, use its member function. +func IsQuoted(t querypb.Type) bool { + return (int(t)&flagIsQuoted == flagIsQuoted) && t != Bit +} + +// IsText returns true if querypb.Type is a text. +// If you have a Value object, use its member function. +func IsText(t querypb.Type) bool { + return int(t)&flagIsText == flagIsText +} + +// IsBinary returns true if querypb.Type is a binary. +// If you have a Value object, use its member function. +func IsBinary(t querypb.Type) bool { + return int(t)&flagIsBinary == flagIsBinary +} + +// IsNumber returns true if the type is any type of number. +func IsNumber(t querypb.Type) bool { + return IsIntegral(t) || IsFloat(t) || t == Decimal +} + +// Vitess data types. These are idiomatically +// named synonyms for the querypb.Type values. +// Although these constants are interchangeable, +// they should be treated as different from querypb.Type. +// Use the synonyms only to refer to the type in Value. +// For proto variables, use the querypb.Type constants +// instead. +// The following conditions are non-overlapping +// and cover all types: IsSigned(), IsUnsigned(), +// IsFloat(), IsQuoted(), Null, Decimal, Expression, Bit +// Also, IsIntegral() == (IsSigned()||IsUnsigned()). +// TestCategory needs to be updated accordingly if +// you add a new type. +// If IsBinary or IsText is true, then IsQuoted is +// also true. But there are IsQuoted types that are +// neither binary or text. +// querypb.Type_TUPLE is not included in this list +// because it's not a valid Value type. +// TODO(sougou): provide a categorization function +// that returns enums, which will allow for cleaner +// switch statements for those who want to cover types +// by their category. +const ( + Null = querypb.Type_NULL_TYPE + Int8 = querypb.Type_INT8 + Uint8 = querypb.Type_UINT8 + Int16 = querypb.Type_INT16 + Uint16 = querypb.Type_UINT16 + Int24 = querypb.Type_INT24 + Uint24 = querypb.Type_UINT24 + Int32 = querypb.Type_INT32 + Uint32 = querypb.Type_UINT32 + Int64 = querypb.Type_INT64 + Uint64 = querypb.Type_UINT64 + Float32 = querypb.Type_FLOAT32 + Float64 = querypb.Type_FLOAT64 + Timestamp = querypb.Type_TIMESTAMP + Date = querypb.Type_DATE + Time = querypb.Type_TIME + Datetime = querypb.Type_DATETIME + Year = querypb.Type_YEAR + Decimal = querypb.Type_DECIMAL + Text = querypb.Type_TEXT + Blob = querypb.Type_BLOB + VarChar = querypb.Type_VARCHAR + VarBinary = querypb.Type_VARBINARY + Char = querypb.Type_CHAR + Binary = querypb.Type_BINARY + Bit = querypb.Type_BIT + Enum = querypb.Type_ENUM + Set = querypb.Type_SET + Geometry = querypb.Type_GEOMETRY + TypeJSON = querypb.Type_JSON + Expression = querypb.Type_EXPRESSION +) + +// bit-shift the mysql flags by two byte so we +// can merge them with the mysql or vitess types. +const ( + mysqlUnsigned = 32 + mysqlBinary = 128 + mysqlEnum = 256 + mysqlSet = 2048 +) + +// If you add to this map, make sure you add a test case +// in tabletserver/endtoend. +var mysqlToType = map[int64]querypb.Type{ + 0: Decimal, + 1: Int8, + 2: Int16, + 3: Int32, + 4: Float32, + 5: Float64, + 6: Null, + 7: Timestamp, + 8: Int64, + 9: Int24, + 10: Date, + 11: Time, + 12: Datetime, + 13: Year, + 15: VarChar, + 16: Bit, + 17: Timestamp, + 18: Datetime, + 19: Time, + 245: TypeJSON, + 246: Decimal, + 247: Enum, + 248: Set, + 249: Text, + 250: Text, + 251: Text, + 252: Text, + 253: VarChar, + 254: Char, + 255: Geometry, +} + +// modifyType modifies the vitess type based on the +// mysql flag. The function checks specific flags based +// on the type. This allows us to ignore stray flags +// that MySQL occasionally sets. +func modifyType(typ querypb.Type, flags int64) querypb.Type { + switch typ { + case Int8: + if flags&mysqlUnsigned != 0 { + return Uint8 + } + return Int8 + case Int16: + if flags&mysqlUnsigned != 0 { + return Uint16 + } + return Int16 + case Int32: + if flags&mysqlUnsigned != 0 { + return Uint32 + } + return Int32 + case Int64: + if flags&mysqlUnsigned != 0 { + return Uint64 + } + return Int64 + case Int24: + if flags&mysqlUnsigned != 0 { + return Uint24 + } + return Int24 + case Text: + if flags&mysqlBinary != 0 { + return Blob + } + return Text + case VarChar: + if flags&mysqlBinary != 0 { + return VarBinary + } + return VarChar + case Char: + if flags&mysqlBinary != 0 { + return Binary + } + if flags&mysqlEnum != 0 { + return Enum + } + if flags&mysqlSet != 0 { + return Set + } + return Char + } + return typ +} + +// MySQLToType computes the vitess type from mysql type and flags. +func MySQLToType(mysqlType, flags int64) (typ querypb.Type, err error) { + result, ok := mysqlToType[mysqlType] + if !ok { + return 0, fmt.Errorf("unsupported type: %d", mysqlType) + } + return modifyType(result, flags), nil +} + +// TypeEquivalenceCheck returns whether two types are equivalent. +func AreTypesEquivalent(mysqlTypeFromBinlog, mysqlTypeFromSchema querypb.Type) bool { + return (mysqlTypeFromBinlog == mysqlTypeFromSchema) || + (mysqlTypeFromBinlog == VarChar && mysqlTypeFromSchema == VarBinary) || + // Binlog only has base type. But doesn't have per-column-flags to differentiate + // various logical types. For Binary, Enum, Set types, binlog only returns Char + // as data type. + (mysqlTypeFromBinlog == Char && mysqlTypeFromSchema == Binary) || + (mysqlTypeFromBinlog == Char && mysqlTypeFromSchema == Enum) || + (mysqlTypeFromBinlog == Char && mysqlTypeFromSchema == Set) || + (mysqlTypeFromBinlog == Text && mysqlTypeFromSchema == Blob) || + (mysqlTypeFromBinlog == Int8 && mysqlTypeFromSchema == Uint8) || + (mysqlTypeFromBinlog == Int16 && mysqlTypeFromSchema == Uint16) || + (mysqlTypeFromBinlog == Int24 && mysqlTypeFromSchema == Uint24) || + (mysqlTypeFromBinlog == Int32 && mysqlTypeFromSchema == Uint32) || + (mysqlTypeFromBinlog == Int64 && mysqlTypeFromSchema == Uint64) +} + +// typeToMySQL is the reverse of mysqlToType. +var typeToMySQL = map[querypb.Type]struct { + typ int64 + flags int64 +}{ + Int8: {typ: 1}, + Uint8: {typ: 1, flags: mysqlUnsigned}, + Int16: {typ: 2}, + Uint16: {typ: 2, flags: mysqlUnsigned}, + Int32: {typ: 3}, + Uint32: {typ: 3, flags: mysqlUnsigned}, + Float32: {typ: 4}, + Float64: {typ: 5}, + Null: {typ: 6, flags: mysqlBinary}, + Timestamp: {typ: 7}, + Int64: {typ: 8}, + Uint64: {typ: 8, flags: mysqlUnsigned}, + Int24: {typ: 9}, + Uint24: {typ: 9, flags: mysqlUnsigned}, + Date: {typ: 10, flags: mysqlBinary}, + Time: {typ: 11, flags: mysqlBinary}, + Datetime: {typ: 12, flags: mysqlBinary}, + Year: {typ: 13, flags: mysqlUnsigned}, + Bit: {typ: 16, flags: mysqlUnsigned}, + TypeJSON: {typ: 245}, + Decimal: {typ: 246}, + Text: {typ: 252}, + Blob: {typ: 252, flags: mysqlBinary}, + VarChar: {typ: 253}, + VarBinary: {typ: 253, flags: mysqlBinary}, + Char: {typ: 254}, + Binary: {typ: 254, flags: mysqlBinary}, + Enum: {typ: 254, flags: mysqlEnum}, + Set: {typ: 254, flags: mysqlSet}, + Geometry: {typ: 255}, +} + +// TypeToMySQL returns the equivalent mysql type and flag for a vitess type. +func TypeToMySQL(typ querypb.Type) (mysqlType, flags int64) { + val := typeToMySQL[typ] + return val.typ, val.flags +} diff --git a/internal/stackql-parser-fork/go/sqltypes/type_test.go b/internal/stackql-parser-fork/go/sqltypes/type_test.go new file mode 100644 index 00000000..b298e1b4 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/type_test.go @@ -0,0 +1,435 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "testing" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +func TestTypeValues(t *testing.T) { + testcases := []struct { + defined querypb.Type + expected int + }{{ + defined: Null, + expected: 0, + }, { + defined: Int8, + expected: 1 | flagIsIntegral, + }, { + defined: Uint8, + expected: 2 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Int16, + expected: 3 | flagIsIntegral, + }, { + defined: Uint16, + expected: 4 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Int24, + expected: 5 | flagIsIntegral, + }, { + defined: Uint24, + expected: 6 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Int32, + expected: 7 | flagIsIntegral, + }, { + defined: Uint32, + expected: 8 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Int64, + expected: 9 | flagIsIntegral, + }, { + defined: Uint64, + expected: 10 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Float32, + expected: 11 | flagIsFloat, + }, { + defined: Float64, + expected: 12 | flagIsFloat, + }, { + defined: Timestamp, + expected: 13 | flagIsQuoted, + }, { + defined: Date, + expected: 14 | flagIsQuoted, + }, { + defined: Time, + expected: 15 | flagIsQuoted, + }, { + defined: Datetime, + expected: 16 | flagIsQuoted, + }, { + defined: Year, + expected: 17 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Decimal, + expected: 18, + }, { + defined: Text, + expected: 19 | flagIsQuoted | flagIsText, + }, { + defined: Blob, + expected: 20 | flagIsQuoted | flagIsBinary, + }, { + defined: VarChar, + expected: 21 | flagIsQuoted | flagIsText, + }, { + defined: VarBinary, + expected: 22 | flagIsQuoted | flagIsBinary, + }, { + defined: Char, + expected: 23 | flagIsQuoted | flagIsText, + }, { + defined: Binary, + expected: 24 | flagIsQuoted | flagIsBinary, + }, { + defined: Bit, + expected: 25 | flagIsQuoted, + }, { + defined: Enum, + expected: 26 | flagIsQuoted, + }, { + defined: Set, + expected: 27 | flagIsQuoted, + }, { + defined: Geometry, + expected: 29 | flagIsQuoted, + }, { + defined: TypeJSON, + expected: 30 | flagIsQuoted, + }, { + defined: Expression, + expected: 31, + }} + for _, tcase := range testcases { + if int(tcase.defined) != tcase.expected { + t.Errorf("Type %s: %d, want: %d", tcase.defined, int(tcase.defined), tcase.expected) + } + } +} + +// TestCategory verifies that the type categorizations +// are non-overlapping and complete. +func TestCategory(t *testing.T) { + alltypes := []querypb.Type{ + Null, + Int8, + Uint8, + Int16, + Uint16, + Int24, + Uint24, + Int32, + Uint32, + Int64, + Uint64, + Float32, + Float64, + Timestamp, + Date, + Time, + Datetime, + Year, + Decimal, + Text, + Blob, + VarChar, + VarBinary, + Char, + Binary, + Bit, + Enum, + Set, + Geometry, + TypeJSON, + Expression, + } + for _, typ := range alltypes { + matched := false + if IsSigned(typ) { + if !IsIntegral(typ) { + t.Errorf("Signed type %v is not an integral", typ) + } + matched = true + } + if IsUnsigned(typ) { + if !IsIntegral(typ) { + t.Errorf("Unsigned type %v is not an integral", typ) + } + if matched { + t.Errorf("%v matched more than one category", typ) + } + matched = true + } + if IsFloat(typ) { + if matched { + t.Errorf("%v matched more than one category", typ) + } + matched = true + } + if IsQuoted(typ) { + if matched { + t.Errorf("%v matched more than one category", typ) + } + matched = true + } + if typ == Null || typ == Decimal || typ == Expression || typ == Bit { + if matched { + t.Errorf("%v matched more than one category", typ) + } + matched = true + } + if !matched { + t.Errorf("%v matched no category", typ) + } + } +} + +func TestIsFunctions(t *testing.T) { + if IsIntegral(Null) { + t.Error("Null: IsIntegral, must be false") + } + if !IsIntegral(Int64) { + t.Error("Int64: !IsIntegral, must be true") + } + if IsSigned(Uint64) { + t.Error("Uint64: IsSigned, must be false") + } + if !IsSigned(Int64) { + t.Error("Int64: !IsSigned, must be true") + } + if IsUnsigned(Int64) { + t.Error("Int64: IsUnsigned, must be false") + } + if !IsUnsigned(Uint64) { + t.Error("Uint64: !IsUnsigned, must be true") + } + if IsFloat(Int64) { + t.Error("Int64: IsFloat, must be false") + } + if !IsFloat(Float64) { + t.Error("Uint64: !IsFloat, must be true") + } + if IsQuoted(Int64) { + t.Error("Int64: IsQuoted, must be false") + } + if !IsQuoted(Binary) { + t.Error("Binary: !IsQuoted, must be true") + } + if IsText(Int64) { + t.Error("Int64: IsText, must be false") + } + if !IsText(Char) { + t.Error("Char: !IsText, must be true") + } + if IsBinary(Int64) { + t.Error("Int64: IsBinary, must be false") + } + if !IsBinary(Binary) { + t.Error("Char: !IsBinary, must be true") + } + if !IsNumber(Int64) { + t.Error("Int64: !isNumber, must be true") + } +} + +func TestTypeToMySQL(t *testing.T) { + v, f := TypeToMySQL(Bit) + if v != 16 { + t.Errorf("Bit: %d, want 16", v) + } + if f != mysqlUnsigned { + t.Errorf("Bit flag: %x, want %x", f, mysqlUnsigned) + } + v, f = TypeToMySQL(Date) + if v != 10 { + t.Errorf("Bit: %d, want 10", v) + } + if f != mysqlBinary { + t.Errorf("Bit flag: %x, want %x", f, mysqlBinary) + } +} + +func TestMySQLToType(t *testing.T) { + testcases := []struct { + intype int64 + inflags int64 + outtype querypb.Type + }{{ + intype: 1, + outtype: Int8, + }, { + intype: 1, + inflags: mysqlUnsigned, + outtype: Uint8, + }, { + intype: 2, + outtype: Int16, + }, { + intype: 2, + inflags: mysqlUnsigned, + outtype: Uint16, + }, { + intype: 3, + outtype: Int32, + }, { + intype: 3, + inflags: mysqlUnsigned, + outtype: Uint32, + }, { + intype: 4, + outtype: Float32, + }, { + intype: 5, + outtype: Float64, + }, { + intype: 6, + outtype: Null, + }, { + intype: 7, + outtype: Timestamp, + }, { + intype: 8, + outtype: Int64, + }, { + intype: 8, + inflags: mysqlUnsigned, + outtype: Uint64, + }, { + intype: 9, + outtype: Int24, + }, { + intype: 9, + inflags: mysqlUnsigned, + outtype: Uint24, + }, { + intype: 10, + outtype: Date, + }, { + intype: 11, + outtype: Time, + }, { + intype: 12, + outtype: Datetime, + }, { + intype: 13, + outtype: Year, + }, { + intype: 16, + outtype: Bit, + }, { + intype: 245, + outtype: TypeJSON, + }, { + intype: 246, + outtype: Decimal, + }, { + intype: 249, + outtype: Text, + }, { + intype: 250, + outtype: Text, + }, { + intype: 251, + outtype: Text, + }, { + intype: 252, + outtype: Text, + }, { + intype: 252, + inflags: mysqlBinary, + outtype: Blob, + }, { + intype: 253, + outtype: VarChar, + }, { + intype: 253, + inflags: mysqlBinary, + outtype: VarBinary, + }, { + intype: 254, + outtype: Char, + }, { + intype: 254, + inflags: mysqlBinary, + outtype: Binary, + }, { + intype: 254, + inflags: mysqlEnum, + outtype: Enum, + }, { + intype: 254, + inflags: mysqlSet, + outtype: Set, + }, { + intype: 255, + outtype: Geometry, + }, { + // Binary flag must be ignored. + intype: 8, + inflags: mysqlUnsigned | mysqlBinary, + outtype: Uint64, + }, { + // Unsigned flag must be ignored + intype: 252, + inflags: mysqlUnsigned | mysqlBinary, + outtype: Blob, + }} + for _, tcase := range testcases { + got, err := MySQLToType(tcase.intype, tcase.inflags) + if err != nil { + t.Error(err) + } + if got != tcase.outtype { + t.Errorf("MySQLToType(%d, %x): %v, want %v", tcase.intype, tcase.inflags, got, tcase.outtype) + } + } +} + +func TestTypeError(t *testing.T) { + _, err := MySQLToType(50, 0) + want := "unsupported type: 50" + if err == nil || err.Error() != want { + t.Errorf("MySQLToType: %v, want %s", err, want) + } +} + +func TestTypeEquivalenceCheck(t *testing.T) { + if !AreTypesEquivalent(Int16, Int16) { + t.Errorf("Int16 and Int16 are same types.") + } + if AreTypesEquivalent(Int16, Int24) { + t.Errorf("Int16 and Int24 are not same types.") + } + if !AreTypesEquivalent(VarChar, VarBinary) { + t.Errorf("VarChar in binlog and VarBinary in schema are equivalent types.") + } + if AreTypesEquivalent(VarBinary, VarChar) { + t.Errorf("VarBinary in binlog and VarChar in schema are not equivalent types.") + } + if !AreTypesEquivalent(Int16, Uint16) { + t.Errorf("Int16 in binlog and Uint16 in schema are equivalent types.") + } + if AreTypesEquivalent(Uint16, Int16) { + t.Errorf("Uint16 in binlog and Int16 in schema are not equivalent types.") + } +} diff --git a/internal/stackql-parser-fork/go/sqltypes/value.go b/internal/stackql-parser-fork/go/sqltypes/value.go new file mode 100644 index 00000000..4a82aff9 --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/value.go @@ -0,0 +1,398 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package sqltypes implements interfaces and types that represent SQL values. +package sqltypes + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "github.com/stackql/stackql-parser/go/bytes2" + "github.com/stackql/stackql-parser/go/hack" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +var ( + // NULL represents the NULL value. + NULL = Value{} + + // DontEscape tells you if a character should not be escaped. + DontEscape = byte(255) + + nullstr = []byte("null") +) + +// BinWriter interface is used for encoding values. +// Types like bytes.Buffer conform to this interface. +// We expect the writer objects to be in-memory buffers. +// So, we don't expect the write operations to fail. +type BinWriter interface { + Write([]byte) (int, error) +} + +// Value can store any SQL value. If the value represents +// an integral type, the bytes are always stored as a canonical +// representation that matches how MySQL returns such values. +type Value struct { + typ querypb.Type + val []byte +} + +// NewValue builds a Value using typ and val. If the value and typ +// don't match, it returns an error. +func NewValue(typ querypb.Type, val []byte) (v Value, err error) { + switch { + case IsSigned(typ): + if _, err := strconv.ParseInt(string(val), 0, 64); err != nil { + return NULL, err + } + return MakeTrusted(typ, val), nil + case IsUnsigned(typ): + if _, err := strconv.ParseUint(string(val), 0, 64); err != nil { + return NULL, err + } + return MakeTrusted(typ, val), nil + case IsFloat(typ) || typ == Decimal: + if _, err := strconv.ParseFloat(string(val), 64); err != nil { + return NULL, err + } + return MakeTrusted(typ, val), nil + case IsQuoted(typ) || typ == Bit || typ == Null: + return MakeTrusted(typ, val), nil + } + // All other types are unsafe or invalid. + return NULL, fmt.Errorf("invalid type specified for MakeValue: %v", typ) +} + +// MakeTrusted makes a new Value based on the type. +// This function should only be used if you know the value +// and type conform to the rules. Every place this function is +// called, a comment is needed that explains why it's justified. +// Exceptions: The current package and mysql package do not need +// comments. Other packages can also use the function to create +// VarBinary or VarChar values. +func MakeTrusted(typ querypb.Type, val []byte) Value { + + if typ == Null { + return NULL + } + + return Value{typ: typ, val: val} +} + +// NewInt64 builds an Int64 Value. +func NewInt64(v int64) Value { + return MakeTrusted(Int64, strconv.AppendInt(nil, v, 10)) +} + +// NewInt8 builds an Int8 Value. +func NewInt8(v int8) Value { + return MakeTrusted(Int8, strconv.AppendInt(nil, int64(v), 10)) +} + +// NewInt32 builds an Int64 Value. +func NewInt32(v int32) Value { + return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10)) +} + +// NewUint64 builds an Uint64 Value. +func NewUint64(v uint64) Value { + return MakeTrusted(Uint64, strconv.AppendUint(nil, v, 10)) +} + +// NewUint32 builds an Uint32 Value. +func NewUint32(v uint32) Value { + return MakeTrusted(Uint32, strconv.AppendUint(nil, uint64(v), 10)) +} + +// NewFloat64 builds an Float64 Value. +func NewFloat64(v float64) Value { + return MakeTrusted(Float64, strconv.AppendFloat(nil, v, 'g', -1, 64)) +} + +// NewVarChar builds a VarChar Value. +func NewVarChar(v string) Value { + return MakeTrusted(VarChar, []byte(v)) +} + +// NewVarBinary builds a VarBinary Value. +// The input is a string because it's the most common use case. +func NewVarBinary(v string) Value { + return MakeTrusted(VarBinary, []byte(v)) +} + +// NewIntegral builds an integral type from a string representation. +// The type will be Int64 or Uint64. Int64 will be preferred where possible. +func NewIntegral(val string) (n Value, err error) { + signed, err := strconv.ParseInt(val, 0, 64) + if err == nil { + return MakeTrusted(Int64, strconv.AppendInt(nil, signed, 10)), nil + } + unsigned, err := strconv.ParseUint(val, 0, 64) + if err != nil { + return Value{}, err + } + return MakeTrusted(Uint64, strconv.AppendUint(nil, unsigned, 10)), nil +} + +// InterfaceToValue builds a value from a go type. +// Supported types are nil, int64, uint64, float64, +// string and []byte. +// This function is deprecated. Use the type-specific +// functions instead. +func InterfaceToValue(goval interface{}) (Value, error) { + switch goval := goval.(type) { + case nil: + return NULL, nil + case []byte: + return MakeTrusted(VarBinary, goval), nil + case int64: + return NewInt64(goval), nil + case uint64: + return NewUint64(goval), nil + case float64: + return NewFloat64(goval), nil + case string: + return NewVarChar(goval), nil + default: + return NULL, fmt.Errorf("unexpected type %T: %v", goval, goval) + } +} + +// Type returns the type of Value. +func (v Value) Type() querypb.Type { + return v.typ +} + +// Raw returns the internal representation of the value. For newer types, +// this may not match MySQL's representation. +func (v Value) Raw() []byte { + return v.val +} + +// ToBytes returns the value as MySQL would return it as []byte. +// In contrast, Raw returns the internal representation of the Value, which may not +// match MySQL's representation for newer types. +// If the value is not convertible like in the case of Expression, it returns nil. +func (v Value) ToBytes() []byte { + if v.typ == Expression { + return nil + } + return v.val +} + +// Len returns the length. +func (v Value) Len() int { + return len(v.val) +} + +// ToString returns the value as MySQL would return it as string. +// If the value is not convertible like in the case of Expression, it returns nil. +func (v Value) ToString() string { + if v.typ == Expression { + return "" + } + return hack.String(v.val) +} + +// String returns a printable version of the value. +func (v Value) String() string { + if v.typ == Null { + return "NULL" + } + if v.IsQuoted() || v.typ == Bit { + return fmt.Sprintf("%v(%q)", v.typ, v.val) + } + return fmt.Sprintf("%v(%s)", v.typ, v.val) +} + +// EncodeSQL encodes the value into an SQL statement. Can be binary. +func (v Value) EncodeSQL(b BinWriter) { + switch { + case v.typ == Null: + b.Write(nullstr) + case v.IsQuoted(): + encodeBytesSQL(v.val, b) + case v.typ == Bit: + encodeBytesSQLBits(v.val, b) + default: + b.Write(v.val) + } +} + +// EncodeASCII encodes the value using 7-bit clean ascii bytes. +func (v Value) EncodeASCII(b BinWriter) { + switch { + case v.typ == Null: + b.Write(nullstr) + case v.IsQuoted() || v.typ == Bit: + encodeBytesASCII(v.val, b) + default: + b.Write(v.val) + } +} + +// IsNull returns true if Value is null. +func (v Value) IsNull() bool { + return v.typ == Null +} + +// IsIntegral returns true if Value is an integral. +func (v Value) IsIntegral() bool { + return IsIntegral(v.typ) +} + +// IsSigned returns true if Value is a signed integral. +func (v Value) IsSigned() bool { + return IsSigned(v.typ) +} + +// IsUnsigned returns true if Value is an unsigned integral. +func (v Value) IsUnsigned() bool { + return IsUnsigned(v.typ) +} + +// IsFloat returns true if Value is a float. +func (v Value) IsFloat() bool { + return IsFloat(v.typ) +} + +// IsQuoted returns true if Value must be SQL-quoted. +func (v Value) IsQuoted() bool { + return IsQuoted(v.typ) +} + +// IsText returns true if Value is a collatable text. +func (v Value) IsText() bool { + return IsText(v.typ) +} + +// IsBinary returns true if Value is binary. +func (v Value) IsBinary() bool { + return IsBinary(v.typ) +} + +// MarshalJSON should only be used for testing. +// It's not a complete implementation. +func (v Value) MarshalJSON() ([]byte, error) { + switch { + case v.IsQuoted() || v.typ == Bit: + return json.Marshal(v.ToString()) + case v.typ == Null: + return nullstr, nil + } + return v.val, nil +} + +// UnmarshalJSON should only be used for testing. +// It's not a complete implementation. +func (v *Value) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return fmt.Errorf("error unmarshaling empty bytes") + } + var val interface{} + var err error + switch b[0] { + case '-': + var ival int64 + err = json.Unmarshal(b, &ival) + val = ival + case '"': + var bval []byte + err = json.Unmarshal(b, &bval) + val = bval + case 'n': // null + err = json.Unmarshal(b, &val) + default: + var uval uint64 + err = json.Unmarshal(b, &uval) + val = uval + } + if err != nil { + return err + } + *v, err = InterfaceToValue(val) + return err +} + +func encodeBytesSQL(val []byte, b BinWriter) { + buf := &bytes2.Buffer{} + buf.WriteByte('\'') + for _, ch := range val { + if encodedChar := SQLEncodeMap[ch]; encodedChar == DontEscape { + buf.WriteByte(ch) + } else { + buf.WriteByte('\\') + buf.WriteByte(encodedChar) + } + } + buf.WriteByte('\'') + b.Write(buf.Bytes()) +} + +func encodeBytesSQLBits(val []byte, b BinWriter) { + fmt.Fprint(b, "b'") + for _, ch := range val { + fmt.Fprintf(b, "%08b", ch) + } + fmt.Fprint(b, "'") +} + +func encodeBytesASCII(val []byte, b BinWriter) { + buf := &bytes2.Buffer{} + buf.WriteByte('\'') + encoder := base64.NewEncoder(base64.StdEncoding, buf) + encoder.Write(val) + encoder.Close() + buf.WriteByte('\'') + b.Write(buf.Bytes()) +} + +// SQLEncodeMap specifies how to escape binary data with '\'. +// Complies to http://dev.mysql.com/doc/refman/5.1/en/string-syntax.html +var SQLEncodeMap [256]byte + +// SQLDecodeMap is the reverse of SQLEncodeMap +var SQLDecodeMap [256]byte + +var encodeRef = map[byte]byte{ + '\x00': '0', + '\'': '\'', + '"': '"', + '\b': 'b', + '\n': 'n', + '\r': 'r', + '\t': 't', + 26: 'Z', // ctl-Z + '\\': '\\', +} + +func init() { + for i := range SQLEncodeMap { + SQLEncodeMap[i] = DontEscape + SQLDecodeMap[i] = DontEscape + } + for i := range SQLEncodeMap { + if to, ok := encodeRef[byte(i)]; ok { + SQLEncodeMap[byte(i)] = to + SQLDecodeMap[to] = byte(i) + } + } +} diff --git a/internal/stackql-parser-fork/go/sqltypes/value_test.go b/internal/stackql-parser-fork/go/sqltypes/value_test.go new file mode 100644 index 00000000..98be2acd --- /dev/null +++ b/internal/stackql-parser-fork/go/sqltypes/value_test.go @@ -0,0 +1,412 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "bytes" + "reflect" + "strings" + "testing" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +const ( + InvalidNeg = "-9223372036854775809" + MinNeg = "-9223372036854775808" + MinPos = "18446744073709551615" + InvalidPos = "18446744073709551616" +) + +func TestNewValue(t *testing.T) { + testcases := []struct { + inType querypb.Type + inVal string + outVal Value + outErr string + }{{ + inType: Null, + inVal: "", + outVal: NULL, + }, { + inType: Int8, + inVal: "1", + outVal: TestValue(Int8, "1"), + }, { + inType: Int16, + inVal: "1", + outVal: TestValue(Int16, "1"), + }, { + inType: Int24, + inVal: "1", + outVal: TestValue(Int24, "1"), + }, { + inType: Int32, + inVal: "1", + outVal: TestValue(Int32, "1"), + }, { + inType: Int64, + inVal: "1", + outVal: TestValue(Int64, "1"), + }, { + inType: Uint8, + inVal: "1", + outVal: TestValue(Uint8, "1"), + }, { + inType: Uint16, + inVal: "1", + outVal: TestValue(Uint16, "1"), + }, { + inType: Uint24, + inVal: "1", + outVal: TestValue(Uint24, "1"), + }, { + inType: Uint32, + inVal: "1", + outVal: TestValue(Uint32, "1"), + }, { + inType: Uint64, + inVal: "1", + outVal: TestValue(Uint64, "1"), + }, { + inType: Float32, + inVal: "1.00", + outVal: TestValue(Float32, "1.00"), + }, { + inType: Float64, + inVal: "1.00", + outVal: TestValue(Float64, "1.00"), + }, { + inType: Decimal, + inVal: "1.00", + outVal: TestValue(Decimal, "1.00"), + }, { + inType: Timestamp, + inVal: "2012-02-24 23:19:43", + outVal: TestValue(Timestamp, "2012-02-24 23:19:43"), + }, { + inType: Date, + inVal: "2012-02-24", + outVal: TestValue(Date, "2012-02-24"), + }, { + inType: Time, + inVal: "23:19:43", + outVal: TestValue(Time, "23:19:43"), + }, { + inType: Datetime, + inVal: "2012-02-24 23:19:43", + outVal: TestValue(Datetime, "2012-02-24 23:19:43"), + }, { + inType: Year, + inVal: "1", + outVal: TestValue(Year, "1"), + }, { + inType: Text, + inVal: "a", + outVal: TestValue(Text, "a"), + }, { + inType: Blob, + inVal: "a", + outVal: TestValue(Blob, "a"), + }, { + inType: VarChar, + inVal: "a", + outVal: TestValue(VarChar, "a"), + }, { + inType: Binary, + inVal: "a", + outVal: TestValue(Binary, "a"), + }, { + inType: Char, + inVal: "a", + outVal: TestValue(Char, "a"), + }, { + inType: Bit, + inVal: "1", + outVal: TestValue(Bit, "1"), + }, { + inType: Enum, + inVal: "a", + outVal: TestValue(Enum, "a"), + }, { + inType: Set, + inVal: "a", + outVal: TestValue(Set, "a"), + }, { + inType: VarBinary, + inVal: "a", + outVal: TestValue(VarBinary, "a"), + }, { + inType: Int64, + inVal: InvalidNeg, + outErr: "out of range", + }, { + inType: Int64, + inVal: InvalidPos, + outErr: "out of range", + }, { + inType: Uint64, + inVal: "-1", + outErr: "invalid syntax", + }, { + inType: Uint64, + inVal: InvalidPos, + outErr: "out of range", + }, { + inType: Float64, + inVal: "a", + outErr: "invalid syntax", + }, { + inType: Expression, + inVal: "a", + outErr: "invalid type specified for MakeValue: EXPRESSION", + }} + for _, tcase := range testcases { + v, err := NewValue(tcase.inType, []byte(tcase.inVal)) + if tcase.outErr != "" { + if err == nil || !strings.Contains(err.Error(), tcase.outErr) { + t.Errorf("ValueFromBytes(%v, %v) error: %v, must contain %v", tcase.inType, tcase.inVal, err, tcase.outErr) + } + continue + } + if err != nil { + t.Errorf("ValueFromBytes(%v, %v) error: %v", tcase.inType, tcase.inVal, err) + continue + } + if !reflect.DeepEqual(v, tcase.outVal) { + t.Errorf("ValueFromBytes(%v, %v) = %v, want %v", tcase.inType, tcase.inVal, v, tcase.outVal) + } + } +} + +// TestNew tests 'New' functions that are not tested +// through other code paths. +func TestNew(t *testing.T) { + got := NewInt32(1) + want := MakeTrusted(Int32, []byte("1")) + if !reflect.DeepEqual(got, want) { + t.Errorf("NewInt32(aa): %v, want %v", got, want) + } + + got = NewVarBinary("aa") + want = MakeTrusted(VarBinary, []byte("aa")) + if !reflect.DeepEqual(got, want) { + t.Errorf("NewVarBinary(aa): %v, want %v", got, want) + } +} + +func TestMakeTrusted(t *testing.T) { + v := MakeTrusted(Null, []byte("abcd")) + if !reflect.DeepEqual(v, NULL) { + t.Errorf("MakeTrusted(Null...) = %v, want null", v) + } + v = MakeTrusted(Int64, []byte("1")) + want := TestValue(Int64, "1") + if !reflect.DeepEqual(v, want) { + t.Errorf("MakeTrusted(Int64, \"1\") = %v, want %v", v, want) + } +} + +func TestIntegralValue(t *testing.T) { + testcases := []struct { + in string + outVal Value + outErr string + }{{ + in: MinNeg, + outVal: TestValue(Int64, MinNeg), + }, { + in: "1", + outVal: TestValue(Int64, "1"), + }, { + in: MinPos, + outVal: TestValue(Uint64, MinPos), + }, { + in: InvalidPos, + outErr: "out of range", + }} + for _, tcase := range testcases { + v, err := NewIntegral(tcase.in) + if tcase.outErr != "" { + if err == nil || !strings.Contains(err.Error(), tcase.outErr) { + t.Errorf("BuildIntegral(%v) error: %v, must contain %v", tcase.in, err, tcase.outErr) + } + continue + } + if err != nil { + t.Errorf("BuildIntegral(%v) error: %v", tcase.in, err) + continue + } + if !reflect.DeepEqual(v, tcase.outVal) { + t.Errorf("BuildIntegral(%v) = %v, want %v", tcase.in, v, tcase.outVal) + } + } +} + +func TestInterfaceValue(t *testing.T) { + testcases := []struct { + in interface{} + out Value + }{{ + in: nil, + out: NULL, + }, { + in: []byte("a"), + out: TestValue(VarBinary, "a"), + }, { + in: int64(1), + out: TestValue(Int64, "1"), + }, { + in: uint64(1), + out: TestValue(Uint64, "1"), + }, { + in: float64(1.2), + out: TestValue(Float64, "1.2"), + }, { + in: "a", + out: TestValue(VarChar, "a"), + }} + for _, tcase := range testcases { + v, err := InterfaceToValue(tcase.in) + if err != nil { + t.Errorf("BuildValue(%#v) error: %v", tcase.in, err) + continue + } + if !reflect.DeepEqual(v, tcase.out) { + t.Errorf("BuildValue(%#v) = %v, want %v", tcase.in, v, tcase.out) + } + } + + _, err := InterfaceToValue(make(chan bool)) + want := "unexpected" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("BuildValue(chan): %v, want %v", err, want) + } +} + +func TestAccessors(t *testing.T) { + v := TestValue(Int64, "1") + if v.Type() != Int64 { + t.Errorf("v.Type=%v, want Int64", v.Type()) + } + if !bytes.Equal(v.Raw(), []byte("1")) { + t.Errorf("v.Raw=%s, want 1", v.Raw()) + } + if v.Len() != 1 { + t.Errorf("v.Len=%d, want 1", v.Len()) + } + if v.ToString() != "1" { + t.Errorf("v.String=%s, want 1", v.ToString()) + } + if v.IsNull() { + t.Error("v.IsNull: true, want false") + } + if !v.IsIntegral() { + t.Error("v.IsIntegral: false, want true") + } + if !v.IsSigned() { + t.Error("v.IsSigned: false, want true") + } + if v.IsUnsigned() { + t.Error("v.IsUnsigned: true, want false") + } + if v.IsFloat() { + t.Error("v.IsFloat: true, want false") + } + if v.IsQuoted() { + t.Error("v.IsQuoted: true, want false") + } + if v.IsText() { + t.Error("v.IsText: true, want false") + } + if v.IsBinary() { + t.Error("v.IsBinary: true, want false") + } +} + +func TestToBytesAndString(t *testing.T) { + for _, v := range []Value{ + NULL, + TestValue(Int64, "1"), + TestValue(Int64, "12"), + } { + if b := v.ToBytes(); !bytes.Equal(b, v.Raw()) { + t.Errorf("%v.ToBytes: %s, want %s", v, b, v.Raw()) + } + if s := v.ToString(); s != string(v.Raw()) { + t.Errorf("%v.ToString: %s, want %s", v, s, v.Raw()) + } + } + + tv := TestValue(Expression, "aa") + if b := tv.ToBytes(); b != nil { + t.Errorf("%v.ToBytes: %s, want nil", tv, b) + } + if s := tv.ToString(); s != "" { + t.Errorf("%v.ToString: %s, want \"\"", tv, s) + } +} + +func TestEncode(t *testing.T) { + testcases := []struct { + in Value + outSQL string + outASCII string + }{{ + in: NULL, + outSQL: "null", + outASCII: "null", + }, { + in: TestValue(Int64, "1"), + outSQL: "1", + outASCII: "1", + }, { + in: TestValue(VarChar, "foo"), + outSQL: "'foo'", + outASCII: "'Zm9v'", + }, { + in: TestValue(VarChar, "\x00'\"\b\n\r\t\x1A\\"), + outSQL: "'\\0\\'\\\"\\b\\n\\r\\t\\Z\\\\'", + outASCII: "'ACciCAoNCRpc'", + }, { + in: TestValue(Bit, "a"), + outSQL: "b'01100001'", + outASCII: "'YQ=='", + }} + for _, tcase := range testcases { + buf := &bytes.Buffer{} + tcase.in.EncodeSQL(buf) + if tcase.outSQL != buf.String() { + t.Errorf("%v.EncodeSQL = %q, want %q", tcase.in, buf.String(), tcase.outSQL) + } + buf = &bytes.Buffer{} + tcase.in.EncodeASCII(buf) + if tcase.outASCII != buf.String() { + t.Errorf("%v.EncodeASCII = %q, want %q", tcase.in, buf.String(), tcase.outASCII) + } + } +} + +// TestEncodeMap ensures DontEscape is not escaped +func TestEncodeMap(t *testing.T) { + if SQLEncodeMap[DontEscape] != DontEscape { + t.Errorf("SQLEncodeMap[DontEscape] = %v, want %v", SQLEncodeMap[DontEscape], DontEscape) + } + if SQLDecodeMap[DontEscape] != DontEscape { + t.Errorf("SQLDecodeMap[DontEscape] = %v, want %v", SQLEncodeMap[DontEscape], DontEscape) + } +} diff --git a/internal/stackql-parser-fork/go/tb/error.go b/internal/stackql-parser-fork/go/tb/error.go new file mode 100644 index 00000000..645bcadc --- /dev/null +++ b/internal/stackql-parser-fork/go/tb/error.go @@ -0,0 +1,138 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package tb exposes some handy traceback functionality buried in the runtime. +// +// It can also be used to provide context to errors reducing the temptation to +// panic carelessly, just to get stack information. +// +// The theory is that most errors that are created with the fmt.Errorf +// style are likely to be rare, but require more context to debug +// properly. The additional cost of computing a stack trace is +// therefore negligible. +package tb + +import ( + "bytes" + "fmt" + "io/ioutil" + "runtime" +) + +var ( + dunno = []byte("???") + centerDot = []byte("·") + dot = []byte(".") +) + +// StackError represents an error along with a stack trace. +type StackError interface { + Error() string + StackTrace() string +} + +type stackError struct { + err error + stackTrace string +} + +func (e stackError) Error() string { + return fmt.Sprintf("%v\n%v", e.err, e.stackTrace) +} + +func (e stackError) StackTrace() string { + return e.stackTrace +} + +func Errorf(msg string, args ...interface{}) error { + stack := "" + // See if any arg is already embedding a stack - no need to + // recompute something expensive and make the message unreadable. + for _, arg := range args { + if stackErr, ok := arg.(stackError); ok { + stack = stackErr.stackTrace + break + } + } + + if stack == "" { + // magic 5 trims off just enough stack data to be clear + stack = string(Stack(5)) + } + + return stackError{fmt.Errorf(msg, args...), stack} +} + +// Stack is taken from runtime/debug.go +// calldepth is the number of (bottommost) frames to skip. +func Stack(calldepth int) []byte { + return stack(calldepth) +} + +func stack(calldepth int) []byte { + buf := new(bytes.Buffer) // the returned data + // As we loop, we open files and read them. These variables record the currently + // loaded file. + var lines [][]byte + var lastFile string + for i := calldepth; ; i++ { // Caller we care about is the user, 2 frames up + pc, file, line, ok := runtime.Caller(i) + if !ok { + break + } + // Print this much at least. If we can't find the source, it won't show. + fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc) + if file != lastFile { + data, err := ioutil.ReadFile(file) + if err != nil { + continue + } + lines = bytes.Split(data, []byte{'\n'}) + lastFile = file + } + line-- // in stack trace, lines are 1-indexed but our array is 0-indexed + fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line)) + } + return buf.Bytes() +} + +// source returns a space-trimmed slice of the n'th line. +func source(lines [][]byte, n int) []byte { + if n < 0 || n >= len(lines) { + return dunno + } + return bytes.Trim(lines[n], " \t") +} + +// function returns, if possible, the name of the function containing the PC. +func function(pc uintptr) []byte { + fn := runtime.FuncForPC(pc) + if fn == nil { + return dunno + } + name := []byte(fn.Name()) + // The name includes the path name to the package, which is unnecessary + // since the file name is already included. Plus, it has center dots. + // That is, we see + // runtime/debug.*T·ptrmethod + // and want + // *T.ptrmethod + if period := bytes.Index(name, dot); period >= 0 { + name = name[period+1:] + } + name = bytes.Replace(name, centerDot, dot, -1) + return name +} diff --git a/internal/stackql-parser-fork/go/test/utils/diff.go b/internal/stackql-parser-fork/go/test/utils/diff.go new file mode 100644 index 00000000..05a1f9eb --- /dev/null +++ b/internal/stackql-parser-fork/go/test/utils/diff.go @@ -0,0 +1,83 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +// MustMatchFn is used to create a common diff function for a test file +// Usage in *_test.go file: +// +// Top declaration: +// +// var mustMatch = testutils.MustMatchFn( +// []interface{}{ // types with unexported fields +// type1{}, +// type2{}, +// ... +// typeN{}, +// }, +// []string{ // ignored fields +// ".id", // id numbers are unstable +// ".createAt", // created dates might not be interesting to compare +// }, +// ) +// +// In Test*() function: +// +// mustMatch(t, want, got, "something doesn't match") +func MustMatchFn(allowUnexportedTypes []interface{}, ignoredFields []string, extraOpts ...cmp.Option) func(t *testing.T, want, got interface{}, errMsg string) { + diffOpts := append([]cmp.Option{ + cmp.AllowUnexported(allowUnexportedTypes...), + cmpIgnoreFields(ignoredFields...), + }, extraOpts...) + // Diffs want/got and fails with errMsg on any failure. + return func(t *testing.T, want, got interface{}, errMsg string) { + t.Helper() + diff := cmp.Diff(want, got, diffOpts...) + if diff != "" { + t.Fatalf("%s: (-want +got)\n%v", errMsg, diff) + } + } +} + +// MustMatch is a convenience version of MustMatchFn with no overrides. +// Usage in Test*() function: +// +// testutils.MustMatch(t, want, got, "something doesn't match") +var MustMatch = MustMatchFn(nil, nil) + +// Skips fields of pathNames for cmp.Diff. +// Similar to standard cmpopts.IgnoreFields, but allows unexported fields. +func cmpIgnoreFields(pathNames ...string) cmp.Option { + skipFields := make(map[string]bool, len(pathNames)) + for _, name := range pathNames { + skipFields[name] = true + } + + return cmp.FilterPath(func(path cmp.Path) bool { + for _, ps := range path { + if skipFields[ps.String()] { + return true + } + } + return false + }, cmp.Ignore()) +} diff --git a/internal/stackql-parser-fork/go/test/utils/sort.go b/internal/stackql-parser-fork/go/test/utils/sort.go new file mode 100644 index 00000000..f3584ef7 --- /dev/null +++ b/internal/stackql-parser-fork/go/test/utils/sort.go @@ -0,0 +1,13 @@ +package utils + +import ( + "sort" + "strings" +) + +//SortString sorts the string. +func SortString(w string) string { + s := strings.Split(w, "") + sort.Strings(s) + return strings.Join(s, "") +} diff --git a/internal/stackql-parser-fork/go/trace/fake.go b/internal/stackql-parser-fork/go/trace/fake.go new file mode 100644 index 00000000..26821b96 --- /dev/null +++ b/internal/stackql-parser-fork/go/trace/fake.go @@ -0,0 +1,48 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import ( + "io" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type noopTracingServer struct{} + +func (noopTracingServer) New(Span, string) Span { return NoopSpan{} } +func (noopTracingServer) NewClientSpan(parent Span, serviceName, label string) Span { return NoopSpan{} } +func (noopTracingServer) FromContext(context.Context) (Span, bool) { return nil, false } +func (noopTracingServer) NewFromString(parent, label string) (Span, error) { return NoopSpan{}, nil } +func (noopTracingServer) NewContext(parent context.Context, _ Span) context.Context { return parent } +func (noopTracingServer) AddGrpcServerOptions(addInterceptors func(s grpc.StreamServerInterceptor, u grpc.UnaryServerInterceptor)) { +} +func (noopTracingServer) AddGrpcClientOptions(addInterceptors func(s grpc.StreamClientInterceptor, u grpc.UnaryClientInterceptor)) { +} + +// NoopSpan implements Span with no-op methods. +type NoopSpan struct{} + +func (NoopSpan) Finish() {} +func (NoopSpan) Annotate(string, interface{}) {} + +func init() { + tracingBackendFactories["noop"] = func(_ string) (tracingService, io.Closer, error) { + return noopTracingServer{}, &nilCloser{}, nil + } +} diff --git a/internal/stackql-parser-fork/go/trace/opentracing.go b/internal/stackql-parser-fork/go/trace/opentracing.go new file mode 100644 index 00000000..ce88e2b6 --- /dev/null +++ b/internal/stackql-parser-fork/go/trace/opentracing.go @@ -0,0 +1,135 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import ( + "strings" + + "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" + "github.com/stackql/stackql-parser/go/vt/vterrors" + + otgrpc "github.com/opentracing-contrib/go-grpc" + "github.com/opentracing/opentracing-go" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +var _ Span = (*openTracingSpan)(nil) + +type openTracingSpan struct { + otSpan opentracing.Span +} + +// Finish will mark a span as finished +func (js openTracingSpan) Finish() { + js.otSpan.Finish() +} + +// Annotate will add information to an existing span +func (js openTracingSpan) Annotate(key string, value interface{}) { + js.otSpan.SetTag(key, value) +} + +var _ tracingService = (*openTracingService)(nil) + +type tracer interface { + GetOpenTracingTracer() opentracing.Tracer +} + +type openTracingService struct { + Tracer tracer +} + +// AddGrpcServerOptions is part of an interface implementation +func (jf openTracingService) AddGrpcServerOptions(addInterceptors func(s grpc.StreamServerInterceptor, u grpc.UnaryServerInterceptor)) { + ot := jf.Tracer.GetOpenTracingTracer() + addInterceptors(otgrpc.OpenTracingStreamServerInterceptor(ot), otgrpc.OpenTracingServerInterceptor(ot)) +} + +// AddGrpcClientOptions is part of an interface implementation +func (jf openTracingService) AddGrpcClientOptions(addInterceptors func(s grpc.StreamClientInterceptor, u grpc.UnaryClientInterceptor)) { + ot := jf.Tracer.GetOpenTracingTracer() + addInterceptors(otgrpc.OpenTracingStreamClientInterceptor(ot), otgrpc.OpenTracingClientInterceptor(ot)) +} + +// NewClientSpan is part of an interface implementation +func (jf openTracingService) NewClientSpan(parent Span, serviceName, label string) Span { + span := jf.New(parent, label) + span.Annotate("peer.service", serviceName) + return span +} + +// New is part of an interface implementation +func (jf openTracingService) New(parent Span, label string) Span { + var innerSpan opentracing.Span + if parent == nil { + innerSpan = jf.Tracer.GetOpenTracingTracer().StartSpan(label) + } else { + jaegerParent := parent.(openTracingSpan) + span := jaegerParent.otSpan + innerSpan = jf.Tracer.GetOpenTracingTracer().StartSpan(label, opentracing.ChildOf(span.Context())) + } + return openTracingSpan{otSpan: innerSpan} +} + +func extractMapFromString(in string) (opentracing.TextMapCarrier, error) { + m := make(opentracing.TextMapCarrier) + items := strings.Split(in, ":") + if len(items) < 2 { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "expected transmitted context to contain at least span id and trace id") + } + for _, v := range items { + idx := strings.Index(v, "=") + if idx < 1 { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "every element in the context string has to be in the form key=value") + } + m[v[0:idx]] = v[idx+1:] + } + return m, nil +} + +func (jf openTracingService) NewFromString(parent, label string) (Span, error) { + carrier, err := extractMapFromString(parent) + if err != nil { + return nil, err + } + spanContext, err := jf.Tracer.GetOpenTracingTracer().Extract(opentracing.TextMap, carrier) + if err != nil { + return nil, vterrors.Wrap(err, "failed to deserialize span context") + } + innerSpan := jf.Tracer.GetOpenTracingTracer().StartSpan(label, opentracing.ChildOf(spanContext)) + return openTracingSpan{otSpan: innerSpan}, nil +} + +// FromContext is part of an interface implementation +func (jf openTracingService) FromContext(ctx context.Context) (Span, bool) { + innerSpan := opentracing.SpanFromContext(ctx) + + if innerSpan == nil { + return nil, false + } + return openTracingSpan{otSpan: innerSpan}, true +} + +// NewContext is part of an interface implementation +func (jf openTracingService) NewContext(parent context.Context, s Span) context.Context { + span, ok := s.(openTracingSpan) + if !ok { + return nil + } + return opentracing.ContextWithSpan(parent, span.otSpan) +} diff --git a/internal/stackql-parser-fork/go/trace/opentracing_test.go b/internal/stackql-parser-fork/go/trace/opentracing_test.go new file mode 100644 index 00000000..19bdbce9 --- /dev/null +++ b/internal/stackql-parser-fork/go/trace/opentracing_test.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import ( + "testing" + + "github.com/opentracing/opentracing-go" + "github.com/stretchr/testify/assert" +) + +func TestExtractMapFromString(t *testing.T) { + expected := make(opentracing.TextMapCarrier) + expected["apa"] = "12" + expected["banan"] = "x-tracing-backend-12" + result, err := extractMapFromString("apa=12:banan=x-tracing-backend-12") + assert.NoError(t, err) + assert.Equal(t, expected, result) +} + +func TestErrorConditions(t *testing.T) { + _, err := extractMapFromString("") + assert.Error(t, err) + + _, err = extractMapFromString("key=value:keywithnovalue") + assert.Error(t, err) +} diff --git a/internal/stackql-parser-fork/go/trace/plugin_datadog.go b/internal/stackql-parser-fork/go/trace/plugin_datadog.go new file mode 100644 index 00000000..87809d9b --- /dev/null +++ b/internal/stackql-parser-fork/go/trace/plugin_datadog.go @@ -0,0 +1,56 @@ +package trace + +import ( + "flag" + "fmt" + "io" + + "github.com/opentracing/opentracing-go" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer" + ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" +) + +var ( + dataDogHost = flag.String("datadog-agent-host", "", "host to send spans to. if empty, no tracing will be done") + dataDogPort = flag.String("datadog-agent-port", "", "port to send spans to. if empty, no tracing will be done") +) + +func newDatadogTracer(serviceName string) (tracingService, io.Closer, error) { + if *dataDogHost == "" || *dataDogPort == "" { + return nil, nil, fmt.Errorf("need host and port to datadog agent to use datadog tracing") + } + + t := opentracer.New( + ddtracer.WithAgentAddr(*dataDogHost+":"+*dataDogPort), + ddtracer.WithServiceName(serviceName), + ddtracer.WithDebugMode(true), + ddtracer.WithSampler(ddtracer.NewRateSampler(*samplingRate)), + ) + + opentracing.SetGlobalTracer(t) + + return openTracingService{Tracer: &datadogTracer{actual: t}}, &ddCloser{}, nil +} + +var _ io.Closer = (*ddCloser)(nil) + +type ddCloser struct{} + +func (ddCloser) Close() error { + ddtracer.Stop() + return nil +} + +func init() { + tracingBackendFactories["opentracing-datadog"] = newDatadogTracer +} + +var _ tracer = (*datadogTracer)(nil) + +type datadogTracer struct { + actual opentracing.Tracer +} + +func (dt *datadogTracer) GetOpenTracingTracer() opentracing.Tracer { + return dt.actual +} diff --git a/internal/stackql-parser-fork/go/trace/plugin_jaeger.go b/internal/stackql-parser-fork/go/trace/plugin_jaeger.go new file mode 100644 index 00000000..2314a0f5 --- /dev/null +++ b/internal/stackql-parser-fork/go/trace/plugin_jaeger.go @@ -0,0 +1,102 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import ( + "flag" + "io" + + "github.com/stackql/stackql-parser/go/vt/log" + + "github.com/opentracing/opentracing-go" + "github.com/uber/jaeger-client-go" + "github.com/uber/jaeger-client-go/config" +) + +/* +This file makes it easy to build Vitess without including the Jaeger binaries. +All that is needed is to delete this file. OpenTracing binaries will still be +included but nothing Jaeger specific. +*/ + +var ( + agentHost = flag.String("jaeger-agent-host", "", "host and port to send spans to. if empty, no tracing will be done") + samplingRate = flag.Float64("tracing-sampling-rate", 0.1, "sampling rate for the probabilistic jaeger sampler") +) + +// newJagerTracerFromEnv will instantiate a tracingService implemented by Jaeger, +// taking configuration from environment variables. Available properties are: +// JAEGER_SERVICE_NAME -- If this is set, the service name used in code will be ignored and this value used instead +// JAEGER_RPC_METRICS +// JAEGER_TAGS +// JAEGER_SAMPLER_TYPE +// JAEGER_SAMPLER_PARAM +// JAEGER_SAMPLER_MANAGER_HOST_PORT +// JAEGER_SAMPLER_MAX_OPERATIONS +// JAEGER_SAMPLER_REFRESH_INTERVAL +// JAEGER_REPORTER_MAX_QUEUE_SIZE +// JAEGER_REPORTER_FLUSH_INTERVAL +// JAEGER_REPORTER_LOG_SPANS +// JAEGER_ENDPOINT +// JAEGER_USER +// JAEGER_PASSWORD +// JAEGER_AGENT_HOST +// JAEGER_AGENT_PORT +func newJagerTracerFromEnv(serviceName string) (tracingService, io.Closer, error) { + cfg, err := config.FromEnv() + if err != nil { + return nil, nil, err + } + if cfg.ServiceName == "" { + cfg.ServiceName = serviceName + } + + // Allow command line args to override environment variables. + if *agentHost != "" { + cfg.Reporter.LocalAgentHostPort = *agentHost + } + log.Infof("Tracing to: %v as %v", cfg.Reporter.LocalAgentHostPort, cfg.ServiceName) + cfg.Sampler = &config.SamplerConfig{ + Type: jaeger.SamplerTypeConst, + Param: *samplingRate, + } + log.Infof("Tracing sampling rate: %v", *samplingRate) + + tracer, closer, err := cfg.NewTracer() + + if err != nil { + return nil, &nilCloser{}, err + } + + opentracing.SetGlobalTracer(tracer) + + return openTracingService{Tracer: &jaegerTracer{actual: tracer}}, closer, nil +} + +func init() { + tracingBackendFactories["opentracing-jaeger"] = newJagerTracerFromEnv +} + +var _ tracer = (*jaegerTracer)(nil) + +type jaegerTracer struct { + actual opentracing.Tracer +} + +func (jt *jaegerTracer) GetOpenTracingTracer() opentracing.Tracer { + return jt.actual +} diff --git a/internal/stackql-parser-fork/go/trace/trace.go b/internal/stackql-parser-fork/go/trace/trace.go new file mode 100644 index 00000000..9ebcf1be --- /dev/null +++ b/internal/stackql-parser-fork/go/trace/trace.go @@ -0,0 +1,171 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package trace contains a helper interface that allows various tracing +// tools to be plugged in to components using this interface. If no plugin is +// registered, the default one makes all trace calls into no-ops. +package trace + +import ( + "flag" + "io" + "strings" + + "github.com/stackql/stackql-parser/go/vt/log" + "github.com/stackql/stackql-parser/go/vt/sqlparser" + "github.com/stackql/stackql-parser/go/vt/vterrors" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +// Span represents a unit of work within a trace. After creating a Span with +// NewSpan(), call one of the Start methods to mark the beginning of the work +// represented by this Span. Call Finish() when that work is done to record the +// Span. A Span may be reused by calling Start again. +type Span interface { + Finish() + // Annotate records a key/value pair associated with a Span. It should be + // called between Start and Finish. + Annotate(key string, value interface{}) +} + +// NewSpan creates a new Span with the currently installed tracing plugin. +// If no tracing plugin is installed, it returns a fake Span that does nothing. +func NewSpan(inCtx context.Context, label string) (Span, context.Context) { + parent, _ := currentTracer.FromContext(inCtx) + span := currentTracer.New(parent, label) + outCtx := currentTracer.NewContext(inCtx, span) + + return span, outCtx +} + +// NewFromString creates a new Span with the currently installed tracing plugin, extracting the span context from +// the provided string. +func NewFromString(inCtx context.Context, parent, label string) (Span, context.Context, error) { + span, err := currentTracer.NewFromString(parent, label) + if err != nil { + return nil, nil, err + } + outCtx := currentTracer.NewContext(inCtx, span) + return span, outCtx, nil +} + +// AnnotateSQL annotates information about a sql query in the span. This is done in a way +// so as to not leak personally identifying information (PII), or sensitive personal information (SPI) +func AnnotateSQL(span Span, sql string) { + span.Annotate("sql-statement-type", sqlparser.Preview(sql).String()) +} + +// FromContext returns the Span from a Context if present. The bool return +// value indicates whether a Span was present in the Context. +func FromContext(ctx context.Context) (Span, bool) { + return currentTracer.FromContext(ctx) +} + +// NewContext returns a context based on parent with a new Span value. +func NewContext(parent context.Context, span Span) context.Context { + return currentTracer.NewContext(parent, span) +} + +// CopySpan creates a new context from parentCtx, with only the trace span +// copied over from spanCtx, if it has any. If not, parentCtx is returned. +func CopySpan(parentCtx, spanCtx context.Context) context.Context { + if span, ok := FromContext(spanCtx); ok { + return NewContext(parentCtx, span) + } + return parentCtx +} + +// AddGrpcServerOptions adds GRPC interceptors that read the parent span from the grpc packets +func AddGrpcServerOptions(addInterceptors func(s grpc.StreamServerInterceptor, u grpc.UnaryServerInterceptor)) { + currentTracer.AddGrpcServerOptions(addInterceptors) +} + +// AddGrpcClientOptions adds GRPC interceptors that add parent information to outgoing grpc packets +func AddGrpcClientOptions(addInterceptors func(s grpc.StreamClientInterceptor, u grpc.UnaryClientInterceptor)) { + currentTracer.AddGrpcClientOptions(addInterceptors) +} + +// tracingService is an interface for creating spans or extracting them from Contexts. +type tracingService interface { + // New creates a new span from an existing one, if provided. The parent can also be nil + New(parent Span, label string) Span + + // NewFromString creates a new span and uses the provided string to reconstitute the parent span + NewFromString(parent, label string) (Span, error) + + // FromContext extracts a span from a context, making it possible to annotate the span with additional information + FromContext(ctx context.Context) (Span, bool) + + // NewContext creates a new context containing the provided span + NewContext(parent context.Context, span Span) context.Context + + // AddGrpcServerOptions allows a tracing system to add interceptors to grpc server traffic + AddGrpcServerOptions(addInterceptors func(s grpc.StreamServerInterceptor, u grpc.UnaryServerInterceptor)) + + // AddGrpcClientOptions allows a tracing system to add interceptors to grpc server traffic + AddGrpcClientOptions(addInterceptors func(s grpc.StreamClientInterceptor, u grpc.UnaryClientInterceptor)) +} + +// TracerFactory creates a tracing service for the service provided. It's important to close the provided io.Closer +// object to make sure that all spans are sent to the backend before the process exits. +type TracerFactory func(serviceName string) (tracingService, io.Closer, error) + +// tracingBackendFactories should be added to by a plugin during init() to install itself +var tracingBackendFactories = make(map[string]TracerFactory) + +var currentTracer tracingService = noopTracingServer{} + +var ( + tracingServer = flag.String("tracer", "noop", "tracing service to use") +) + +// StartTracing enables tracing for a named service +func StartTracing(serviceName string) io.Closer { + factory, ok := tracingBackendFactories[*tracingServer] + if !ok { + return fail(serviceName) + } + + tracer, closer, err := factory(serviceName) + if err != nil { + log.Error(vterrors.Wrapf(err, "failed to create a %s tracer", *tracingServer)) + return &nilCloser{} + } + + currentTracer = tracer + if *tracingServer != "noop" { + log.Infof("successfully started tracing with [%s]", *tracingServer) + } + + return closer +} + +func fail(serviceName string) io.Closer { + options := make([]string, len(tracingBackendFactories)) + for k := range tracingBackendFactories { + options = append(options, k) + } + altStr := strings.Join(options, ", ") + log.Errorf("no such [%s] tracing service found. alternatives are: %v", serviceName, altStr) + return &nilCloser{} +} + +type nilCloser struct { +} + +func (c *nilCloser) Close() error { return nil } diff --git a/internal/stackql-parser-fork/go/trace/trace_test.go b/internal/stackql-parser-fork/go/trace/trace_test.go new file mode 100644 index 00000000..f09cc086 --- /dev/null +++ b/internal/stackql-parser-fork/go/trace/trace_test.go @@ -0,0 +1,145 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import ( + "fmt" + "io" + "strings" + "testing" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +func TestFakeSpan(t *testing.T) { + ctx := context.Background() + + // It should be safe to call all the usual methods as if a plugin were installed. + span1, ctx := NewSpan(ctx, "label") + span1.Finish() + + span2, ctx := NewSpan(ctx, "label") + span2.Annotate("key", 42) + span2.Finish() + + span3, _ := NewSpan(ctx, "label") + span3.Annotate("key", 42) + span3.Finish() +} + +func TestRegisterService(t *testing.T) { + fakeName := "test" + tracingBackendFactories[fakeName] = func(serviceName string) (tracingService, io.Closer, error) { + tracer := &fakeTracer{name: serviceName} + return tracer, tracer, nil + } + + tracingServer = &fakeName + + serviceName := "vtservice" + closer := StartTracing(serviceName) + tracer, ok := closer.(*fakeTracer) + if !ok { + t.Fatalf("did not get the expected tracer") + } + + if tracer.name != serviceName { + t.Fatalf("expected the name to be `%v` but it was `%v`", serviceName, tracer.name) + } +} + +func TestProtectPII(t *testing.T) { + // set up fake tracer that we can assert on + fakeName := "test" + var tracer *fakeTracer + tracingBackendFactories[fakeName] = func(serviceName string) (tracingService, io.Closer, error) { + tracer = &fakeTracer{name: serviceName} + return tracer, tracer, nil + } + + tracingServer = &fakeName + + serviceName := "vtservice" + closer := StartTracing(serviceName) + _, ok := closer.(*fakeTracer) + if !ok { + t.Fatalf("did not get the expected tracer") + } + + span, _ := NewSpan(context.Background(), "span-name") + AnnotateSQL(span, "SELECT * FROM Tabble WHERE name = 'SECRET_INFORMATION'") + span.Finish() + + tracer.assertNoSpanWith(t, "SECRET_INFORMATION") +} + +type fakeTracer struct { + name string + log []string +} + +func (f *fakeTracer) NewFromString(parent, label string) (Span, error) { + panic("implement me") +} + +func (f *fakeTracer) New(parent Span, label string) Span { + f.log = append(f.log, "span started") + + return &mockSpan{tracer: f} +} + +func (f *fakeTracer) FromContext(ctx context.Context) (Span, bool) { + return nil, false +} + +func (f *fakeTracer) NewContext(parent context.Context, span Span) context.Context { + return parent +} + +func (f *fakeTracer) AddGrpcServerOptions(addInterceptors func(s grpc.StreamServerInterceptor, u grpc.UnaryServerInterceptor)) { + panic("implement me") +} + +func (f *fakeTracer) AddGrpcClientOptions(addInterceptors func(s grpc.StreamClientInterceptor, u grpc.UnaryClientInterceptor)) { + panic("implement me") +} + +func (f *fakeTracer) Close() error { + panic("implement me") +} + +func (f *fakeTracer) assertNoSpanWith(t *testing.T, substr string) { + t.Helper() + for _, logLine := range f.log { + if strings.Contains(logLine, substr) { + t.Fatalf("expected to not find [%v] but found it in [%v]", substr, logLine) + } + } +} + +type mockSpan struct { + tracer *fakeTracer +} + +func (m *mockSpan) Finish() { + m.tracer.log = append(m.tracer.log, "span finished") +} + +func (m *mockSpan) Annotate(key string, value interface{}) { + m.tracer.log = append(m.tracer.log, fmt.Sprintf("key: %v values:%v", key, value)) +} diff --git a/internal/stackql-parser-fork/go/trace/utils.go b/internal/stackql-parser-fork/go/trace/utils.go new file mode 100644 index 00000000..f720ecd4 --- /dev/null +++ b/internal/stackql-parser-fork/go/trace/utils.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import ( + "io" + + "github.com/stackql/stackql-parser/go/vt/log" +) + +// LogErrorsWhenClosing will close the provided Closer, and log any errors it generates +func LogErrorsWhenClosing(in io.Closer) func() { + return func() { + err := in.Close() + if err != nil { + log.Error(err) + } + } +} diff --git a/internal/stackql-parser-fork/go/vt/env/env.go b/internal/stackql-parser-fork/go/vt/env/env.go new file mode 100644 index 00000000..70feb431 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/env/env.go @@ -0,0 +1,102 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package env + +import ( + "errors" + "fmt" + "os" + "os/exec" + "path" + "path/filepath" + "strings" +) + +const ( + // DefaultVtDataRoot is the default value for VTROOT environment variable + DefaultVtDataRoot = "/vt" + // DefaultVtRoot is only required for hooks + DefaultVtRoot = "/usr/local/vitess" +) + +// VtRoot returns $VTROOT or tries to guess its value if it's not set. +// This is the root for the 'vt' distribution, which contains bin/vttablet +// for instance. +func VtRoot() (root string, err error) { + if root = os.Getenv("VTROOT"); root != "" { + return root, nil + } + command, err := filepath.Abs(os.Args[0]) + if err != nil { + return + } + dir := path.Dir(command) + + if strings.HasSuffix(dir, "/bin") { + return path.Dir(dir), nil + } + return DefaultVtRoot, nil +} + +// VtDataRoot returns $VTDATAROOT or the default if $VTDATAROOT is not +// set. VtDataRoot does not check if the directory exists and is +// writable. +func VtDataRoot() string { + if dataRoot := os.Getenv("VTDATAROOT"); dataRoot != "" { + return dataRoot + } + + return DefaultVtDataRoot +} + +// VtMysqlRoot returns the root for the mysql distribution, +// which contains bin/mysql CLI for instance. +// If it is not set, look for mysqld in the path. +func VtMysqlRoot() (string, error) { + // if the environment variable is set, use that + if root := os.Getenv("VT_MYSQL_ROOT"); root != "" { + return root, nil + } + + // otherwise let's look for mysqld in the PATH. + // ensure that /usr/sbin is included, as it might not be by default + // This is the default location for mysqld from packages. + newPath := fmt.Sprintf("/usr/sbin:%s", os.Getenv("PATH")) + os.Setenv("PATH", newPath) + path, err := exec.LookPath("mysqld") + if err != nil { + return "", errors.New("VT_MYSQL_ROOT is not set and no mysqld could be found in your PATH") + } + path = filepath.Dir(filepath.Dir(path)) // strip mysqld, and the sbin + return path, nil +} + +// VtMysqlBaseDir returns the Mysql base directory, which +// contains the fill_help_tables.sql script for instance +func VtMysqlBaseDir() (string, error) { + // if the environment variable is set, use that + if root := os.Getenv("VT_MYSQL_BASEDIR"); root != "" { + return root, nil + } + + // otherwise let's use VtMysqlRoot + root, err := VtMysqlRoot() + if err != nil { + return "", errors.New("VT_MYSQL_BASEDIR is not set. Please set $VT_MYSQL_BASEDIR") + } + return root, nil +} diff --git a/internal/stackql-parser-fork/go/vt/env/env_test.go b/internal/stackql-parser-fork/go/vt/env/env_test.go new file mode 100644 index 00000000..4aa53a25 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/env/env_test.go @@ -0,0 +1,45 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package env + +import ( + "os" + "testing" +) + +func TestVtDataRoot(t *testing.T) { + envVar := "VTDATAROOT" + oldEnvVar := os.Getenv(envVar) + + if oldEnvVar != "" { + os.Setenv(envVar, "") + } + + defer os.Setenv(envVar, oldEnvVar) + + root := VtDataRoot() + if root != DefaultVtDataRoot { + t.Errorf("When VTDATAROOT is not set, the default value should be %v, not %v.", DefaultVtDataRoot, root) + } + + passed := "/tmp" + os.Setenv(envVar, passed) + root = VtDataRoot() + if root != passed { + t.Errorf("The value of VtDataRoot should be %v, not %v.", passed, root) + } +} diff --git a/internal/stackql-parser-fork/go/vt/hook/hook.go b/internal/stackql-parser-fork/go/vt/hook/hook.go new file mode 100644 index 00000000..17d20545 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/hook/hook.go @@ -0,0 +1,286 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hook + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path" + "strings" + "syscall" + + vtenv "github.com/stackql/stackql-parser/go/vt/env" + "github.com/stackql/stackql-parser/go/vt/log" +) + +// Hook is the input structure for this library. +type Hook struct { + Name string + Parameters []string + ExtraEnv map[string]string +} + +// HookResult is returned by the Execute method. +type HookResult struct { + ExitStatus int // HOOK_SUCCESS if it succeeded + Stdout string + Stderr string +} + +// The hook will return a value between 0 and 255. 0 if it succeeds. +// So we have these additional values here for more information. +const ( + // HOOK_SUCCESS is returned when the hook worked. + HOOK_SUCCESS = 0 + + // HOOK_DOES_NOT_EXIST is returned when the hook cannot be found. + HOOK_DOES_NOT_EXIST = -1 + + // HOOK_STAT_FAILED is returned when the hook exists, but stat + // on it fails. + HOOK_STAT_FAILED = -2 + + // HOOK_CANNOT_GET_EXIT_STATUS is returned when after + // execution, we fail to get the exit code for the hook. + HOOK_CANNOT_GET_EXIT_STATUS = -3 + + // HOOK_INVALID_NAME is returned if a hook has an invalid name. + HOOK_INVALID_NAME = -4 + + // HOOK_VTROOT_ERROR is returned if VTROOT is not set properly. + HOOK_VTROOT_ERROR = -5 + + // HOOK_GENERIC_ERROR is returned for unknown errors. + HOOK_GENERIC_ERROR = -6 +) + +// WaitFunc is a return type for the Pipe methods. +// It returns the process stderr and an error, if any. +type WaitFunc func() (string, error) + +// NewHook returns a Hook object with the provided name and params. +func NewHook(name string, params []string) *Hook { + return &Hook{Name: name, Parameters: params} +} + +// NewSimpleHook returns a Hook object with just a name. +func NewSimpleHook(name string) *Hook { + return &Hook{Name: name} +} + +// NewHookWithEnv returns a Hook object with the provided name, params and ExtraEnv. +func NewHookWithEnv(name string, params []string, env map[string]string) *Hook { + return &Hook{Name: name, Parameters: params, ExtraEnv: env} +} + +// findHook trie to locate the hook, and returns the exec.Cmd for it. +func (hook *Hook) findHook() (*exec.Cmd, int, error) { + // Check the hook path. + if strings.Contains(hook.Name, "/") { + return nil, HOOK_INVALID_NAME, fmt.Errorf("hook cannot contain '/'") + } + + // Find our root. + root, err := vtenv.VtRoot() + if err != nil { + return nil, HOOK_VTROOT_ERROR, fmt.Errorf("cannot get VTROOT: %v", err) + } + + // See if the hook exists. + vthook := path.Join(root, "vthook", hook.Name) + _, err = os.Stat(vthook) + if err != nil { + if os.IsNotExist(err) { + return nil, HOOK_DOES_NOT_EXIST, fmt.Errorf("missing hook %v", vthook) + } + + return nil, HOOK_STAT_FAILED, fmt.Errorf("cannot stat hook %v: %v", vthook, err) + } + + // Configure the command. + log.Infof("hook: executing hook: %v %v", vthook, strings.Join(hook.Parameters, " ")) + cmd := exec.Command(vthook, hook.Parameters...) + if len(hook.ExtraEnv) > 0 { + cmd.Env = os.Environ() + for key, value := range hook.ExtraEnv { + cmd.Env = append(cmd.Env, key+"="+value) + } + } + + return cmd, HOOK_SUCCESS, nil +} + +// Execute tries to execute the Hook and returns a HookResult. +func (hook *Hook) Execute() (result *HookResult) { + result = &HookResult{} + + // Find the hook. + cmd, status, err := hook.findHook() + if err != nil { + result.ExitStatus = status + result.Stderr = err.Error() + "\n" + return result + } + + // Run it. + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err = cmd.Run() + result.Stdout = stdout.String() + result.Stderr = stderr.String() + if err == nil { + result.ExitStatus = HOOK_SUCCESS + } else { + if cmd.ProcessState != nil && cmd.ProcessState.Sys() != nil { + result.ExitStatus = cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + } else { + result.ExitStatus = HOOK_CANNOT_GET_EXIT_STATUS + } + result.Stderr += "ERROR: " + err.Error() + "\n" + } + + log.Infof("hook: result is %v", result.String()) + + return result +} + +// ExecuteOptional executes an optional hook, logs if it doesn't +// exist, and returns a printable error. +func (hook *Hook) ExecuteOptional() error { + hr := hook.Execute() + switch hr.ExitStatus { + case HOOK_DOES_NOT_EXIST: + log.Infof("%v hook doesn't exist", hook.Name) + case HOOK_VTROOT_ERROR: + log.Infof("VTROOT not set, so %v hook doesn't exist", hook.Name) + case HOOK_SUCCESS: + // nothing to do here + default: + return fmt.Errorf("%v hook failed(%v): %v", hook.Name, hr.ExitStatus, hr.Stderr) + } + return nil +} + +// ExecuteAsWritePipe will execute the hook as in a Unix pipe, +// directing output to the provided writer. It will return: +// - an io.WriteCloser to write data to. +// - a WaitFunc method to call to wait for the process to exit, +// that returns stderr and the cmd.Wait() error. +// - an error code and an error if anything fails. +func (hook *Hook) ExecuteAsWritePipe(out io.Writer) (io.WriteCloser, WaitFunc, int, error) { + // Find the hook. + cmd, status, err := hook.findHook() + if err != nil { + return nil, nil, status, err + } + + // Configure the process's stdin, stdout, and stderr. + in, err := cmd.StdinPipe() + if err != nil { + return nil, nil, HOOK_GENERIC_ERROR, fmt.Errorf("failed to configure stdin: %v", err) + } + cmd.Stdout = out + var stderr bytes.Buffer + cmd.Stderr = &stderr + + // Start the process. + err = cmd.Start() + if err != nil { + status = HOOK_CANNOT_GET_EXIT_STATUS + if cmd.ProcessState != nil && cmd.ProcessState.Sys() != nil { + status = cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + } + return nil, nil, status, err + } + + // And return + return in, func() (string, error) { + err := cmd.Wait() + return stderr.String(), err + }, HOOK_SUCCESS, nil +} + +// ExecuteAsReadPipe will execute the hook as in a Unix pipe, reading +// from the provided reader. It will return: +// - an io.Reader to read piped data from. +// - a WaitFunc method to call to wait for the process to exit, that +// returns stderr and the Wait() error. +// - an error code and an error if anything fails. +func (hook *Hook) ExecuteAsReadPipe(in io.Reader) (io.Reader, WaitFunc, int, error) { + // Find the hook. + cmd, status, err := hook.findHook() + if err != nil { + return nil, nil, status, err + } + + // Configure the process's stdin, stdout, and stderr. + out, err := cmd.StdoutPipe() + if err != nil { + return nil, nil, HOOK_GENERIC_ERROR, fmt.Errorf("failed to configure stdout: %v", err) + } + cmd.Stdin = in + var stderr bytes.Buffer + cmd.Stderr = &stderr + + // Start the process. + err = cmd.Start() + if err != nil { + status = HOOK_CANNOT_GET_EXIT_STATUS + if cmd.ProcessState != nil && cmd.ProcessState.Sys() != nil { + status = cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + } + return nil, nil, status, err + } + + // And return + return out, func() (string, error) { + err := cmd.Wait() + return stderr.String(), err + }, HOOK_SUCCESS, nil +} + +// String returns a printable version of the HookResult +func (hr *HookResult) String() string { + result := "result: " + switch hr.ExitStatus { + case HOOK_SUCCESS: + result += "HOOK_SUCCESS" + case HOOK_DOES_NOT_EXIST: + result += "HOOK_DOES_NOT_EXIST" + case HOOK_STAT_FAILED: + result += "HOOK_STAT_FAILED" + case HOOK_CANNOT_GET_EXIT_STATUS: + result += "HOOK_CANNOT_GET_EXIT_STATUS" + case HOOK_INVALID_NAME: + result += "HOOK_INVALID_NAME" + case HOOK_VTROOT_ERROR: + result += "HOOK_VTROOT_ERROR" + default: + result += fmt.Sprintf("exit(%v)", hr.ExitStatus) + } + if hr.Stdout != "" { + result += "\nstdout:\n" + hr.Stdout + } + if hr.Stderr != "" { + result += "\nstderr:\n" + hr.Stderr + } + return result +} diff --git a/internal/stackql-parser-fork/go/vt/log/log.go b/internal/stackql-parser-fork/go/vt/log/log.go new file mode 100644 index 00000000..a7348311 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/log/log.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// You can modify this file to hook up a different logging library instead of glog. +// If you adapt to a different logging framework, you may need to use that +// framework's equivalent of *Depth() functions so the file and line number printed +// point to the real caller instead of your adapter function. + +package log + +import ( + "flag" + + "github.com/golang/glog" +) + +// Level is used with V() to test log verbosity. +type Level = glog.Level + +var ( + // V quickly checks if the logging verbosity meets a threshold. + V = glog.V + + // Flush ensures any pending I/O is written. + Flush = glog.Flush + + // Info formats arguments like fmt.Print. + Info = glog.Info + // Infof formats arguments like fmt.Printf. + Infof = glog.Infof + // InfoDepth formats arguments like fmt.Print and uses depth to choose which call frame to log. + InfoDepth = glog.InfoDepth + + // Warning formats arguments like fmt.Print. + Warning = glog.Warning + // Warningf formats arguments like fmt.Printf. + Warningf = glog.Warningf + // WarningDepth formats arguments like fmt.Print and uses depth to choose which call frame to log. + WarningDepth = glog.WarningDepth + + // Error formats arguments like fmt.Print. + Error = glog.Error + // Errorf formats arguments like fmt.Printf. + Errorf = glog.Errorf + // ErrorDepth formats arguments like fmt.Print and uses depth to choose which call frame to log. + ErrorDepth = glog.ErrorDepth + + // Exit formats arguments like fmt.Print. + Exit = glog.Exit + // Exitf formats arguments like fmt.Printf. + Exitf = glog.Exitf + // ExitDepth formats arguments like fmt.Print and uses depth to choose which call frame to log. + ExitDepth = glog.ExitDepth + + // Fatal formats arguments like fmt.Print. + Fatal = glog.Fatal + // Fatalf formats arguments like fmt.Printf + Fatalf = glog.Fatalf + // FatalDepth formats arguments like fmt.Print and uses depth to choose which call frame to log. + FatalDepth = glog.FatalDepth +) + +func init() { + flag.Uint64Var(&glog.MaxSize, "log_rotate_max_size", glog.MaxSize, "size in bytes at which logs are rotated (glog.MaxSize)") +} diff --git a/internal/stackql-parser-fork/go/vt/logz/logz_utils.go b/internal/stackql-parser-fork/go/vt/logz/logz_utils.go new file mode 100644 index 00000000..71104760 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/logz/logz_utils.go @@ -0,0 +1,155 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package logz provides an infrastructure to expose a list of entries as +// a sortable table on a webpage. +// +// It is used by many internal vttablet pages e.g. /queryz, /querylogz, /schemaz +// /streamqueryz or /txlogz. +// +// See tabletserver/querylogz.go for an example how to use it. +package logz + +import ( + "bytes" + "net/http" +) + +// StartHTMLTable writes the start of a logz-style table to an HTTP response. +func StartHTMLTable(w http.ResponseWriter) { + w.Write([]byte(` + + + + + + + +`)) +} + +// EndHTMLTable writes the end of a logz-style table to an HTTP response. +func EndHTMLTable(w http.ResponseWriter) { + w.Write([]byte(` +
+`)) +} + +// Wrappable inserts zero-width whitespaces to make +// the string wrappable. +func Wrappable(in string) string { + buf := bytes.NewBuffer(nil) + for _, ch := range in { + buf.WriteRune(ch) + if ch == ',' || ch == ')' { + // zero-width whitespace + buf.WriteRune('\u200B') + } + } + return buf.String() +} diff --git a/internal/stackql-parser-fork/go/vt/proto/automation/automation.pb.go b/internal/stackql-parser-fork/go/vt/proto/automation/automation.pb.go new file mode 100644 index 00000000..fe039ca8 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/automation/automation.pb.go @@ -0,0 +1,585 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: automation.proto + +package automation + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ClusterOperationState int32 + +const ( + ClusterOperationState_UNKNOWN_CLUSTER_OPERATION_STATE ClusterOperationState = 0 + ClusterOperationState_CLUSTER_OPERATION_NOT_STARTED ClusterOperationState = 1 + ClusterOperationState_CLUSTER_OPERATION_RUNNING ClusterOperationState = 2 + ClusterOperationState_CLUSTER_OPERATION_DONE ClusterOperationState = 3 +) + +var ClusterOperationState_name = map[int32]string{ + 0: "UNKNOWN_CLUSTER_OPERATION_STATE", + 1: "CLUSTER_OPERATION_NOT_STARTED", + 2: "CLUSTER_OPERATION_RUNNING", + 3: "CLUSTER_OPERATION_DONE", +} + +var ClusterOperationState_value = map[string]int32{ + "UNKNOWN_CLUSTER_OPERATION_STATE": 0, + "CLUSTER_OPERATION_NOT_STARTED": 1, + "CLUSTER_OPERATION_RUNNING": 2, + "CLUSTER_OPERATION_DONE": 3, +} + +func (x ClusterOperationState) String() string { + return proto.EnumName(ClusterOperationState_name, int32(x)) +} + +func (ClusterOperationState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_06e15ad07c41cb38, []int{0} +} + +type TaskState int32 + +const ( + TaskState_UNKNOWN_TASK_STATE TaskState = 0 + TaskState_NOT_STARTED TaskState = 1 + TaskState_RUNNING TaskState = 2 + TaskState_DONE TaskState = 3 +) + +var TaskState_name = map[int32]string{ + 0: "UNKNOWN_TASK_STATE", + 1: "NOT_STARTED", + 2: "RUNNING", + 3: "DONE", +} + +var TaskState_value = map[string]int32{ + "UNKNOWN_TASK_STATE": 0, + "NOT_STARTED": 1, + "RUNNING": 2, + "DONE": 3, +} + +func (x TaskState) String() string { + return proto.EnumName(TaskState_name, int32(x)) +} + +func (TaskState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_06e15ad07c41cb38, []int{1} +} + +type ClusterOperation struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // TaskContainer are processed sequentially, one at a time. + SerialTasks []*TaskContainer `protobuf:"bytes,2,rep,name=serial_tasks,json=serialTasks,proto3" json:"serial_tasks,omitempty"` + // Cached value. This has to be re-evaluated e.g. after a checkpoint load because running tasks may have already finished. + State ClusterOperationState `protobuf:"varint,3,opt,name=state,proto3,enum=automation.ClusterOperationState" json:"state,omitempty"` + // Error of the first task which failed. Set after state advanced to CLUSTER_OPERATION_DONE. If empty, all tasks succeeded. Cached value, see state above. + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterOperation) Reset() { *m = ClusterOperation{} } +func (m *ClusterOperation) String() string { return proto.CompactTextString(m) } +func (*ClusterOperation) ProtoMessage() {} +func (*ClusterOperation) Descriptor() ([]byte, []int) { + return fileDescriptor_06e15ad07c41cb38, []int{0} +} + +func (m *ClusterOperation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterOperation.Unmarshal(m, b) +} +func (m *ClusterOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterOperation.Marshal(b, m, deterministic) +} +func (m *ClusterOperation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterOperation.Merge(m, src) +} +func (m *ClusterOperation) XXX_Size() int { + return xxx_messageInfo_ClusterOperation.Size(m) +} +func (m *ClusterOperation) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterOperation.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterOperation proto.InternalMessageInfo + +func (m *ClusterOperation) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ClusterOperation) GetSerialTasks() []*TaskContainer { + if m != nil { + return m.SerialTasks + } + return nil +} + +func (m *ClusterOperation) GetState() ClusterOperationState { + if m != nil { + return m.State + } + return ClusterOperationState_UNKNOWN_CLUSTER_OPERATION_STATE +} + +func (m *ClusterOperation) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +// TaskContainer holds one or more task which may be executed in parallel. +// "concurrency", if > 0, limits the amount of concurrently executed tasks. +type TaskContainer struct { + ParallelTasks []*Task `protobuf:"bytes,1,rep,name=parallel_tasks,json=parallelTasks,proto3" json:"parallel_tasks,omitempty"` + Concurrency int32 `protobuf:"varint,2,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskContainer) Reset() { *m = TaskContainer{} } +func (m *TaskContainer) String() string { return proto.CompactTextString(m) } +func (*TaskContainer) ProtoMessage() {} +func (*TaskContainer) Descriptor() ([]byte, []int) { + return fileDescriptor_06e15ad07c41cb38, []int{1} +} + +func (m *TaskContainer) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskContainer.Unmarshal(m, b) +} +func (m *TaskContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskContainer.Marshal(b, m, deterministic) +} +func (m *TaskContainer) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskContainer.Merge(m, src) +} +func (m *TaskContainer) XXX_Size() int { + return xxx_messageInfo_TaskContainer.Size(m) +} +func (m *TaskContainer) XXX_DiscardUnknown() { + xxx_messageInfo_TaskContainer.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskContainer proto.InternalMessageInfo + +func (m *TaskContainer) GetParallelTasks() []*Task { + if m != nil { + return m.ParallelTasks + } + return nil +} + +func (m *TaskContainer) GetConcurrency() int32 { + if m != nil { + return m.Concurrency + } + return 0 +} + +// Task represents a specific task which should be automatically executed. +type Task struct { + // Task specification. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Runtime data. + Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` + State TaskState `protobuf:"varint,4,opt,name=state,proto3,enum=automation.TaskState" json:"state,omitempty"` + // Set after state advanced to DONE. + Output string `protobuf:"bytes,5,opt,name=output,proto3" json:"output,omitempty"` + // Set after state advanced to DONE. If empty, the task did succeed. + Error string `protobuf:"bytes,6,opt,name=error,proto3" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Task) Reset() { *m = Task{} } +func (m *Task) String() string { return proto.CompactTextString(m) } +func (*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { + return fileDescriptor_06e15ad07c41cb38, []int{2} +} + +func (m *Task) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Task.Unmarshal(m, b) +} +func (m *Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Task.Marshal(b, m, deterministic) +} +func (m *Task) XXX_Merge(src proto.Message) { + xxx_messageInfo_Task.Merge(m, src) +} +func (m *Task) XXX_Size() int { + return xxx_messageInfo_Task.Size(m) +} +func (m *Task) XXX_DiscardUnknown() { + xxx_messageInfo_Task.DiscardUnknown(m) +} + +var xxx_messageInfo_Task proto.InternalMessageInfo + +func (m *Task) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Task) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Task) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Task) GetState() TaskState { + if m != nil { + return m.State + } + return TaskState_UNKNOWN_TASK_STATE +} + +func (m *Task) GetOutput() string { + if m != nil { + return m.Output + } + return "" +} + +func (m *Task) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type EnqueueClusterOperationRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnqueueClusterOperationRequest) Reset() { *m = EnqueueClusterOperationRequest{} } +func (m *EnqueueClusterOperationRequest) String() string { return proto.CompactTextString(m) } +func (*EnqueueClusterOperationRequest) ProtoMessage() {} +func (*EnqueueClusterOperationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_06e15ad07c41cb38, []int{3} +} + +func (m *EnqueueClusterOperationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnqueueClusterOperationRequest.Unmarshal(m, b) +} +func (m *EnqueueClusterOperationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnqueueClusterOperationRequest.Marshal(b, m, deterministic) +} +func (m *EnqueueClusterOperationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnqueueClusterOperationRequest.Merge(m, src) +} +func (m *EnqueueClusterOperationRequest) XXX_Size() int { + return xxx_messageInfo_EnqueueClusterOperationRequest.Size(m) +} +func (m *EnqueueClusterOperationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EnqueueClusterOperationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EnqueueClusterOperationRequest proto.InternalMessageInfo + +func (m *EnqueueClusterOperationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnqueueClusterOperationRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type EnqueueClusterOperationResponse struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnqueueClusterOperationResponse) Reset() { *m = EnqueueClusterOperationResponse{} } +func (m *EnqueueClusterOperationResponse) String() string { return proto.CompactTextString(m) } +func (*EnqueueClusterOperationResponse) ProtoMessage() {} +func (*EnqueueClusterOperationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_06e15ad07c41cb38, []int{4} +} + +func (m *EnqueueClusterOperationResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnqueueClusterOperationResponse.Unmarshal(m, b) +} +func (m *EnqueueClusterOperationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnqueueClusterOperationResponse.Marshal(b, m, deterministic) +} +func (m *EnqueueClusterOperationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnqueueClusterOperationResponse.Merge(m, src) +} +func (m *EnqueueClusterOperationResponse) XXX_Size() int { + return xxx_messageInfo_EnqueueClusterOperationResponse.Size(m) +} +func (m *EnqueueClusterOperationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EnqueueClusterOperationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EnqueueClusterOperationResponse proto.InternalMessageInfo + +func (m *EnqueueClusterOperationResponse) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type GetClusterOperationStateRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterOperationStateRequest) Reset() { *m = GetClusterOperationStateRequest{} } +func (m *GetClusterOperationStateRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterOperationStateRequest) ProtoMessage() {} +func (*GetClusterOperationStateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_06e15ad07c41cb38, []int{5} +} + +func (m *GetClusterOperationStateRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterOperationStateRequest.Unmarshal(m, b) +} +func (m *GetClusterOperationStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterOperationStateRequest.Marshal(b, m, deterministic) +} +func (m *GetClusterOperationStateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterOperationStateRequest.Merge(m, src) +} +func (m *GetClusterOperationStateRequest) XXX_Size() int { + return xxx_messageInfo_GetClusterOperationStateRequest.Size(m) +} +func (m *GetClusterOperationStateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterOperationStateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterOperationStateRequest proto.InternalMessageInfo + +func (m *GetClusterOperationStateRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type GetClusterOperationStateResponse struct { + State ClusterOperationState `protobuf:"varint,1,opt,name=state,proto3,enum=automation.ClusterOperationState" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterOperationStateResponse) Reset() { *m = GetClusterOperationStateResponse{} } +func (m *GetClusterOperationStateResponse) String() string { return proto.CompactTextString(m) } +func (*GetClusterOperationStateResponse) ProtoMessage() {} +func (*GetClusterOperationStateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_06e15ad07c41cb38, []int{6} +} + +func (m *GetClusterOperationStateResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterOperationStateResponse.Unmarshal(m, b) +} +func (m *GetClusterOperationStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterOperationStateResponse.Marshal(b, m, deterministic) +} +func (m *GetClusterOperationStateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterOperationStateResponse.Merge(m, src) +} +func (m *GetClusterOperationStateResponse) XXX_Size() int { + return xxx_messageInfo_GetClusterOperationStateResponse.Size(m) +} +func (m *GetClusterOperationStateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterOperationStateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterOperationStateResponse proto.InternalMessageInfo + +func (m *GetClusterOperationStateResponse) GetState() ClusterOperationState { + if m != nil { + return m.State + } + return ClusterOperationState_UNKNOWN_CLUSTER_OPERATION_STATE +} + +type GetClusterOperationDetailsRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterOperationDetailsRequest) Reset() { *m = GetClusterOperationDetailsRequest{} } +func (m *GetClusterOperationDetailsRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterOperationDetailsRequest) ProtoMessage() {} +func (*GetClusterOperationDetailsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_06e15ad07c41cb38, []int{7} +} + +func (m *GetClusterOperationDetailsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterOperationDetailsRequest.Unmarshal(m, b) +} +func (m *GetClusterOperationDetailsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterOperationDetailsRequest.Marshal(b, m, deterministic) +} +func (m *GetClusterOperationDetailsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterOperationDetailsRequest.Merge(m, src) +} +func (m *GetClusterOperationDetailsRequest) XXX_Size() int { + return xxx_messageInfo_GetClusterOperationDetailsRequest.Size(m) +} +func (m *GetClusterOperationDetailsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterOperationDetailsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterOperationDetailsRequest proto.InternalMessageInfo + +func (m *GetClusterOperationDetailsRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type GetClusterOperationDetailsResponse struct { + // Full snapshot of the execution e.g. including output of each task. + ClusterOp *ClusterOperation `protobuf:"bytes,2,opt,name=cluster_op,json=clusterOp,proto3" json:"cluster_op,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterOperationDetailsResponse) Reset() { *m = GetClusterOperationDetailsResponse{} } +func (m *GetClusterOperationDetailsResponse) String() string { return proto.CompactTextString(m) } +func (*GetClusterOperationDetailsResponse) ProtoMessage() {} +func (*GetClusterOperationDetailsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_06e15ad07c41cb38, []int{8} +} + +func (m *GetClusterOperationDetailsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterOperationDetailsResponse.Unmarshal(m, b) +} +func (m *GetClusterOperationDetailsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterOperationDetailsResponse.Marshal(b, m, deterministic) +} +func (m *GetClusterOperationDetailsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterOperationDetailsResponse.Merge(m, src) +} +func (m *GetClusterOperationDetailsResponse) XXX_Size() int { + return xxx_messageInfo_GetClusterOperationDetailsResponse.Size(m) +} +func (m *GetClusterOperationDetailsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterOperationDetailsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterOperationDetailsResponse proto.InternalMessageInfo + +func (m *GetClusterOperationDetailsResponse) GetClusterOp() *ClusterOperation { + if m != nil { + return m.ClusterOp + } + return nil +} + +func init() { + proto.RegisterEnum("automation.ClusterOperationState", ClusterOperationState_name, ClusterOperationState_value) + proto.RegisterEnum("automation.TaskState", TaskState_name, TaskState_value) + proto.RegisterType((*ClusterOperation)(nil), "automation.ClusterOperation") + proto.RegisterType((*TaskContainer)(nil), "automation.TaskContainer") + proto.RegisterType((*Task)(nil), "automation.Task") + proto.RegisterMapType((map[string]string)(nil), "automation.Task.ParametersEntry") + proto.RegisterType((*EnqueueClusterOperationRequest)(nil), "automation.EnqueueClusterOperationRequest") + proto.RegisterMapType((map[string]string)(nil), "automation.EnqueueClusterOperationRequest.ParametersEntry") + proto.RegisterType((*EnqueueClusterOperationResponse)(nil), "automation.EnqueueClusterOperationResponse") + proto.RegisterType((*GetClusterOperationStateRequest)(nil), "automation.GetClusterOperationStateRequest") + proto.RegisterType((*GetClusterOperationStateResponse)(nil), "automation.GetClusterOperationStateResponse") + proto.RegisterType((*GetClusterOperationDetailsRequest)(nil), "automation.GetClusterOperationDetailsRequest") + proto.RegisterType((*GetClusterOperationDetailsResponse)(nil), "automation.GetClusterOperationDetailsResponse") +} + +func init() { proto.RegisterFile("automation.proto", fileDescriptor_06e15ad07c41cb38) } + +var fileDescriptor_06e15ad07c41cb38 = []byte{ + // 588 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xdd, 0x6e, 0xd3, 0x3e, + 0x18, 0xc6, 0xff, 0x49, 0xdb, 0xfd, 0xe9, 0x1b, 0xb6, 0x45, 0x16, 0x9b, 0xb2, 0x89, 0xb1, 0x2c, + 0x1c, 0x50, 0x86, 0xd4, 0x8a, 0xed, 0x60, 0x68, 0x80, 0xc4, 0xd8, 0xa2, 0x69, 0x1a, 0x4a, 0x26, + 0x37, 0x13, 0xd2, 0x38, 0xa8, 0x4c, 0x67, 0xa1, 0xd0, 0x34, 0xce, 0x6c, 0xa7, 0x52, 0x6f, 0x80, + 0x8b, 0xe0, 0x26, 0xb8, 0x14, 0x6e, 0x09, 0xe5, 0xab, 0x4d, 0xd3, 0x0f, 0x09, 0x71, 0x66, 0xbf, + 0x7e, 0xde, 0xe7, 0x7d, 0xfc, 0x6b, 0x1d, 0xd0, 0x49, 0x2c, 0xd9, 0x90, 0x48, 0x9f, 0x85, 0xed, + 0x88, 0x33, 0xc9, 0x10, 0x4c, 0x2b, 0xd6, 0x2f, 0x05, 0xf4, 0xf3, 0x20, 0x16, 0x92, 0x72, 0x37, + 0xa2, 0x3c, 0x2d, 0xa2, 0x0d, 0x50, 0xfd, 0x7b, 0x43, 0x31, 0x95, 0x56, 0x13, 0xab, 0xfe, 0x3d, + 0x7a, 0x07, 0x8f, 0x05, 0xe5, 0x3e, 0x09, 0x7a, 0x92, 0x88, 0x81, 0x30, 0x54, 0xb3, 0xd6, 0xd2, + 0x8e, 0x76, 0xda, 0x25, 0x67, 0x8f, 0x88, 0xc1, 0x39, 0x0b, 0x25, 0xf1, 0x43, 0xca, 0xb1, 0x96, + 0xc9, 0x93, 0xa2, 0x40, 0x27, 0xd0, 0x10, 0x92, 0x48, 0x6a, 0xd4, 0x4c, 0xa5, 0xb5, 0x71, 0x74, + 0x50, 0x6e, 0xab, 0x8e, 0xee, 0x26, 0x42, 0x9c, 0xe9, 0xd1, 0x13, 0x68, 0x50, 0xce, 0x19, 0x37, + 0xea, 0x69, 0x92, 0x6c, 0x63, 0x7d, 0x87, 0xf5, 0x99, 0x61, 0xe8, 0x04, 0x36, 0x22, 0xc2, 0x49, + 0x10, 0xd0, 0x22, 0x9f, 0x92, 0xe6, 0xd3, 0xab, 0xf9, 0xf0, 0x7a, 0xa1, 0xcb, 0x82, 0x99, 0xa0, + 0xf5, 0x59, 0xd8, 0x8f, 0x39, 0xa7, 0x61, 0x7f, 0x6c, 0xa8, 0xa6, 0xd2, 0x6a, 0xe0, 0x72, 0xc9, + 0xfa, 0xa1, 0x42, 0x3d, 0xd1, 0x22, 0x04, 0xf5, 0x90, 0x0c, 0x69, 0xce, 0x24, 0x5d, 0xa3, 0x0f, + 0x00, 0x89, 0xdf, 0x90, 0x4a, 0xca, 0x0b, 0x26, 0x66, 0x75, 0x66, 0xfb, 0x66, 0x22, 0xb1, 0x43, + 0xc9, 0xc7, 0xb8, 0xd4, 0x93, 0x73, 0xae, 0x4d, 0x38, 0xbf, 0x2a, 0x48, 0xd5, 0x53, 0x52, 0x5b, + 0x55, 0xb3, 0x19, 0x3a, 0xdb, 0xb0, 0xc6, 0x62, 0x19, 0xc5, 0xd2, 0x68, 0xa4, 0x06, 0xf9, 0x6e, + 0x4a, 0x6d, 0xad, 0x44, 0x6d, 0xf7, 0x3d, 0x6c, 0x56, 0x92, 0x20, 0x1d, 0x6a, 0x03, 0x3a, 0xce, + 0xaf, 0x94, 0x2c, 0x93, 0xd6, 0x11, 0x09, 0x62, 0x9a, 0xa2, 0x68, 0xe2, 0x6c, 0x73, 0xaa, 0xbe, + 0x51, 0xac, 0xdf, 0x0a, 0x3c, 0xb3, 0xc3, 0x87, 0x98, 0xc6, 0xb4, 0xfa, 0x93, 0x61, 0xfa, 0x10, + 0x53, 0x21, 0x17, 0x22, 0xba, 0x5b, 0x80, 0xe8, 0xb4, 0x7c, 0xab, 0xd5, 0x9e, 0xab, 0xe0, 0xfd, + 0xeb, 0x8d, 0x5e, 0xc3, 0xfe, 0xd2, 0xe1, 0x22, 0x62, 0xa1, 0xa0, 0xd5, 0x67, 0x90, 0xb4, 0x5c, + 0x52, 0xb9, 0xf8, 0x2f, 0x9b, 0x43, 0xa8, 0xb6, 0x7c, 0x01, 0x73, 0x79, 0x4b, 0x3e, 0x66, 0xf2, + 0x3e, 0x94, 0xbf, 0x7b, 0x1f, 0xd6, 0x31, 0x1c, 0x2c, 0x30, 0xbf, 0xa0, 0x92, 0xf8, 0x81, 0x58, + 0x96, 0x88, 0x80, 0xb5, 0xaa, 0x29, 0xcf, 0xf4, 0x16, 0xa0, 0x9f, 0x49, 0x7a, 0x2c, 0x4a, 0xe1, + 0x69, 0x47, 0x4f, 0x57, 0x05, 0xc3, 0xcd, 0x7e, 0x51, 0x39, 0xfc, 0xa9, 0xc0, 0xd6, 0xc2, 0xe0, + 0xe8, 0x39, 0xec, 0xdf, 0x3a, 0xd7, 0x8e, 0xfb, 0xd9, 0xe9, 0x9d, 0x7f, 0xba, 0xed, 0x7a, 0x36, + 0xee, 0xb9, 0x37, 0x36, 0x3e, 0xf3, 0xae, 0x5c, 0xa7, 0xd7, 0xf5, 0xce, 0x3c, 0x5b, 0xff, 0x0f, + 0x1d, 0xc0, 0xde, 0xfc, 0xa1, 0xe3, 0x7a, 0x89, 0x00, 0x7b, 0xf6, 0x85, 0xae, 0xa0, 0x3d, 0xd8, + 0x99, 0x97, 0xe0, 0x5b, 0xc7, 0xb9, 0x72, 0x2e, 0x75, 0x15, 0xed, 0xc2, 0xf6, 0xfc, 0xf1, 0x85, + 0xeb, 0xd8, 0x7a, 0xed, 0xf0, 0x1a, 0x9a, 0x93, 0xa7, 0x84, 0xb6, 0x01, 0x15, 0x79, 0xbc, 0xb3, + 0xee, 0xf5, 0x24, 0xc2, 0x26, 0x68, 0xb3, 0x03, 0x35, 0xf8, 0x7f, 0x6a, 0xff, 0x08, 0xea, 0x99, + 0xd9, 0xc7, 0x97, 0x77, 0x2f, 0x46, 0xbe, 0xa4, 0x42, 0xb4, 0x7d, 0xd6, 0xc9, 0x56, 0x9d, 0x6f, + 0xac, 0x33, 0x92, 0x9d, 0xf4, 0x4b, 0xdb, 0x99, 0x02, 0xfb, 0xba, 0x96, 0x56, 0x8e, 0xff, 0x04, + 0x00, 0x00, 0xff, 0xff, 0xa0, 0x42, 0x72, 0x53, 0x8f, 0x05, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/automationservice/automationservice.pb.go b/internal/stackql-parser-fork/go/vt/proto/automationservice/automationservice.pb.go new file mode 100644 index 00000000..4f3cf2a0 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/automationservice/automationservice.pb.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: automationservice.proto + +package automationservice + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + automation "github.com/stackql/stackql-parser/go/vt/proto/automation" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("automationservice.proto", fileDescriptor_c03abdd2a71b5164) } + +var fileDescriptor_c03abdd2a71b5164 = []byte{ + // 178 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2c, 0x2d, 0xc9, + 0xcf, 0x4d, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc4, 0x90, 0x90, 0x12, 0x40, 0x08, 0x41, 0x14, 0x19, 0x35, 0x32, + 0x71, 0x71, 0x39, 0xc2, 0x05, 0x85, 0x4a, 0xb8, 0xc4, 0x5d, 0xf3, 0x0a, 0x4b, 0x53, 0x4b, 0x53, + 0x9d, 0x73, 0x4a, 0x8b, 0x4b, 0x52, 0x8b, 0xfc, 0x0b, 0x52, 0x8b, 0x20, 0x52, 0x5a, 0x7a, 0x48, + 0x9a, 0x71, 0x28, 0x0a, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x91, 0xd2, 0x26, 0x4a, 0x6d, 0x71, + 0x01, 0xc8, 0x65, 0x4a, 0x0c, 0x42, 0xb5, 0x5c, 0x52, 0xee, 0xa9, 0x25, 0xe8, 0x0a, 0x5c, 0x52, + 0x4b, 0x12, 0x33, 0x73, 0x8a, 0x85, 0x74, 0x91, 0x0d, 0xc3, 0xad, 0x0e, 0x66, 0xb7, 0x1e, 0xb1, + 0xca, 0x61, 0xd6, 0x3b, 0x19, 0x44, 0xe9, 0x95, 0x65, 0x96, 0xa4, 0x16, 0x17, 0xeb, 0x65, 0xe6, + 0xeb, 0x43, 0x58, 0xfa, 0xe9, 0xf9, 0xfa, 0x65, 0x25, 0xfa, 0xe0, 0x30, 0xd2, 0xc7, 0x08, 0xc7, + 0x24, 0x36, 0xb0, 0x84, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8f, 0x4a, 0x9d, 0xc0, 0x7c, 0x01, + 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// AutomationClient is the client API for Automation service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type AutomationClient interface { + // Start a cluster operation. + EnqueueClusterOperation(ctx context.Context, in *automation.EnqueueClusterOperationRequest, opts ...grpc.CallOption) (*automation.EnqueueClusterOperationResponse, error) + // TODO(mberlin): Polling this is bad. Implement a subscribe mechanism to wait for changes? + // Get all details of an active cluster operation. + GetClusterOperationDetails(ctx context.Context, in *automation.GetClusterOperationDetailsRequest, opts ...grpc.CallOption) (*automation.GetClusterOperationDetailsResponse, error) +} + +type automationClient struct { + cc *grpc.ClientConn +} + +func NewAutomationClient(cc *grpc.ClientConn) AutomationClient { + return &automationClient{cc} +} + +func (c *automationClient) EnqueueClusterOperation(ctx context.Context, in *automation.EnqueueClusterOperationRequest, opts ...grpc.CallOption) (*automation.EnqueueClusterOperationResponse, error) { + out := new(automation.EnqueueClusterOperationResponse) + err := c.cc.Invoke(ctx, "/automationservice.Automation/EnqueueClusterOperation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *automationClient) GetClusterOperationDetails(ctx context.Context, in *automation.GetClusterOperationDetailsRequest, opts ...grpc.CallOption) (*automation.GetClusterOperationDetailsResponse, error) { + out := new(automation.GetClusterOperationDetailsResponse) + err := c.cc.Invoke(ctx, "/automationservice.Automation/GetClusterOperationDetails", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AutomationServer is the server API for Automation service. +type AutomationServer interface { + // Start a cluster operation. + EnqueueClusterOperation(context.Context, *automation.EnqueueClusterOperationRequest) (*automation.EnqueueClusterOperationResponse, error) + // TODO(mberlin): Polling this is bad. Implement a subscribe mechanism to wait for changes? + // Get all details of an active cluster operation. + GetClusterOperationDetails(context.Context, *automation.GetClusterOperationDetailsRequest) (*automation.GetClusterOperationDetailsResponse, error) +} + +// UnimplementedAutomationServer can be embedded to have forward compatible implementations. +type UnimplementedAutomationServer struct { +} + +func (*UnimplementedAutomationServer) EnqueueClusterOperation(ctx context.Context, req *automation.EnqueueClusterOperationRequest) (*automation.EnqueueClusterOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EnqueueClusterOperation not implemented") +} +func (*UnimplementedAutomationServer) GetClusterOperationDetails(ctx context.Context, req *automation.GetClusterOperationDetailsRequest) (*automation.GetClusterOperationDetailsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetClusterOperationDetails not implemented") +} + +func RegisterAutomationServer(s *grpc.Server, srv AutomationServer) { + s.RegisterService(&_Automation_serviceDesc, srv) +} + +func _Automation_EnqueueClusterOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(automation.EnqueueClusterOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AutomationServer).EnqueueClusterOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/automationservice.Automation/EnqueueClusterOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AutomationServer).EnqueueClusterOperation(ctx, req.(*automation.EnqueueClusterOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Automation_GetClusterOperationDetails_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(automation.GetClusterOperationDetailsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AutomationServer).GetClusterOperationDetails(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/automationservice.Automation/GetClusterOperationDetails", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AutomationServer).GetClusterOperationDetails(ctx, req.(*automation.GetClusterOperationDetailsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Automation_serviceDesc = grpc.ServiceDesc{ + ServiceName: "automationservice.Automation", + HandlerType: (*AutomationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "EnqueueClusterOperation", + Handler: _Automation_EnqueueClusterOperation_Handler, + }, + { + MethodName: "GetClusterOperationDetails", + Handler: _Automation_GetClusterOperationDetails_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "automationservice.proto", +} diff --git a/internal/stackql-parser-fork/go/vt/proto/binlogdata/binlogdata.pb.go b/internal/stackql-parser-fork/go/vt/proto/binlogdata/binlogdata.pb.go new file mode 100644 index 00000000..68c2d352 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/binlogdata/binlogdata.pb.go @@ -0,0 +1,2136 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: binlogdata.proto + +package binlogdata + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + query "github.com/stackql/stackql-parser/go/vt/proto/query" + topodata "github.com/stackql/stackql-parser/go/vt/proto/topodata" + vtrpc "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// OnDDLAction lists the possible actions for DDLs. +type OnDDLAction int32 + +const ( + OnDDLAction_IGNORE OnDDLAction = 0 + OnDDLAction_STOP OnDDLAction = 1 + OnDDLAction_EXEC OnDDLAction = 2 + OnDDLAction_EXEC_IGNORE OnDDLAction = 3 +) + +var OnDDLAction_name = map[int32]string{ + 0: "IGNORE", + 1: "STOP", + 2: "EXEC", + 3: "EXEC_IGNORE", +} + +var OnDDLAction_value = map[string]int32{ + "IGNORE": 0, + "STOP": 1, + "EXEC": 2, + "EXEC_IGNORE": 3, +} + +func (x OnDDLAction) String() string { + return proto.EnumName(OnDDLAction_name, int32(x)) +} + +func (OnDDLAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{0} +} + +// VEventType enumerates the event types. Many of these types +// will not be encountered in RBR mode. +type VEventType int32 + +const ( + VEventType_UNKNOWN VEventType = 0 + VEventType_GTID VEventType = 1 + VEventType_BEGIN VEventType = 2 + VEventType_COMMIT VEventType = 3 + VEventType_ROLLBACK VEventType = 4 + VEventType_DDL VEventType = 5 + // INSERT, REPLACE, UPDATE, DELETE and SET will not be seen in RBR mode. + VEventType_INSERT VEventType = 6 + VEventType_REPLACE VEventType = 7 + VEventType_UPDATE VEventType = 8 + VEventType_DELETE VEventType = 9 + VEventType_SET VEventType = 10 + // OTHER is a dummy event. If encountered, the current GTID must be + // recorded by the client to be able to resume. + VEventType_OTHER VEventType = 11 + VEventType_ROW VEventType = 12 + VEventType_FIELD VEventType = 13 + // HEARTBEAT is sent if there is inactivity. If a client does not + // receive events beyond the hearbeat interval, it can assume that it's + // lost connection to the vstreamer. + VEventType_HEARTBEAT VEventType = 14 + // VGTID is generated by VTGate's VStream that combines multiple + // GTIDs. + VEventType_VGTID VEventType = 15 + VEventType_JOURNAL VEventType = 16 + VEventType_VERSION VEventType = 17 + VEventType_LASTPK VEventType = 18 +) + +var VEventType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "GTID", + 2: "BEGIN", + 3: "COMMIT", + 4: "ROLLBACK", + 5: "DDL", + 6: "INSERT", + 7: "REPLACE", + 8: "UPDATE", + 9: "DELETE", + 10: "SET", + 11: "OTHER", + 12: "ROW", + 13: "FIELD", + 14: "HEARTBEAT", + 15: "VGTID", + 16: "JOURNAL", + 17: "VERSION", + 18: "LASTPK", +} + +var VEventType_value = map[string]int32{ + "UNKNOWN": 0, + "GTID": 1, + "BEGIN": 2, + "COMMIT": 3, + "ROLLBACK": 4, + "DDL": 5, + "INSERT": 6, + "REPLACE": 7, + "UPDATE": 8, + "DELETE": 9, + "SET": 10, + "OTHER": 11, + "ROW": 12, + "FIELD": 13, + "HEARTBEAT": 14, + "VGTID": 15, + "JOURNAL": 16, + "VERSION": 17, + "LASTPK": 18, +} + +func (x VEventType) String() string { + return proto.EnumName(VEventType_name, int32(x)) +} + +func (VEventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{1} +} + +// MigrationType specifies the type of migration for the Journal. +type MigrationType int32 + +const ( + MigrationType_TABLES MigrationType = 0 + MigrationType_SHARDS MigrationType = 1 +) + +var MigrationType_name = map[int32]string{ + 0: "TABLES", + 1: "SHARDS", +} + +var MigrationType_value = map[string]int32{ + "TABLES": 0, + "SHARDS": 1, +} + +func (x MigrationType) String() string { + return proto.EnumName(MigrationType_name, int32(x)) +} + +func (MigrationType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{2} +} + +type BinlogTransaction_Statement_Category int32 + +const ( + BinlogTransaction_Statement_BL_UNRECOGNIZED BinlogTransaction_Statement_Category = 0 + BinlogTransaction_Statement_BL_BEGIN BinlogTransaction_Statement_Category = 1 + BinlogTransaction_Statement_BL_COMMIT BinlogTransaction_Statement_Category = 2 + BinlogTransaction_Statement_BL_ROLLBACK BinlogTransaction_Statement_Category = 3 + // BL_DML is deprecated. + BinlogTransaction_Statement_BL_DML_DEPRECATED BinlogTransaction_Statement_Category = 4 + BinlogTransaction_Statement_BL_DDL BinlogTransaction_Statement_Category = 5 + BinlogTransaction_Statement_BL_SET BinlogTransaction_Statement_Category = 6 + BinlogTransaction_Statement_BL_INSERT BinlogTransaction_Statement_Category = 7 + BinlogTransaction_Statement_BL_UPDATE BinlogTransaction_Statement_Category = 8 + BinlogTransaction_Statement_BL_DELETE BinlogTransaction_Statement_Category = 9 +) + +var BinlogTransaction_Statement_Category_name = map[int32]string{ + 0: "BL_UNRECOGNIZED", + 1: "BL_BEGIN", + 2: "BL_COMMIT", + 3: "BL_ROLLBACK", + 4: "BL_DML_DEPRECATED", + 5: "BL_DDL", + 6: "BL_SET", + 7: "BL_INSERT", + 8: "BL_UPDATE", + 9: "BL_DELETE", +} + +var BinlogTransaction_Statement_Category_value = map[string]int32{ + "BL_UNRECOGNIZED": 0, + "BL_BEGIN": 1, + "BL_COMMIT": 2, + "BL_ROLLBACK": 3, + "BL_DML_DEPRECATED": 4, + "BL_DDL": 5, + "BL_SET": 6, + "BL_INSERT": 7, + "BL_UPDATE": 8, + "BL_DELETE": 9, +} + +func (x BinlogTransaction_Statement_Category) String() string { + return proto.EnumName(BinlogTransaction_Statement_Category_name, int32(x)) +} + +func (BinlogTransaction_Statement_Category) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{1, 0, 0} +} + +type Filter_FieldEventMode int32 + +const ( + Filter_ERR_ON_MISMATCH Filter_FieldEventMode = 0 + Filter_BEST_EFFORT Filter_FieldEventMode = 1 +) + +var Filter_FieldEventMode_name = map[int32]string{ + 0: "ERR_ON_MISMATCH", + 1: "BEST_EFFORT", +} + +var Filter_FieldEventMode_value = map[string]int32{ + "ERR_ON_MISMATCH": 0, + "BEST_EFFORT": 1, +} + +func (x Filter_FieldEventMode) String() string { + return proto.EnumName(Filter_FieldEventMode_name, int32(x)) +} + +func (Filter_FieldEventMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{7, 0} +} + +// Charset is the per-statement charset info from a QUERY_EVENT binlog entry. +type Charset struct { + // @@session.character_set_client + Client int32 `protobuf:"varint,1,opt,name=client,proto3" json:"client,omitempty"` + // @@session.collation_connection + Conn int32 `protobuf:"varint,2,opt,name=conn,proto3" json:"conn,omitempty"` + // @@session.collation_server + Server int32 `protobuf:"varint,3,opt,name=server,proto3" json:"server,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Charset) Reset() { *m = Charset{} } +func (m *Charset) String() string { return proto.CompactTextString(m) } +func (*Charset) ProtoMessage() {} +func (*Charset) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{0} +} + +func (m *Charset) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Charset.Unmarshal(m, b) +} +func (m *Charset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Charset.Marshal(b, m, deterministic) +} +func (m *Charset) XXX_Merge(src proto.Message) { + xxx_messageInfo_Charset.Merge(m, src) +} +func (m *Charset) XXX_Size() int { + return xxx_messageInfo_Charset.Size(m) +} +func (m *Charset) XXX_DiscardUnknown() { + xxx_messageInfo_Charset.DiscardUnknown(m) +} + +var xxx_messageInfo_Charset proto.InternalMessageInfo + +func (m *Charset) GetClient() int32 { + if m != nil { + return m.Client + } + return 0 +} + +func (m *Charset) GetConn() int32 { + if m != nil { + return m.Conn + } + return 0 +} + +func (m *Charset) GetServer() int32 { + if m != nil { + return m.Server + } + return 0 +} + +// BinlogTransaction describes a transaction inside the binlogs. +// It is streamed by vttablet for filtered replication, used during resharding. +type BinlogTransaction struct { + // the statements in this transaction + Statements []*BinlogTransaction_Statement `protobuf:"bytes,1,rep,name=statements,proto3" json:"statements,omitempty"` + // The Event Token for this event. + EventToken *query.EventToken `protobuf:"bytes,4,opt,name=event_token,json=eventToken,proto3" json:"event_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BinlogTransaction) Reset() { *m = BinlogTransaction{} } +func (m *BinlogTransaction) String() string { return proto.CompactTextString(m) } +func (*BinlogTransaction) ProtoMessage() {} +func (*BinlogTransaction) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{1} +} + +func (m *BinlogTransaction) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BinlogTransaction.Unmarshal(m, b) +} +func (m *BinlogTransaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BinlogTransaction.Marshal(b, m, deterministic) +} +func (m *BinlogTransaction) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinlogTransaction.Merge(m, src) +} +func (m *BinlogTransaction) XXX_Size() int { + return xxx_messageInfo_BinlogTransaction.Size(m) +} +func (m *BinlogTransaction) XXX_DiscardUnknown() { + xxx_messageInfo_BinlogTransaction.DiscardUnknown(m) +} + +var xxx_messageInfo_BinlogTransaction proto.InternalMessageInfo + +func (m *BinlogTransaction) GetStatements() []*BinlogTransaction_Statement { + if m != nil { + return m.Statements + } + return nil +} + +func (m *BinlogTransaction) GetEventToken() *query.EventToken { + if m != nil { + return m.EventToken + } + return nil +} + +type BinlogTransaction_Statement struct { + // what type of statement is this? + Category BinlogTransaction_Statement_Category `protobuf:"varint,1,opt,name=category,proto3,enum=binlogdata.BinlogTransaction_Statement_Category" json:"category,omitempty"` + // charset of this statement, if different from pre-negotiated default. + Charset *Charset `protobuf:"bytes,2,opt,name=charset,proto3" json:"charset,omitempty"` + // the sql + Sql []byte `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BinlogTransaction_Statement) Reset() { *m = BinlogTransaction_Statement{} } +func (m *BinlogTransaction_Statement) String() string { return proto.CompactTextString(m) } +func (*BinlogTransaction_Statement) ProtoMessage() {} +func (*BinlogTransaction_Statement) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{1, 0} +} + +func (m *BinlogTransaction_Statement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BinlogTransaction_Statement.Unmarshal(m, b) +} +func (m *BinlogTransaction_Statement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BinlogTransaction_Statement.Marshal(b, m, deterministic) +} +func (m *BinlogTransaction_Statement) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinlogTransaction_Statement.Merge(m, src) +} +func (m *BinlogTransaction_Statement) XXX_Size() int { + return xxx_messageInfo_BinlogTransaction_Statement.Size(m) +} +func (m *BinlogTransaction_Statement) XXX_DiscardUnknown() { + xxx_messageInfo_BinlogTransaction_Statement.DiscardUnknown(m) +} + +var xxx_messageInfo_BinlogTransaction_Statement proto.InternalMessageInfo + +func (m *BinlogTransaction_Statement) GetCategory() BinlogTransaction_Statement_Category { + if m != nil { + return m.Category + } + return BinlogTransaction_Statement_BL_UNRECOGNIZED +} + +func (m *BinlogTransaction_Statement) GetCharset() *Charset { + if m != nil { + return m.Charset + } + return nil +} + +func (m *BinlogTransaction_Statement) GetSql() []byte { + if m != nil { + return m.Sql + } + return nil +} + +// StreamKeyRangeRequest is the payload to StreamKeyRange +type StreamKeyRangeRequest struct { + // where to start + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + // what to get + KeyRange *topodata.KeyRange `protobuf:"bytes,2,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` + // default charset on the player side + Charset *Charset `protobuf:"bytes,3,opt,name=charset,proto3" json:"charset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamKeyRangeRequest) Reset() { *m = StreamKeyRangeRequest{} } +func (m *StreamKeyRangeRequest) String() string { return proto.CompactTextString(m) } +func (*StreamKeyRangeRequest) ProtoMessage() {} +func (*StreamKeyRangeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{2} +} + +func (m *StreamKeyRangeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamKeyRangeRequest.Unmarshal(m, b) +} +func (m *StreamKeyRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamKeyRangeRequest.Marshal(b, m, deterministic) +} +func (m *StreamKeyRangeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamKeyRangeRequest.Merge(m, src) +} +func (m *StreamKeyRangeRequest) XXX_Size() int { + return xxx_messageInfo_StreamKeyRangeRequest.Size(m) +} +func (m *StreamKeyRangeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StreamKeyRangeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamKeyRangeRequest proto.InternalMessageInfo + +func (m *StreamKeyRangeRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *StreamKeyRangeRequest) GetKeyRange() *topodata.KeyRange { + if m != nil { + return m.KeyRange + } + return nil +} + +func (m *StreamKeyRangeRequest) GetCharset() *Charset { + if m != nil { + return m.Charset + } + return nil +} + +// StreamKeyRangeResponse is the response from StreamKeyRange +type StreamKeyRangeResponse struct { + BinlogTransaction *BinlogTransaction `protobuf:"bytes,1,opt,name=binlog_transaction,json=binlogTransaction,proto3" json:"binlog_transaction,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamKeyRangeResponse) Reset() { *m = StreamKeyRangeResponse{} } +func (m *StreamKeyRangeResponse) String() string { return proto.CompactTextString(m) } +func (*StreamKeyRangeResponse) ProtoMessage() {} +func (*StreamKeyRangeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{3} +} + +func (m *StreamKeyRangeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamKeyRangeResponse.Unmarshal(m, b) +} +func (m *StreamKeyRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamKeyRangeResponse.Marshal(b, m, deterministic) +} +func (m *StreamKeyRangeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamKeyRangeResponse.Merge(m, src) +} +func (m *StreamKeyRangeResponse) XXX_Size() int { + return xxx_messageInfo_StreamKeyRangeResponse.Size(m) +} +func (m *StreamKeyRangeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StreamKeyRangeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamKeyRangeResponse proto.InternalMessageInfo + +func (m *StreamKeyRangeResponse) GetBinlogTransaction() *BinlogTransaction { + if m != nil { + return m.BinlogTransaction + } + return nil +} + +// StreamTablesRequest is the payload to StreamTables +type StreamTablesRequest struct { + // where to start + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + // what to get + Tables []string `protobuf:"bytes,2,rep,name=tables,proto3" json:"tables,omitempty"` + // default charset on the player side + Charset *Charset `protobuf:"bytes,3,opt,name=charset,proto3" json:"charset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamTablesRequest) Reset() { *m = StreamTablesRequest{} } +func (m *StreamTablesRequest) String() string { return proto.CompactTextString(m) } +func (*StreamTablesRequest) ProtoMessage() {} +func (*StreamTablesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{4} +} + +func (m *StreamTablesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamTablesRequest.Unmarshal(m, b) +} +func (m *StreamTablesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamTablesRequest.Marshal(b, m, deterministic) +} +func (m *StreamTablesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamTablesRequest.Merge(m, src) +} +func (m *StreamTablesRequest) XXX_Size() int { + return xxx_messageInfo_StreamTablesRequest.Size(m) +} +func (m *StreamTablesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StreamTablesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamTablesRequest proto.InternalMessageInfo + +func (m *StreamTablesRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *StreamTablesRequest) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + +func (m *StreamTablesRequest) GetCharset() *Charset { + if m != nil { + return m.Charset + } + return nil +} + +// StreamTablesResponse is the response from StreamTables +type StreamTablesResponse struct { + BinlogTransaction *BinlogTransaction `protobuf:"bytes,1,opt,name=binlog_transaction,json=binlogTransaction,proto3" json:"binlog_transaction,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamTablesResponse) Reset() { *m = StreamTablesResponse{} } +func (m *StreamTablesResponse) String() string { return proto.CompactTextString(m) } +func (*StreamTablesResponse) ProtoMessage() {} +func (*StreamTablesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{5} +} + +func (m *StreamTablesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamTablesResponse.Unmarshal(m, b) +} +func (m *StreamTablesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamTablesResponse.Marshal(b, m, deterministic) +} +func (m *StreamTablesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamTablesResponse.Merge(m, src) +} +func (m *StreamTablesResponse) XXX_Size() int { + return xxx_messageInfo_StreamTablesResponse.Size(m) +} +func (m *StreamTablesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StreamTablesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamTablesResponse proto.InternalMessageInfo + +func (m *StreamTablesResponse) GetBinlogTransaction() *BinlogTransaction { + if m != nil { + return m.BinlogTransaction + } + return nil +} + +// Rule represents one rule in a Filter. +type Rule struct { + // Match can be a table name or a regular expression. + // If it starts with a '/', it's a regular expression. + // For example, "t" matches a table named "t", whereas + // "/t.*" matches all tables that begin with 't'. + Match string `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"` + // Filter: If empty, all columns and rows of the matching tables + // are sent. If it's a keyrange like "-80", only rows that + // match the keyrange are sent. + // If Match is a table name instead of a regular expression, + // the Filter can also be a select expression like this: + // "select * from t", same as an empty Filter, or + // "select * from t where in_keyrange('-80')", same as "-80", or + // "select col1, col2 from t where in_keyrange(col1, 'hash', '-80'), or + // What is allowed in a select expression depends on whether + // it's a vstreamer or vreplication request. For more details, + // please refer to the specific package documentation. + // On the vreplication side, Filter can also accept a special + // "exclude" value, which will cause the matched tables + // to be excluded. + // TODO(sougou): support this on vstreamer side also. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Rule) Reset() { *m = Rule{} } +func (m *Rule) String() string { return proto.CompactTextString(m) } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{6} +} + +func (m *Rule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Rule.Unmarshal(m, b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Rule.Marshal(b, m, deterministic) +} +func (m *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() int { + return xxx_messageInfo_Rule.Size(m) +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *Rule) GetMatch() string { + if m != nil { + return m.Match + } + return "" +} + +func (m *Rule) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// Filter represents a list of ordered rules. The first +// match wins. +type Filter struct { + Rules []*Rule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` + // FieldEventMode specifies the behavior if there is a mismatch + // between the current schema and the fields in the binlog. This + // can happen if the binlog position is before a DDL that would + // cause the fields to change. If vstreamer detects such + // an inconsistency, the behavior depends on the FieldEventMode. + // If the value is ERR_ON_MISMATCH (default), then it errors out. + // If it's BEST_EFFORT, it sends a field event with fake column + // names as "@1", "@2", etc. + FieldEventMode Filter_FieldEventMode `protobuf:"varint,2,opt,name=fieldEventMode,proto3,enum=binlogdata.Filter_FieldEventMode" json:"fieldEventMode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Filter) Reset() { *m = Filter{} } +func (m *Filter) String() string { return proto.CompactTextString(m) } +func (*Filter) ProtoMessage() {} +func (*Filter) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{7} +} + +func (m *Filter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Filter.Unmarshal(m, b) +} +func (m *Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Filter.Marshal(b, m, deterministic) +} +func (m *Filter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Filter.Merge(m, src) +} +func (m *Filter) XXX_Size() int { + return xxx_messageInfo_Filter.Size(m) +} +func (m *Filter) XXX_DiscardUnknown() { + xxx_messageInfo_Filter.DiscardUnknown(m) +} + +var xxx_messageInfo_Filter proto.InternalMessageInfo + +func (m *Filter) GetRules() []*Rule { + if m != nil { + return m.Rules + } + return nil +} + +func (m *Filter) GetFieldEventMode() Filter_FieldEventMode { + if m != nil { + return m.FieldEventMode + } + return Filter_ERR_ON_MISMATCH +} + +// BinlogSource specifies the source and filter parameters for +// Filtered Replication. KeyRange and Tables are legacy. Filter +// is the new way to specify the filtering rules. +type BinlogSource struct { + // the source keyspace + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // the source shard + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // the source tablet type + TabletType topodata.TabletType `protobuf:"varint,3,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + // KeyRange is set if the request is for a keyrange + KeyRange *topodata.KeyRange `protobuf:"bytes,4,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` + // Tables is set if the request is for a list of tables + Tables []string `protobuf:"bytes,5,rep,name=tables,proto3" json:"tables,omitempty"` + // Filter is set if we're using the generalized representation + // for the filter. + Filter *Filter `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"` + // OnDdl specifies the action to be taken when a DDL is encountered. + OnDdl OnDDLAction `protobuf:"varint,7,opt,name=on_ddl,json=onDdl,proto3,enum=binlogdata.OnDDLAction" json:"on_ddl,omitempty"` + // Source is an external mysql. This attribute should be set to the username + // to use in the connection + ExternalMysql string `protobuf:"bytes,8,opt,name=external_mysql,json=externalMysql,proto3" json:"external_mysql,omitempty"` + // StopAfterCopy specifies if vreplication should be stopped + // after copying is done. + StopAfterCopy bool `protobuf:"varint,9,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BinlogSource) Reset() { *m = BinlogSource{} } +func (m *BinlogSource) String() string { return proto.CompactTextString(m) } +func (*BinlogSource) ProtoMessage() {} +func (*BinlogSource) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{8} +} + +func (m *BinlogSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BinlogSource.Unmarshal(m, b) +} +func (m *BinlogSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BinlogSource.Marshal(b, m, deterministic) +} +func (m *BinlogSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinlogSource.Merge(m, src) +} +func (m *BinlogSource) XXX_Size() int { + return xxx_messageInfo_BinlogSource.Size(m) +} +func (m *BinlogSource) XXX_DiscardUnknown() { + xxx_messageInfo_BinlogSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BinlogSource proto.InternalMessageInfo + +func (m *BinlogSource) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *BinlogSource) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *BinlogSource) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *BinlogSource) GetKeyRange() *topodata.KeyRange { + if m != nil { + return m.KeyRange + } + return nil +} + +func (m *BinlogSource) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + +func (m *BinlogSource) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *BinlogSource) GetOnDdl() OnDDLAction { + if m != nil { + return m.OnDdl + } + return OnDDLAction_IGNORE +} + +func (m *BinlogSource) GetExternalMysql() string { + if m != nil { + return m.ExternalMysql + } + return "" +} + +func (m *BinlogSource) GetStopAfterCopy() bool { + if m != nil { + return m.StopAfterCopy + } + return false +} + +// RowChange represents one row change. +// If Before is set and not After, it's a delete. +// If After is set and not Before, it's an insert. +// If both are set, it's an update. +type RowChange struct { + Before *query.Row `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"` + After *query.Row `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RowChange) Reset() { *m = RowChange{} } +func (m *RowChange) String() string { return proto.CompactTextString(m) } +func (*RowChange) ProtoMessage() {} +func (*RowChange) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{9} +} + +func (m *RowChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RowChange.Unmarshal(m, b) +} +func (m *RowChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RowChange.Marshal(b, m, deterministic) +} +func (m *RowChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_RowChange.Merge(m, src) +} +func (m *RowChange) XXX_Size() int { + return xxx_messageInfo_RowChange.Size(m) +} +func (m *RowChange) XXX_DiscardUnknown() { + xxx_messageInfo_RowChange.DiscardUnknown(m) +} + +var xxx_messageInfo_RowChange proto.InternalMessageInfo + +func (m *RowChange) GetBefore() *query.Row { + if m != nil { + return m.Before + } + return nil +} + +func (m *RowChange) GetAfter() *query.Row { + if m != nil { + return m.After + } + return nil +} + +// RowEvent represent row events for one table. +type RowEvent struct { + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + RowChanges []*RowChange `protobuf:"bytes,2,rep,name=row_changes,json=rowChanges,proto3" json:"row_changes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RowEvent) Reset() { *m = RowEvent{} } +func (m *RowEvent) String() string { return proto.CompactTextString(m) } +func (*RowEvent) ProtoMessage() {} +func (*RowEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{10} +} + +func (m *RowEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RowEvent.Unmarshal(m, b) +} +func (m *RowEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RowEvent.Marshal(b, m, deterministic) +} +func (m *RowEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_RowEvent.Merge(m, src) +} +func (m *RowEvent) XXX_Size() int { + return xxx_messageInfo_RowEvent.Size(m) +} +func (m *RowEvent) XXX_DiscardUnknown() { + xxx_messageInfo_RowEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_RowEvent proto.InternalMessageInfo + +func (m *RowEvent) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *RowEvent) GetRowChanges() []*RowChange { + if m != nil { + return m.RowChanges + } + return nil +} + +// FieldEvent represents the field info for a table. +type FieldEvent struct { + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + Fields []*query.Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldEvent) Reset() { *m = FieldEvent{} } +func (m *FieldEvent) String() string { return proto.CompactTextString(m) } +func (*FieldEvent) ProtoMessage() {} +func (*FieldEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{11} +} + +func (m *FieldEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldEvent.Unmarshal(m, b) +} +func (m *FieldEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldEvent.Marshal(b, m, deterministic) +} +func (m *FieldEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldEvent.Merge(m, src) +} +func (m *FieldEvent) XXX_Size() int { + return xxx_messageInfo_FieldEvent.Size(m) +} +func (m *FieldEvent) XXX_DiscardUnknown() { + xxx_messageInfo_FieldEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldEvent proto.InternalMessageInfo + +func (m *FieldEvent) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *FieldEvent) GetFields() []*query.Field { + if m != nil { + return m.Fields + } + return nil +} + +// ShardGtid contains the GTID position for one shard. +// It's used in a request for requesting a starting position. +// It's used in a response to transmit the current position +// of a shard. It's also used in a Journal to indicate the +// list of targets and shard positions to migrate to. +type ShardGtid struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Gtid string `protobuf:"bytes,3,opt,name=gtid,proto3" json:"gtid,omitempty"` + TablePKs []*TableLastPK `protobuf:"bytes,4,rep,name=table_p_ks,json=tablePKs,proto3" json:"table_p_ks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShardGtid) Reset() { *m = ShardGtid{} } +func (m *ShardGtid) String() string { return proto.CompactTextString(m) } +func (*ShardGtid) ProtoMessage() {} +func (*ShardGtid) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{12} +} + +func (m *ShardGtid) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShardGtid.Unmarshal(m, b) +} +func (m *ShardGtid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShardGtid.Marshal(b, m, deterministic) +} +func (m *ShardGtid) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShardGtid.Merge(m, src) +} +func (m *ShardGtid) XXX_Size() int { + return xxx_messageInfo_ShardGtid.Size(m) +} +func (m *ShardGtid) XXX_DiscardUnknown() { + xxx_messageInfo_ShardGtid.DiscardUnknown(m) +} + +var xxx_messageInfo_ShardGtid proto.InternalMessageInfo + +func (m *ShardGtid) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *ShardGtid) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *ShardGtid) GetGtid() string { + if m != nil { + return m.Gtid + } + return "" +} + +func (m *ShardGtid) GetTablePKs() []*TableLastPK { + if m != nil { + return m.TablePKs + } + return nil +} + +// A VGtid is a list of ShardGtids. +type VGtid struct { + ShardGtids []*ShardGtid `protobuf:"bytes,1,rep,name=shard_gtids,json=shardGtids,proto3" json:"shard_gtids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VGtid) Reset() { *m = VGtid{} } +func (m *VGtid) String() string { return proto.CompactTextString(m) } +func (*VGtid) ProtoMessage() {} +func (*VGtid) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{13} +} + +func (m *VGtid) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VGtid.Unmarshal(m, b) +} +func (m *VGtid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VGtid.Marshal(b, m, deterministic) +} +func (m *VGtid) XXX_Merge(src proto.Message) { + xxx_messageInfo_VGtid.Merge(m, src) +} +func (m *VGtid) XXX_Size() int { + return xxx_messageInfo_VGtid.Size(m) +} +func (m *VGtid) XXX_DiscardUnknown() { + xxx_messageInfo_VGtid.DiscardUnknown(m) +} + +var xxx_messageInfo_VGtid proto.InternalMessageInfo + +func (m *VGtid) GetShardGtids() []*ShardGtid { + if m != nil { + return m.ShardGtids + } + return nil +} + +// KeyspaceShard represents a keyspace and shard. +type KeyspaceShard struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyspaceShard) Reset() { *m = KeyspaceShard{} } +func (m *KeyspaceShard) String() string { return proto.CompactTextString(m) } +func (*KeyspaceShard) ProtoMessage() {} +func (*KeyspaceShard) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{14} +} + +func (m *KeyspaceShard) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyspaceShard.Unmarshal(m, b) +} +func (m *KeyspaceShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyspaceShard.Marshal(b, m, deterministic) +} +func (m *KeyspaceShard) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyspaceShard.Merge(m, src) +} +func (m *KeyspaceShard) XXX_Size() int { + return xxx_messageInfo_KeyspaceShard.Size(m) +} +func (m *KeyspaceShard) XXX_DiscardUnknown() { + xxx_messageInfo_KeyspaceShard.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyspaceShard proto.InternalMessageInfo + +func (m *KeyspaceShard) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *KeyspaceShard) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +// Journal contains the metadata for a journal event. +// The commit of a journal event indicates the point of no return +// for a migration. +type Journal struct { + // Id represents a unique journal id. + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + MigrationType MigrationType `protobuf:"varint,2,opt,name=migration_type,json=migrationType,proto3,enum=binlogdata.MigrationType" json:"migration_type,omitempty"` + // Tables is set if the journal represents a TABLES migration. + Tables []string `protobuf:"bytes,3,rep,name=tables,proto3" json:"tables,omitempty"` + // LocalPosition is the source position at which the migration happened. + LocalPosition string `protobuf:"bytes,4,opt,name=local_position,json=localPosition,proto3" json:"local_position,omitempty"` + // ShardGtids is the list of targets to which the migration took place. + ShardGtids []*ShardGtid `protobuf:"bytes,5,rep,name=shard_gtids,json=shardGtids,proto3" json:"shard_gtids,omitempty"` + // Participants is the list of source participants for a migration. + // Every participant is expected to have an identical journal entry. + // While streaming, the client must wait for the journal entry to + // be received from all pariticipants, and then replace them with new + // streams specified by ShardGtid. + // If a stream does not have all participants, a consistent migration + // is not possible. + Participants []*KeyspaceShard `protobuf:"bytes,6,rep,name=participants,proto3" json:"participants,omitempty"` + // SourceWorkflows is the list of workflows in the source shard that + // were migrated to the target. If a migration fails after a Journal + // is committed, this information is used to start the target streams + // that were created prior to the creation of the journal. + SourceWorkflows []string `protobuf:"bytes,7,rep,name=source_workflows,json=sourceWorkflows,proto3" json:"source_workflows,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Journal) Reset() { *m = Journal{} } +func (m *Journal) String() string { return proto.CompactTextString(m) } +func (*Journal) ProtoMessage() {} +func (*Journal) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{15} +} + +func (m *Journal) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Journal.Unmarshal(m, b) +} +func (m *Journal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Journal.Marshal(b, m, deterministic) +} +func (m *Journal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Journal.Merge(m, src) +} +func (m *Journal) XXX_Size() int { + return xxx_messageInfo_Journal.Size(m) +} +func (m *Journal) XXX_DiscardUnknown() { + xxx_messageInfo_Journal.DiscardUnknown(m) +} + +var xxx_messageInfo_Journal proto.InternalMessageInfo + +func (m *Journal) GetId() int64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Journal) GetMigrationType() MigrationType { + if m != nil { + return m.MigrationType + } + return MigrationType_TABLES +} + +func (m *Journal) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + +func (m *Journal) GetLocalPosition() string { + if m != nil { + return m.LocalPosition + } + return "" +} + +func (m *Journal) GetShardGtids() []*ShardGtid { + if m != nil { + return m.ShardGtids + } + return nil +} + +func (m *Journal) GetParticipants() []*KeyspaceShard { + if m != nil { + return m.Participants + } + return nil +} + +func (m *Journal) GetSourceWorkflows() []string { + if m != nil { + return m.SourceWorkflows + } + return nil +} + +// VEvent represents a vstream event. +// A FieldEvent is sent once for every table, just before +// the first event for that table. The client is expected +// to cache this information and match it against the RowEvent +// which contains the table name. +// A GTID event always precedes a commitable event, which can be +// COMMIT, DDL or OTHER. +// OTHER events are non-material events that have no additional metadata. +type VEvent struct { + Type VEventType `protobuf:"varint,1,opt,name=type,proto3,enum=binlogdata.VEventType" json:"type,omitempty"` + // Timestamp is the binlog timestamp in seconds. + // The value should be ignored if 0. + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Gtid is set if the event type is GTID. + Gtid string `protobuf:"bytes,3,opt,name=gtid,proto3" json:"gtid,omitempty"` + // Ddl is set if the event type is DDL. + Ddl string `protobuf:"bytes,4,opt,name=ddl,proto3" json:"ddl,omitempty"` + // RowEvent is set if the event type is ROW. + RowEvent *RowEvent `protobuf:"bytes,5,opt,name=row_event,json=rowEvent,proto3" json:"row_event,omitempty"` + // FieldEvent is set if the event type is FIELD. + FieldEvent *FieldEvent `protobuf:"bytes,6,opt,name=field_event,json=fieldEvent,proto3" json:"field_event,omitempty"` + // Vgtid is set if the event type is VGTID. + // This event is only generated by VTGate's VStream function. + Vgtid *VGtid `protobuf:"bytes,7,opt,name=vgtid,proto3" json:"vgtid,omitempty"` + // Journal is set if the event type is JOURNAL. + Journal *Journal `protobuf:"bytes,8,opt,name=journal,proto3" json:"journal,omitempty"` + // Dml is set if the event type is INSERT, REPLACE, UPDATE or DELETE. + Dml string `protobuf:"bytes,9,opt,name=dml,proto3" json:"dml,omitempty"` + // CurrentTime specifies the current time when the message was sent. + // This can be used to compenssate for clock skew. + CurrentTime int64 `protobuf:"varint,20,opt,name=current_time,json=currentTime,proto3" json:"current_time,omitempty"` + // LastPK is the last PK for a table + LastPKEvent *LastPKEvent `protobuf:"bytes,21,opt,name=last_p_k_event,json=lastPKEvent,proto3" json:"last_p_k_event,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VEvent) Reset() { *m = VEvent{} } +func (m *VEvent) String() string { return proto.CompactTextString(m) } +func (*VEvent) ProtoMessage() {} +func (*VEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{16} +} + +func (m *VEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VEvent.Unmarshal(m, b) +} +func (m *VEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VEvent.Marshal(b, m, deterministic) +} +func (m *VEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_VEvent.Merge(m, src) +} +func (m *VEvent) XXX_Size() int { + return xxx_messageInfo_VEvent.Size(m) +} +func (m *VEvent) XXX_DiscardUnknown() { + xxx_messageInfo_VEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_VEvent proto.InternalMessageInfo + +func (m *VEvent) GetType() VEventType { + if m != nil { + return m.Type + } + return VEventType_UNKNOWN +} + +func (m *VEvent) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *VEvent) GetGtid() string { + if m != nil { + return m.Gtid + } + return "" +} + +func (m *VEvent) GetDdl() string { + if m != nil { + return m.Ddl + } + return "" +} + +func (m *VEvent) GetRowEvent() *RowEvent { + if m != nil { + return m.RowEvent + } + return nil +} + +func (m *VEvent) GetFieldEvent() *FieldEvent { + if m != nil { + return m.FieldEvent + } + return nil +} + +func (m *VEvent) GetVgtid() *VGtid { + if m != nil { + return m.Vgtid + } + return nil +} + +func (m *VEvent) GetJournal() *Journal { + if m != nil { + return m.Journal + } + return nil +} + +func (m *VEvent) GetDml() string { + if m != nil { + return m.Dml + } + return "" +} + +func (m *VEvent) GetCurrentTime() int64 { + if m != nil { + return m.CurrentTime + } + return 0 +} + +func (m *VEvent) GetLastPKEvent() *LastPKEvent { + if m != nil { + return m.LastPKEvent + } + return nil +} + +type MinimalTable struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Fields []*query.Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + PKColumns []int64 `protobuf:"varint,3,rep,packed,name=p_k_columns,json=pKColumns,proto3" json:"p_k_columns,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MinimalTable) Reset() { *m = MinimalTable{} } +func (m *MinimalTable) String() string { return proto.CompactTextString(m) } +func (*MinimalTable) ProtoMessage() {} +func (*MinimalTable) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{17} +} + +func (m *MinimalTable) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MinimalTable.Unmarshal(m, b) +} +func (m *MinimalTable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MinimalTable.Marshal(b, m, deterministic) +} +func (m *MinimalTable) XXX_Merge(src proto.Message) { + xxx_messageInfo_MinimalTable.Merge(m, src) +} +func (m *MinimalTable) XXX_Size() int { + return xxx_messageInfo_MinimalTable.Size(m) +} +func (m *MinimalTable) XXX_DiscardUnknown() { + xxx_messageInfo_MinimalTable.DiscardUnknown(m) +} + +var xxx_messageInfo_MinimalTable proto.InternalMessageInfo + +func (m *MinimalTable) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MinimalTable) GetFields() []*query.Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *MinimalTable) GetPKColumns() []int64 { + if m != nil { + return m.PKColumns + } + return nil +} + +type MinimalSchema struct { + Tables []*MinimalTable `protobuf:"bytes,1,rep,name=tables,proto3" json:"tables,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MinimalSchema) Reset() { *m = MinimalSchema{} } +func (m *MinimalSchema) String() string { return proto.CompactTextString(m) } +func (*MinimalSchema) ProtoMessage() {} +func (*MinimalSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{18} +} + +func (m *MinimalSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MinimalSchema.Unmarshal(m, b) +} +func (m *MinimalSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MinimalSchema.Marshal(b, m, deterministic) +} +func (m *MinimalSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_MinimalSchema.Merge(m, src) +} +func (m *MinimalSchema) XXX_Size() int { + return xxx_messageInfo_MinimalSchema.Size(m) +} +func (m *MinimalSchema) XXX_DiscardUnknown() { + xxx_messageInfo_MinimalSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_MinimalSchema proto.InternalMessageInfo + +func (m *MinimalSchema) GetTables() []*MinimalTable { + if m != nil { + return m.Tables + } + return nil +} + +// VStreamRequest is the payload for VStreamer +type VStreamRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *query.VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *query.Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Position string `protobuf:"bytes,4,opt,name=position,proto3" json:"position,omitempty"` + Filter *Filter `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + TableLastPKs []*TableLastPK `protobuf:"bytes,6,rep,name=table_last_p_ks,json=tableLastPKs,proto3" json:"table_last_p_ks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamRequest) Reset() { *m = VStreamRequest{} } +func (m *VStreamRequest) String() string { return proto.CompactTextString(m) } +func (*VStreamRequest) ProtoMessage() {} +func (*VStreamRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{19} +} + +func (m *VStreamRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamRequest.Unmarshal(m, b) +} +func (m *VStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VStreamRequest.Marshal(b, m, deterministic) +} +func (m *VStreamRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamRequest.Merge(m, src) +} +func (m *VStreamRequest) XXX_Size() int { + return xxx_messageInfo_VStreamRequest.Size(m) +} +func (m *VStreamRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamRequest proto.InternalMessageInfo + +func (m *VStreamRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *VStreamRequest) GetImmediateCallerId() *query.VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *VStreamRequest) GetTarget() *query.Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *VStreamRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *VStreamRequest) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *VStreamRequest) GetTableLastPKs() []*TableLastPK { + if m != nil { + return m.TableLastPKs + } + return nil +} + +// VStreamResponse is the response from VStreamer +type VStreamResponse struct { + Events []*VEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamResponse) Reset() { *m = VStreamResponse{} } +func (m *VStreamResponse) String() string { return proto.CompactTextString(m) } +func (*VStreamResponse) ProtoMessage() {} +func (*VStreamResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{20} +} + +func (m *VStreamResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamResponse.Unmarshal(m, b) +} +func (m *VStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VStreamResponse.Marshal(b, m, deterministic) +} +func (m *VStreamResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamResponse.Merge(m, src) +} +func (m *VStreamResponse) XXX_Size() int { + return xxx_messageInfo_VStreamResponse.Size(m) +} +func (m *VStreamResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamResponse proto.InternalMessageInfo + +func (m *VStreamResponse) GetEvents() []*VEvent { + if m != nil { + return m.Events + } + return nil +} + +// VStreamRowsRequest is the payload for VStreamRows +type VStreamRowsRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *query.VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *query.Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Query string `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` + Lastpk *query.QueryResult `protobuf:"bytes,5,opt,name=lastpk,proto3" json:"lastpk,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamRowsRequest) Reset() { *m = VStreamRowsRequest{} } +func (m *VStreamRowsRequest) String() string { return proto.CompactTextString(m) } +func (*VStreamRowsRequest) ProtoMessage() {} +func (*VStreamRowsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{21} +} + +func (m *VStreamRowsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamRowsRequest.Unmarshal(m, b) +} +func (m *VStreamRowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VStreamRowsRequest.Marshal(b, m, deterministic) +} +func (m *VStreamRowsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamRowsRequest.Merge(m, src) +} +func (m *VStreamRowsRequest) XXX_Size() int { + return xxx_messageInfo_VStreamRowsRequest.Size(m) +} +func (m *VStreamRowsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamRowsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamRowsRequest proto.InternalMessageInfo + +func (m *VStreamRowsRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *VStreamRowsRequest) GetImmediateCallerId() *query.VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *VStreamRowsRequest) GetTarget() *query.Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *VStreamRowsRequest) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +func (m *VStreamRowsRequest) GetLastpk() *query.QueryResult { + if m != nil { + return m.Lastpk + } + return nil +} + +// VStreamRowsResponse is the response from VStreamRows +type VStreamRowsResponse struct { + Fields []*query.Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` + Pkfields []*query.Field `protobuf:"bytes,2,rep,name=pkfields,proto3" json:"pkfields,omitempty"` + Gtid string `protobuf:"bytes,3,opt,name=gtid,proto3" json:"gtid,omitempty"` + Rows []*query.Row `protobuf:"bytes,4,rep,name=rows,proto3" json:"rows,omitempty"` + Lastpk *query.Row `protobuf:"bytes,5,opt,name=lastpk,proto3" json:"lastpk,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamRowsResponse) Reset() { *m = VStreamRowsResponse{} } +func (m *VStreamRowsResponse) String() string { return proto.CompactTextString(m) } +func (*VStreamRowsResponse) ProtoMessage() {} +func (*VStreamRowsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{22} +} + +func (m *VStreamRowsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamRowsResponse.Unmarshal(m, b) +} +func (m *VStreamRowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VStreamRowsResponse.Marshal(b, m, deterministic) +} +func (m *VStreamRowsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamRowsResponse.Merge(m, src) +} +func (m *VStreamRowsResponse) XXX_Size() int { + return xxx_messageInfo_VStreamRowsResponse.Size(m) +} +func (m *VStreamRowsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamRowsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamRowsResponse proto.InternalMessageInfo + +func (m *VStreamRowsResponse) GetFields() []*query.Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *VStreamRowsResponse) GetPkfields() []*query.Field { + if m != nil { + return m.Pkfields + } + return nil +} + +func (m *VStreamRowsResponse) GetGtid() string { + if m != nil { + return m.Gtid + } + return "" +} + +func (m *VStreamRowsResponse) GetRows() []*query.Row { + if m != nil { + return m.Rows + } + return nil +} + +func (m *VStreamRowsResponse) GetLastpk() *query.Row { + if m != nil { + return m.Lastpk + } + return nil +} + +type LastPKEvent struct { + TableLastPK *TableLastPK `protobuf:"bytes,1,opt,name=table_last_p_k,json=tableLastPK,proto3" json:"table_last_p_k,omitempty"` + Completed bool `protobuf:"varint,2,opt,name=completed,proto3" json:"completed,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LastPKEvent) Reset() { *m = LastPKEvent{} } +func (m *LastPKEvent) String() string { return proto.CompactTextString(m) } +func (*LastPKEvent) ProtoMessage() {} +func (*LastPKEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{23} +} + +func (m *LastPKEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LastPKEvent.Unmarshal(m, b) +} +func (m *LastPKEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LastPKEvent.Marshal(b, m, deterministic) +} +func (m *LastPKEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_LastPKEvent.Merge(m, src) +} +func (m *LastPKEvent) XXX_Size() int { + return xxx_messageInfo_LastPKEvent.Size(m) +} +func (m *LastPKEvent) XXX_DiscardUnknown() { + xxx_messageInfo_LastPKEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_LastPKEvent proto.InternalMessageInfo + +func (m *LastPKEvent) GetTableLastPK() *TableLastPK { + if m != nil { + return m.TableLastPK + } + return nil +} + +func (m *LastPKEvent) GetCompleted() bool { + if m != nil { + return m.Completed + } + return false +} + +type TableLastPK struct { + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + Lastpk *query.QueryResult `protobuf:"bytes,3,opt,name=lastpk,proto3" json:"lastpk,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TableLastPK) Reset() { *m = TableLastPK{} } +func (m *TableLastPK) String() string { return proto.CompactTextString(m) } +func (*TableLastPK) ProtoMessage() {} +func (*TableLastPK) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{24} +} + +func (m *TableLastPK) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TableLastPK.Unmarshal(m, b) +} +func (m *TableLastPK) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TableLastPK.Marshal(b, m, deterministic) +} +func (m *TableLastPK) XXX_Merge(src proto.Message) { + xxx_messageInfo_TableLastPK.Merge(m, src) +} +func (m *TableLastPK) XXX_Size() int { + return xxx_messageInfo_TableLastPK.Size(m) +} +func (m *TableLastPK) XXX_DiscardUnknown() { + xxx_messageInfo_TableLastPK.DiscardUnknown(m) +} + +var xxx_messageInfo_TableLastPK proto.InternalMessageInfo + +func (m *TableLastPK) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *TableLastPK) GetLastpk() *query.QueryResult { + if m != nil { + return m.Lastpk + } + return nil +} + +// VStreamResultsRequest is the payload for VStreamResults +// The ids match VStreamRows, in case we decide to merge the two. +// The ids match VStreamRows, in case we decide to merge the two. +type VStreamResultsRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *query.VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *query.Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Query string `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamResultsRequest) Reset() { *m = VStreamResultsRequest{} } +func (m *VStreamResultsRequest) String() string { return proto.CompactTextString(m) } +func (*VStreamResultsRequest) ProtoMessage() {} +func (*VStreamResultsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{25} +} + +func (m *VStreamResultsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamResultsRequest.Unmarshal(m, b) +} +func (m *VStreamResultsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VStreamResultsRequest.Marshal(b, m, deterministic) +} +func (m *VStreamResultsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamResultsRequest.Merge(m, src) +} +func (m *VStreamResultsRequest) XXX_Size() int { + return xxx_messageInfo_VStreamResultsRequest.Size(m) +} +func (m *VStreamResultsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamResultsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamResultsRequest proto.InternalMessageInfo + +func (m *VStreamResultsRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *VStreamResultsRequest) GetImmediateCallerId() *query.VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *VStreamResultsRequest) GetTarget() *query.Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *VStreamResultsRequest) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +// VStreamResultsResponse is the response from VStreamResults +// The ids match VStreamRows, in case we decide to merge the two. +type VStreamResultsResponse struct { + Fields []*query.Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` + Gtid string `protobuf:"bytes,3,opt,name=gtid,proto3" json:"gtid,omitempty"` + Rows []*query.Row `protobuf:"bytes,4,rep,name=rows,proto3" json:"rows,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamResultsResponse) Reset() { *m = VStreamResultsResponse{} } +func (m *VStreamResultsResponse) String() string { return proto.CompactTextString(m) } +func (*VStreamResultsResponse) ProtoMessage() {} +func (*VStreamResultsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5fd02bcb2e350dad, []int{26} +} + +func (m *VStreamResultsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamResultsResponse.Unmarshal(m, b) +} +func (m *VStreamResultsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VStreamResultsResponse.Marshal(b, m, deterministic) +} +func (m *VStreamResultsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamResultsResponse.Merge(m, src) +} +func (m *VStreamResultsResponse) XXX_Size() int { + return xxx_messageInfo_VStreamResultsResponse.Size(m) +} +func (m *VStreamResultsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamResultsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamResultsResponse proto.InternalMessageInfo + +func (m *VStreamResultsResponse) GetFields() []*query.Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *VStreamResultsResponse) GetGtid() string { + if m != nil { + return m.Gtid + } + return "" +} + +func (m *VStreamResultsResponse) GetRows() []*query.Row { + if m != nil { + return m.Rows + } + return nil +} + +func init() { + proto.RegisterEnum("binlogdata.OnDDLAction", OnDDLAction_name, OnDDLAction_value) + proto.RegisterEnum("binlogdata.VEventType", VEventType_name, VEventType_value) + proto.RegisterEnum("binlogdata.MigrationType", MigrationType_name, MigrationType_value) + proto.RegisterEnum("binlogdata.BinlogTransaction_Statement_Category", BinlogTransaction_Statement_Category_name, BinlogTransaction_Statement_Category_value) + proto.RegisterEnum("binlogdata.Filter_FieldEventMode", Filter_FieldEventMode_name, Filter_FieldEventMode_value) + proto.RegisterType((*Charset)(nil), "binlogdata.Charset") + proto.RegisterType((*BinlogTransaction)(nil), "binlogdata.BinlogTransaction") + proto.RegisterType((*BinlogTransaction_Statement)(nil), "binlogdata.BinlogTransaction.Statement") + proto.RegisterType((*StreamKeyRangeRequest)(nil), "binlogdata.StreamKeyRangeRequest") + proto.RegisterType((*StreamKeyRangeResponse)(nil), "binlogdata.StreamKeyRangeResponse") + proto.RegisterType((*StreamTablesRequest)(nil), "binlogdata.StreamTablesRequest") + proto.RegisterType((*StreamTablesResponse)(nil), "binlogdata.StreamTablesResponse") + proto.RegisterType((*Rule)(nil), "binlogdata.Rule") + proto.RegisterType((*Filter)(nil), "binlogdata.Filter") + proto.RegisterType((*BinlogSource)(nil), "binlogdata.BinlogSource") + proto.RegisterType((*RowChange)(nil), "binlogdata.RowChange") + proto.RegisterType((*RowEvent)(nil), "binlogdata.RowEvent") + proto.RegisterType((*FieldEvent)(nil), "binlogdata.FieldEvent") + proto.RegisterType((*ShardGtid)(nil), "binlogdata.ShardGtid") + proto.RegisterType((*VGtid)(nil), "binlogdata.VGtid") + proto.RegisterType((*KeyspaceShard)(nil), "binlogdata.KeyspaceShard") + proto.RegisterType((*Journal)(nil), "binlogdata.Journal") + proto.RegisterType((*VEvent)(nil), "binlogdata.VEvent") + proto.RegisterType((*MinimalTable)(nil), "binlogdata.MinimalTable") + proto.RegisterType((*MinimalSchema)(nil), "binlogdata.MinimalSchema") + proto.RegisterType((*VStreamRequest)(nil), "binlogdata.VStreamRequest") + proto.RegisterType((*VStreamResponse)(nil), "binlogdata.VStreamResponse") + proto.RegisterType((*VStreamRowsRequest)(nil), "binlogdata.VStreamRowsRequest") + proto.RegisterType((*VStreamRowsResponse)(nil), "binlogdata.VStreamRowsResponse") + proto.RegisterType((*LastPKEvent)(nil), "binlogdata.LastPKEvent") + proto.RegisterType((*TableLastPK)(nil), "binlogdata.TableLastPK") + proto.RegisterType((*VStreamResultsRequest)(nil), "binlogdata.VStreamResultsRequest") + proto.RegisterType((*VStreamResultsResponse)(nil), "binlogdata.VStreamResultsResponse") +} + +func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_5fd02bcb2e350dad) } + +var fileDescriptor_5fd02bcb2e350dad = []byte{ + // 1901 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4b, 0x73, 0xe3, 0xc6, + 0x11, 0x5e, 0xbe, 0xc9, 0x06, 0x45, 0x41, 0xa3, 0x47, 0x98, 0xad, 0xd8, 0x25, 0xa3, 0x62, 0xaf, + 0xac, 0xaa, 0x50, 0x0e, 0x13, 0x6f, 0x2e, 0xb1, 0x1d, 0x3e, 0xb0, 0x5a, 0xae, 0xf8, 0xd0, 0x0e, + 0xb1, 0x5a, 0x97, 0x2f, 0x28, 0x08, 0x1c, 0x49, 0x88, 0x00, 0x02, 0x0b, 0x0c, 0x25, 0xf3, 0x07, + 0xa4, 0x2a, 0xf7, 0xfc, 0x8a, 0x9c, 0x72, 0xc8, 0x35, 0xb9, 0x26, 0x7f, 0x22, 0xd7, 0x5c, 0x92, + 0x3f, 0x91, 0x9a, 0x07, 0x5e, 0x5a, 0x7b, 0xa5, 0x75, 0x55, 0x0e, 0xc9, 0x85, 0x35, 0xd3, 0xd3, + 0xdd, 0xd3, 0xaf, 0xaf, 0xd1, 0x1c, 0x50, 0xcf, 0x9d, 0xa5, 0xeb, 0x5f, 0x2e, 0x2c, 0x6a, 0x75, + 0x82, 0xd0, 0xa7, 0x3e, 0x82, 0x94, 0xf2, 0x58, 0xb9, 0xa1, 0x61, 0x60, 0x8b, 0x83, 0xc7, 0xca, + 0x9b, 0x15, 0x09, 0xd7, 0x72, 0xd3, 0xa2, 0x7e, 0xe0, 0xa7, 0x52, 0xda, 0x04, 0x6a, 0x83, 0x2b, + 0x2b, 0x8c, 0x08, 0x45, 0x7b, 0x50, 0xb5, 0x5d, 0x87, 0x2c, 0x69, 0xbb, 0xb0, 0x5f, 0x38, 0xa8, + 0x60, 0xb9, 0x43, 0x08, 0xca, 0xb6, 0xbf, 0x5c, 0xb6, 0x8b, 0x9c, 0xca, 0xd7, 0x8c, 0x37, 0x22, + 0xe1, 0x0d, 0x09, 0xdb, 0x25, 0xc1, 0x2b, 0x76, 0xda, 0x3f, 0x4b, 0xb0, 0xd5, 0xe7, 0x76, 0x18, + 0xa1, 0xb5, 0x8c, 0x2c, 0x9b, 0x3a, 0xfe, 0x12, 0x1d, 0x03, 0x44, 0xd4, 0xa2, 0xc4, 0x23, 0x4b, + 0x1a, 0xb5, 0x0b, 0xfb, 0xa5, 0x03, 0xa5, 0xfb, 0xa4, 0x93, 0xf1, 0xe0, 0x2d, 0x91, 0xce, 0x3c, + 0xe6, 0xc7, 0x19, 0x51, 0xd4, 0x05, 0x85, 0xdc, 0x90, 0x25, 0x35, 0xa9, 0x7f, 0x4d, 0x96, 0xed, + 0xf2, 0x7e, 0xe1, 0x40, 0xe9, 0x6e, 0x75, 0x84, 0x83, 0x3a, 0x3b, 0x31, 0xd8, 0x01, 0x06, 0x92, + 0xac, 0x1f, 0xff, 0xad, 0x08, 0x8d, 0x44, 0x1b, 0x1a, 0x43, 0xdd, 0xb6, 0x28, 0xb9, 0xf4, 0xc3, + 0x35, 0x77, 0xb3, 0xd5, 0xfd, 0xec, 0x81, 0x86, 0x74, 0x06, 0x52, 0x0e, 0x27, 0x1a, 0xd0, 0xcf, + 0xa0, 0x66, 0x8b, 0xe8, 0xf1, 0xe8, 0x28, 0xdd, 0xed, 0xac, 0x32, 0x19, 0x58, 0x1c, 0xf3, 0x20, + 0x15, 0x4a, 0xd1, 0x1b, 0x97, 0x87, 0xac, 0x89, 0xd9, 0x52, 0xfb, 0x63, 0x01, 0xea, 0xb1, 0x5e, + 0xb4, 0x0d, 0x9b, 0xfd, 0xb1, 0xf9, 0x6a, 0x8a, 0xf5, 0xc1, 0xec, 0x78, 0x3a, 0xfa, 0x46, 0x1f, + 0xaa, 0x8f, 0x50, 0x13, 0xea, 0xfd, 0xb1, 0xd9, 0xd7, 0x8f, 0x47, 0x53, 0xb5, 0x80, 0x36, 0xa0, + 0xd1, 0x1f, 0x9b, 0x83, 0xd9, 0x64, 0x32, 0x32, 0xd4, 0x22, 0xda, 0x04, 0xa5, 0x3f, 0x36, 0xf1, + 0x6c, 0x3c, 0xee, 0xf7, 0x06, 0x27, 0x6a, 0x09, 0xed, 0xc2, 0x56, 0x7f, 0x6c, 0x0e, 0x27, 0x63, + 0x73, 0xa8, 0x9f, 0x62, 0x7d, 0xd0, 0x33, 0xf4, 0xa1, 0x5a, 0x46, 0x00, 0x55, 0x46, 0x1e, 0x8e, + 0xd5, 0x8a, 0x5c, 0xcf, 0x75, 0x43, 0xad, 0x4a, 0x75, 0xa3, 0xe9, 0x5c, 0xc7, 0x86, 0x5a, 0x93, + 0xdb, 0x57, 0xa7, 0xc3, 0x9e, 0xa1, 0xab, 0x75, 0xb9, 0x1d, 0xea, 0x63, 0xdd, 0xd0, 0xd5, 0xc6, + 0x8b, 0x72, 0xbd, 0xa8, 0x96, 0x5e, 0x94, 0xeb, 0x25, 0xb5, 0xac, 0xfd, 0xa1, 0x00, 0xbb, 0x73, + 0x1a, 0x12, 0xcb, 0x3b, 0x21, 0x6b, 0x6c, 0x2d, 0x2f, 0x09, 0x26, 0x6f, 0x56, 0x24, 0xa2, 0xe8, + 0x31, 0xd4, 0x03, 0x3f, 0x72, 0x58, 0xec, 0x78, 0x80, 0x1b, 0x38, 0xd9, 0xa3, 0x23, 0x68, 0x5c, + 0x93, 0xb5, 0x19, 0x32, 0x7e, 0x19, 0x30, 0xd4, 0x49, 0x0a, 0x32, 0xd1, 0x54, 0xbf, 0x96, 0xab, + 0x6c, 0x7c, 0x4b, 0xf7, 0xc7, 0x57, 0xbb, 0x80, 0xbd, 0xbb, 0x46, 0x45, 0x81, 0xbf, 0x8c, 0x08, + 0x1a, 0x03, 0x12, 0x82, 0x26, 0x4d, 0x73, 0xcb, 0xed, 0x53, 0xba, 0x1f, 0xbc, 0xb3, 0x00, 0xf0, + 0xd6, 0xf9, 0x5d, 0x92, 0xf6, 0x2d, 0x6c, 0x8b, 0x7b, 0x0c, 0xeb, 0xdc, 0x25, 0xd1, 0x43, 0x5c, + 0xdf, 0x83, 0x2a, 0xe5, 0xcc, 0xed, 0xe2, 0x7e, 0xe9, 0xa0, 0x81, 0xe5, 0xee, 0x7d, 0x3d, 0x5c, + 0xc0, 0x4e, 0xfe, 0xe6, 0xff, 0x8a, 0x7f, 0xbf, 0x84, 0x32, 0x5e, 0xb9, 0x04, 0xed, 0x40, 0xc5, + 0xb3, 0xa8, 0x7d, 0x25, 0xbd, 0x11, 0x1b, 0xe6, 0xca, 0x85, 0xe3, 0x52, 0x12, 0xf2, 0x14, 0x36, + 0xb0, 0xdc, 0x69, 0x7f, 0x2e, 0x40, 0xf5, 0x19, 0x5f, 0xa2, 0x4f, 0xa0, 0x12, 0xae, 0x98, 0xb3, + 0x02, 0xeb, 0x6a, 0xd6, 0x02, 0xa6, 0x19, 0x8b, 0x63, 0x34, 0x82, 0xd6, 0x85, 0x43, 0xdc, 0x05, + 0x87, 0xee, 0xc4, 0x5f, 0x88, 0xaa, 0x68, 0x75, 0x3f, 0xca, 0x0a, 0x08, 0x9d, 0x9d, 0x67, 0x39, + 0x46, 0x7c, 0x47, 0x50, 0x7b, 0x0a, 0xad, 0x3c, 0x07, 0x83, 0x93, 0x8e, 0xb1, 0x39, 0x9b, 0x9a, + 0x93, 0xd1, 0x7c, 0xd2, 0x33, 0x06, 0xcf, 0xd5, 0x47, 0x1c, 0x31, 0xfa, 0xdc, 0x30, 0xf5, 0x67, + 0xcf, 0x66, 0xd8, 0x50, 0x0b, 0xda, 0xbf, 0x8b, 0xd0, 0x14, 0x41, 0x99, 0xfb, 0xab, 0xd0, 0x26, + 0x2c, 0x8b, 0xd7, 0x64, 0x1d, 0x05, 0x96, 0x4d, 0xe2, 0x2c, 0xc6, 0x7b, 0x16, 0x90, 0xe8, 0xca, + 0x0a, 0x17, 0xd2, 0x73, 0xb1, 0x41, 0x9f, 0x83, 0xc2, 0xb3, 0x49, 0x4d, 0xba, 0x0e, 0x08, 0xcf, + 0x63, 0xab, 0xbb, 0x93, 0x16, 0x36, 0xcf, 0x15, 0x35, 0xd6, 0x01, 0xc1, 0x40, 0x93, 0x75, 0x1e, + 0x0d, 0xe5, 0x07, 0xa0, 0x21, 0xad, 0xa1, 0x4a, 0xae, 0x86, 0x0e, 0x93, 0x84, 0x54, 0xa5, 0x96, + 0xb7, 0xa2, 0x17, 0x27, 0x09, 0x75, 0xa0, 0xea, 0x2f, 0xcd, 0xc5, 0xc2, 0x6d, 0xd7, 0xb8, 0x99, + 0x3f, 0xca, 0xf2, 0xce, 0x96, 0xc3, 0xe1, 0xb8, 0x27, 0xca, 0xa2, 0xe2, 0x2f, 0x87, 0x0b, 0x17, + 0x7d, 0x0c, 0x2d, 0xf2, 0x2d, 0x25, 0xe1, 0xd2, 0x72, 0x4d, 0x6f, 0xcd, 0xba, 0x57, 0x9d, 0xbb, + 0xbe, 0x11, 0x53, 0x27, 0x8c, 0x88, 0x3e, 0x81, 0xcd, 0x88, 0xfa, 0x81, 0x69, 0x5d, 0x50, 0x12, + 0x9a, 0xb6, 0x1f, 0xac, 0xdb, 0x8d, 0xfd, 0xc2, 0x41, 0x1d, 0x6f, 0x30, 0x72, 0x8f, 0x51, 0x07, + 0x7e, 0xb0, 0xd6, 0x5e, 0x42, 0x03, 0xfb, 0xb7, 0x83, 0x2b, 0xee, 0x8f, 0x06, 0xd5, 0x73, 0x72, + 0xe1, 0x87, 0x44, 0x16, 0x2a, 0xc8, 0x46, 0x8e, 0xfd, 0x5b, 0x2c, 0x4f, 0xd0, 0x3e, 0x54, 0xb8, + 0x4e, 0xd9, 0x2e, 0xb2, 0x2c, 0xe2, 0x40, 0xb3, 0xa0, 0x8e, 0xfd, 0x5b, 0x9e, 0x76, 0xf4, 0x01, + 0x88, 0x00, 0x9b, 0x4b, 0xcb, 0x8b, 0xb3, 0xd7, 0xe0, 0x94, 0xa9, 0xe5, 0x11, 0xf4, 0x14, 0x94, + 0xd0, 0xbf, 0x35, 0x6d, 0x7e, 0xbd, 0x40, 0xa2, 0xd2, 0xdd, 0xcd, 0x15, 0x67, 0x6c, 0x1c, 0x86, + 0x30, 0x5e, 0x46, 0xda, 0x4b, 0x80, 0xb4, 0xb6, 0xee, 0xbb, 0xe4, 0xa7, 0x2c, 0x1b, 0xc4, 0x5d, + 0xc4, 0xfa, 0x9b, 0xd2, 0x64, 0xae, 0x01, 0xcb, 0x33, 0xed, 0xf7, 0x05, 0x68, 0xcc, 0x59, 0xf5, + 0x1c, 0x53, 0x67, 0xf1, 0x03, 0x6a, 0x0e, 0x41, 0xf9, 0x92, 0x3a, 0x0b, 0x5e, 0x6c, 0x0d, 0xcc, + 0xd7, 0xe8, 0xf3, 0xd8, 0xb0, 0xc0, 0xbc, 0x8e, 0xda, 0x65, 0x7e, 0x7b, 0x2e, 0xbf, 0xbc, 0x10, + 0xc7, 0x56, 0x44, 0x4f, 0x4f, 0x70, 0x9d, 0xb3, 0x9e, 0x9e, 0x44, 0xda, 0x57, 0x50, 0x39, 0xe3, + 0x56, 0x3c, 0x05, 0x85, 0x2b, 0x37, 0x99, 0xb6, 0x18, 0xbb, 0xb9, 0xf0, 0x24, 0x16, 0x63, 0x88, + 0xe2, 0x65, 0xa4, 0xf5, 0x60, 0xe3, 0x44, 0x5a, 0xcb, 0x19, 0xde, 0xdf, 0x1d, 0xed, 0x2f, 0x45, + 0xa8, 0xbd, 0xf0, 0x57, 0xac, 0xa0, 0x50, 0x0b, 0x8a, 0xce, 0x82, 0xcb, 0x95, 0x70, 0xd1, 0x59, + 0xa0, 0xdf, 0x40, 0xcb, 0x73, 0x2e, 0x43, 0x8b, 0x95, 0xa5, 0x40, 0x98, 0x68, 0x12, 0x3f, 0xce, + 0x5a, 0x36, 0x89, 0x39, 0x38, 0xcc, 0x36, 0xbc, 0xec, 0x36, 0x03, 0x9c, 0x52, 0x0e, 0x38, 0x1f, + 0x43, 0xcb, 0xf5, 0x6d, 0xcb, 0x35, 0x93, 0xb6, 0x5d, 0x16, 0xc5, 0xcd, 0xa9, 0xa7, 0x71, 0xef, + 0xbe, 0x13, 0x97, 0xca, 0x03, 0xe3, 0x82, 0xbe, 0x80, 0x66, 0x60, 0x85, 0xd4, 0xb1, 0x9d, 0xc0, + 0x62, 0x83, 0x4f, 0x95, 0x0b, 0xe6, 0xcc, 0xce, 0xc5, 0x0d, 0xe7, 0xd8, 0xd1, 0xa7, 0xa0, 0x46, + 0xbc, 0x25, 0x99, 0xb7, 0x7e, 0x78, 0x7d, 0xe1, 0xfa, 0xb7, 0x51, 0xbb, 0xc6, 0xed, 0xdf, 0x14, + 0xf4, 0xd7, 0x31, 0x59, 0xfb, 0x53, 0x09, 0xaa, 0x67, 0xa2, 0x3a, 0x0f, 0xa1, 0xcc, 0x63, 0x24, + 0x86, 0x9b, 0xbd, 0xec, 0x65, 0x82, 0x83, 0x07, 0x88, 0xf3, 0xa0, 0x9f, 0x40, 0x83, 0x3a, 0x1e, + 0x89, 0xa8, 0xe5, 0x05, 0x3c, 0xa8, 0x25, 0x9c, 0x12, 0xbe, 0xb3, 0xc4, 0x54, 0x28, 0xb1, 0xde, + 0x21, 0xc2, 0xc4, 0x96, 0xe8, 0xe7, 0xd0, 0x60, 0x98, 0xe2, 0x03, 0x57, 0xbb, 0xc2, 0x41, 0xba, + 0x73, 0x07, 0x51, 0xfc, 0x5a, 0x5c, 0x0f, 0x63, 0x94, 0xfe, 0x0a, 0x14, 0x8e, 0x02, 0x29, 0x24, + 0x9a, 0xd6, 0x5e, 0xbe, 0x69, 0xc5, 0x68, 0xc3, 0x90, 0xf6, 0x79, 0xf4, 0x04, 0x2a, 0x37, 0xdc, + 0xa4, 0x9a, 0x1c, 0xfc, 0xb2, 0xce, 0xf1, 0xf0, 0x8b, 0x73, 0xf6, 0x55, 0xfd, 0xad, 0xa8, 0x26, + 0xde, 0xae, 0xee, 0x7c, 0x55, 0x65, 0xa1, 0xe1, 0x98, 0x87, 0x7b, 0xe5, 0xb9, 0xbc, 0x63, 0x31, + 0xaf, 0x3c, 0x17, 0x7d, 0x04, 0x4d, 0x7b, 0x15, 0x86, 0x7c, 0xd4, 0x74, 0x3c, 0xd2, 0xde, 0xe1, + 0xc1, 0x51, 0x24, 0xcd, 0x70, 0x3c, 0x82, 0x7e, 0x0d, 0x2d, 0xd7, 0x8a, 0x28, 0x03, 0x9b, 0x74, + 0x64, 0x97, 0x5f, 0x95, 0x43, 0x9c, 0x00, 0x9b, 0xf0, 0x44, 0x71, 0xd3, 0x8d, 0x76, 0x05, 0xcd, + 0x89, 0xb3, 0x74, 0x3c, 0xcb, 0xe5, 0xa0, 0x64, 0xc1, 0xce, 0xb4, 0x13, 0xbe, 0x7e, 0x58, 0x27, + 0x41, 0x1f, 0x82, 0xc2, 0x4c, 0xb0, 0x7d, 0x77, 0xe5, 0x2d, 0x45, 0x85, 0x97, 0x70, 0x23, 0x38, + 0x19, 0x08, 0x02, 0x43, 0xa7, 0xbc, 0x69, 0x6e, 0x5f, 0x11, 0xcf, 0x42, 0x9f, 0x25, 0x68, 0x10, + 0x08, 0x6f, 0xe7, 0x71, 0x94, 0x1a, 0x15, 0xe3, 0x44, 0xfb, 0x7b, 0x11, 0x5a, 0x67, 0x62, 0xee, + 0x88, 0x67, 0x9d, 0xaf, 0x60, 0x9b, 0x5c, 0x5c, 0x10, 0x9b, 0x3a, 0x37, 0xc4, 0xb4, 0x2d, 0xd7, + 0x25, 0xa1, 0x29, 0x51, 0xab, 0x74, 0x37, 0x3b, 0xe2, 0xff, 0xc7, 0x80, 0xd3, 0x47, 0x43, 0xbc, + 0x95, 0xf0, 0x4a, 0xd2, 0x02, 0xe9, 0xb0, 0xed, 0x78, 0x1e, 0x59, 0x38, 0x16, 0xcd, 0x2a, 0x10, + 0x6d, 0x7e, 0x57, 0x7a, 0x7a, 0x66, 0x1c, 0x5b, 0x94, 0xa4, 0x6a, 0x12, 0x89, 0x44, 0xcd, 0xc7, + 0xcc, 0x99, 0xf0, 0x32, 0x19, 0x9f, 0x36, 0xa4, 0xa4, 0xc1, 0x89, 0x58, 0x1e, 0xe6, 0x46, 0xb3, + 0xf2, 0x9d, 0xd1, 0x2c, 0xfd, 0x7c, 0x56, 0xee, 0xfd, 0x7c, 0x7e, 0x09, 0x9b, 0xa2, 0xc5, 0xc6, + 0xa9, 0x8f, 0x51, 0xfd, 0xbd, 0x7d, 0xb6, 0x49, 0xd3, 0x4d, 0xa4, 0x7d, 0x01, 0x9b, 0x49, 0x20, + 0xe5, 0xe8, 0x76, 0x08, 0x55, 0x5e, 0x3e, 0x71, 0x3a, 0xd0, 0xdb, 0x90, 0xc5, 0x92, 0x43, 0xfb, + 0x5d, 0x11, 0x50, 0x2c, 0xef, 0xdf, 0x46, 0xff, 0xa3, 0xc9, 0xd8, 0x81, 0x0a, 0xa7, 0xcb, 0x4c, + 0x88, 0x0d, 0x8b, 0x03, 0x0b, 0x6a, 0x70, 0x9d, 0xa4, 0x41, 0x08, 0xbf, 0x64, 0xbf, 0x98, 0x44, + 0x2b, 0x97, 0x62, 0xc9, 0xa1, 0xfd, 0xb5, 0x00, 0xdb, 0xb9, 0x38, 0xc8, 0x58, 0xa6, 0x88, 0x29, + 0xbc, 0x03, 0x31, 0x07, 0x50, 0x0f, 0xae, 0xdf, 0x81, 0xac, 0xe4, 0xf4, 0x3b, 0x5b, 0xe0, 0x87, + 0x50, 0x0e, 0x59, 0x2b, 0x16, 0xdf, 0xd7, 0xec, 0x40, 0xc2, 0xe9, 0x6c, 0xaa, 0xc9, 0xf9, 0x91, + 0x9b, 0x6a, 0xa4, 0xfd, 0x0e, 0x28, 0x99, 0xce, 0xc0, 0x5a, 0x49, 0xbe, 0xaa, 0x64, 0xea, 0xbe, + 0xb7, 0xa8, 0x94, 0x4c, 0x51, 0xb1, 0x2e, 0x6e, 0xfb, 0x5e, 0xe0, 0x12, 0x4a, 0x44, 0xca, 0xea, + 0x38, 0x25, 0x68, 0x5f, 0x83, 0x92, 0x91, 0xbc, 0x6f, 0x78, 0x49, 0x93, 0x50, 0xba, 0x37, 0x09, + 0xff, 0x28, 0xc0, 0x6e, 0x5a, 0xcc, 0x2b, 0x97, 0xfe, 0x5f, 0xd5, 0xa3, 0x16, 0xc2, 0xde, 0x5d, + 0xef, 0xde, 0xab, 0xca, 0x7e, 0x40, 0xed, 0x1c, 0x7e, 0x09, 0x4a, 0x66, 0x06, 0x67, 0x7f, 0xd5, + 0x47, 0xc7, 0xd3, 0x19, 0xd6, 0xd5, 0x47, 0xa8, 0x0e, 0xe5, 0xb9, 0x31, 0x3b, 0x55, 0x0b, 0x6c, + 0xa5, 0x7f, 0xad, 0x0f, 0xc4, 0xdf, 0x7f, 0xb6, 0x32, 0x25, 0x53, 0xe9, 0xf0, 0x5f, 0x05, 0x80, + 0xf4, 0x2b, 0x8f, 0x14, 0xa8, 0xbd, 0x9a, 0x9e, 0x4c, 0x67, 0xaf, 0xa7, 0x42, 0xc1, 0xb1, 0x31, + 0x1a, 0xaa, 0x05, 0xd4, 0x80, 0x8a, 0x78, 0x4f, 0x28, 0xb2, 0x1b, 0xe4, 0x63, 0x42, 0x09, 0x35, + 0xa1, 0x9e, 0xbc, 0x24, 0x94, 0x51, 0x0d, 0x4a, 0xc9, 0x7b, 0x81, 0x7c, 0x20, 0xa8, 0x32, 0x85, + 0x58, 0x3f, 0x1d, 0xf7, 0x06, 0xba, 0x5a, 0x63, 0x07, 0xc9, 0x53, 0x01, 0x40, 0x35, 0x7e, 0x27, + 0x60, 0x92, 0x73, 0xdd, 0x50, 0x81, 0xdd, 0x33, 0x33, 0x9e, 0xeb, 0x58, 0x55, 0x18, 0x0d, 0xcf, + 0x5e, 0xab, 0x4d, 0x46, 0x7b, 0x36, 0xd2, 0xc7, 0x43, 0x75, 0x03, 0x6d, 0x40, 0xe3, 0xb9, 0xde, + 0xc3, 0x46, 0x5f, 0xef, 0x19, 0x6a, 0x8b, 0x9d, 0x9c, 0x71, 0x03, 0x37, 0xd9, 0x35, 0x2f, 0x66, + 0xaf, 0xf0, 0xb4, 0x37, 0x56, 0x55, 0xb6, 0x39, 0xd3, 0xf1, 0x7c, 0x34, 0x9b, 0xaa, 0x5b, 0xec, + 0x9e, 0x71, 0x6f, 0x6e, 0x9c, 0x9e, 0xa8, 0xe8, 0xf0, 0x09, 0xfb, 0xb0, 0x65, 0xc7, 0x3c, 0x80, + 0xaa, 0xd1, 0xeb, 0x8f, 0xf5, 0xb9, 0xfa, 0x88, 0xad, 0xe7, 0xcf, 0x7b, 0x78, 0x38, 0x57, 0x0b, + 0xfd, 0x4f, 0xbf, 0x79, 0x72, 0xe3, 0x50, 0x12, 0x45, 0x1d, 0xc7, 0x3f, 0x12, 0xab, 0xa3, 0x4b, + 0xff, 0xe8, 0x86, 0x1e, 0xf1, 0x37, 0xb0, 0xa3, 0x14, 0x64, 0xe7, 0x55, 0x4e, 0xf9, 0xc5, 0x7f, + 0x02, 0x00, 0x00, 0xff, 0xff, 0xb6, 0x0c, 0x0f, 0x53, 0x5f, 0x13, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/binlogservice/binlogservice.pb.go b/internal/stackql-parser-fork/go/vt/proto/binlogservice/binlogservice.pb.go new file mode 100644 index 00000000..365ad3f3 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/binlogservice/binlogservice.pb.go @@ -0,0 +1,223 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: binlogservice.proto + +package binlogservice + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + binlogdata "github.com/stackql/stackql-parser/go/vt/proto/binlogdata" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("binlogservice.proto", fileDescriptor_4ccdea02fd9c8d58) } + +var fileDescriptor_4ccdea02fd9c8d58 = []byte{ + // 177 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4e, 0xca, 0xcc, 0xcb, + 0xc9, 0x4f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0xe2, 0x45, 0x11, 0x94, 0x12, 0x80, 0x70, 0x53, 0x12, 0x4b, 0x12, 0x21, 0x0a, 0x8c, 0x0e, 0x31, + 0x72, 0xf1, 0x84, 0x16, 0xa4, 0x24, 0x96, 0xa4, 0x06, 0x97, 0x14, 0xa5, 0x26, 0xe6, 0x0a, 0x45, + 0x73, 0xf1, 0x41, 0x58, 0xde, 0xa9, 0x95, 0x41, 0x89, 0x79, 0xe9, 0xa9, 0x42, 0x8a, 0x7a, 0x48, + 0xba, 0x50, 0xe5, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0xa4, 0x94, 0xf0, 0x29, 0x29, 0x2e, + 0xc8, 0xcf, 0x2b, 0x4e, 0x55, 0x62, 0x30, 0x60, 0x14, 0x0a, 0xe5, 0xe2, 0x81, 0xc8, 0x86, 0x24, + 0x26, 0xe5, 0xa4, 0x16, 0x0b, 0xc9, 0x63, 0xea, 0x83, 0xc8, 0xc0, 0x0c, 0x56, 0xc0, 0xad, 0x00, + 0x61, 0xac, 0x93, 0x4e, 0x94, 0x56, 0x59, 0x66, 0x49, 0x6a, 0x71, 0xb1, 0x5e, 0x66, 0xbe, 0x3e, + 0x84, 0xa5, 0x9f, 0x9e, 0xaf, 0x5f, 0x56, 0xa2, 0x0f, 0xf6, 0xa4, 0x3e, 0x4a, 0x20, 0x24, 0xb1, + 0x81, 0x05, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xf4, 0x0a, 0x9c, 0x31, 0x01, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// UpdateStreamClient is the client API for UpdateStream service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UpdateStreamClient interface { + // StreamKeyRange returns the binlog transactions related to + // the specified Keyrange. + StreamKeyRange(ctx context.Context, in *binlogdata.StreamKeyRangeRequest, opts ...grpc.CallOption) (UpdateStream_StreamKeyRangeClient, error) + // StreamTables returns the binlog transactions related to + // the specified Tables. + StreamTables(ctx context.Context, in *binlogdata.StreamTablesRequest, opts ...grpc.CallOption) (UpdateStream_StreamTablesClient, error) +} + +type updateStreamClient struct { + cc *grpc.ClientConn +} + +func NewUpdateStreamClient(cc *grpc.ClientConn) UpdateStreamClient { + return &updateStreamClient{cc} +} + +func (c *updateStreamClient) StreamKeyRange(ctx context.Context, in *binlogdata.StreamKeyRangeRequest, opts ...grpc.CallOption) (UpdateStream_StreamKeyRangeClient, error) { + stream, err := c.cc.NewStream(ctx, &_UpdateStream_serviceDesc.Streams[0], "/binlogservice.UpdateStream/StreamKeyRange", opts...) + if err != nil { + return nil, err + } + x := &updateStreamStreamKeyRangeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type UpdateStream_StreamKeyRangeClient interface { + Recv() (*binlogdata.StreamKeyRangeResponse, error) + grpc.ClientStream +} + +type updateStreamStreamKeyRangeClient struct { + grpc.ClientStream +} + +func (x *updateStreamStreamKeyRangeClient) Recv() (*binlogdata.StreamKeyRangeResponse, error) { + m := new(binlogdata.StreamKeyRangeResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *updateStreamClient) StreamTables(ctx context.Context, in *binlogdata.StreamTablesRequest, opts ...grpc.CallOption) (UpdateStream_StreamTablesClient, error) { + stream, err := c.cc.NewStream(ctx, &_UpdateStream_serviceDesc.Streams[1], "/binlogservice.UpdateStream/StreamTables", opts...) + if err != nil { + return nil, err + } + x := &updateStreamStreamTablesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type UpdateStream_StreamTablesClient interface { + Recv() (*binlogdata.StreamTablesResponse, error) + grpc.ClientStream +} + +type updateStreamStreamTablesClient struct { + grpc.ClientStream +} + +func (x *updateStreamStreamTablesClient) Recv() (*binlogdata.StreamTablesResponse, error) { + m := new(binlogdata.StreamTablesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// UpdateStreamServer is the server API for UpdateStream service. +type UpdateStreamServer interface { + // StreamKeyRange returns the binlog transactions related to + // the specified Keyrange. + StreamKeyRange(*binlogdata.StreamKeyRangeRequest, UpdateStream_StreamKeyRangeServer) error + // StreamTables returns the binlog transactions related to + // the specified Tables. + StreamTables(*binlogdata.StreamTablesRequest, UpdateStream_StreamTablesServer) error +} + +// UnimplementedUpdateStreamServer can be embedded to have forward compatible implementations. +type UnimplementedUpdateStreamServer struct { +} + +func (*UnimplementedUpdateStreamServer) StreamKeyRange(req *binlogdata.StreamKeyRangeRequest, srv UpdateStream_StreamKeyRangeServer) error { + return status.Errorf(codes.Unimplemented, "method StreamKeyRange not implemented") +} +func (*UnimplementedUpdateStreamServer) StreamTables(req *binlogdata.StreamTablesRequest, srv UpdateStream_StreamTablesServer) error { + return status.Errorf(codes.Unimplemented, "method StreamTables not implemented") +} + +func RegisterUpdateStreamServer(s *grpc.Server, srv UpdateStreamServer) { + s.RegisterService(&_UpdateStream_serviceDesc, srv) +} + +func _UpdateStream_StreamKeyRange_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(binlogdata.StreamKeyRangeRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(UpdateStreamServer).StreamKeyRange(m, &updateStreamStreamKeyRangeServer{stream}) +} + +type UpdateStream_StreamKeyRangeServer interface { + Send(*binlogdata.StreamKeyRangeResponse) error + grpc.ServerStream +} + +type updateStreamStreamKeyRangeServer struct { + grpc.ServerStream +} + +func (x *updateStreamStreamKeyRangeServer) Send(m *binlogdata.StreamKeyRangeResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _UpdateStream_StreamTables_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(binlogdata.StreamTablesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(UpdateStreamServer).StreamTables(m, &updateStreamStreamTablesServer{stream}) +} + +type UpdateStream_StreamTablesServer interface { + Send(*binlogdata.StreamTablesResponse) error + grpc.ServerStream +} + +type updateStreamStreamTablesServer struct { + grpc.ServerStream +} + +func (x *updateStreamStreamTablesServer) Send(m *binlogdata.StreamTablesResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _UpdateStream_serviceDesc = grpc.ServiceDesc{ + ServiceName: "binlogservice.UpdateStream", + HandlerType: (*UpdateStreamServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamKeyRange", + Handler: _UpdateStream_StreamKeyRange_Handler, + ServerStreams: true, + }, + { + StreamName: "StreamTables", + Handler: _UpdateStream_StreamTables_Handler, + ServerStreams: true, + }, + }, + Metadata: "binlogservice.proto", +} diff --git a/internal/stackql-parser-fork/go/vt/proto/logutil/logutil.pb.go b/internal/stackql-parser-fork/go/vt/proto/logutil/logutil.pb.go new file mode 100644 index 00000000..531367b0 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/logutil/logutil.pb.go @@ -0,0 +1,157 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: logutil.proto + +package logutil + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + vttime "github.com/stackql/stackql-parser/go/vt/proto/vttime" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Level is the level of the log messages. +type Level int32 + +const ( + // The usual logging levels. + // Should be logged using logging facility. + Level_INFO Level = 0 + Level_WARNING Level = 1 + Level_ERROR Level = 2 + // For messages that may contains non-logging events. + // Should be logged to console directly. + Level_CONSOLE Level = 3 +) + +var Level_name = map[int32]string{ + 0: "INFO", + 1: "WARNING", + 2: "ERROR", + 3: "CONSOLE", +} + +var Level_value = map[string]int32{ + "INFO": 0, + "WARNING": 1, + "ERROR": 2, + "CONSOLE": 3, +} + +func (x Level) String() string { + return proto.EnumName(Level_name, int32(x)) +} + +func (Level) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_31f5dd3702a8edf9, []int{0} +} + +// Event is a single logging event +type Event struct { + Time *vttime.Time `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + Level Level `protobuf:"varint,2,opt,name=level,proto3,enum=logutil.Level" json:"level,omitempty"` + File string `protobuf:"bytes,3,opt,name=file,proto3" json:"file,omitempty"` + Line int64 `protobuf:"varint,4,opt,name=line,proto3" json:"line,omitempty"` + Value string `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_31f5dd3702a8edf9, []int{0} +} + +func (m *Event) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Event.Unmarshal(m, b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return xxx_messageInfo_Event.Size(m) +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *Event) GetTime() *vttime.Time { + if m != nil { + return m.Time + } + return nil +} + +func (m *Event) GetLevel() Level { + if m != nil { + return m.Level + } + return Level_INFO +} + +func (m *Event) GetFile() string { + if m != nil { + return m.File + } + return "" +} + +func (m *Event) GetLine() int64 { + if m != nil { + return m.Line + } + return 0 +} + +func (m *Event) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func init() { + proto.RegisterEnum("logutil.Level", Level_name, Level_value) + proto.RegisterType((*Event)(nil), "logutil.Event") +} + +func init() { proto.RegisterFile("logutil.proto", fileDescriptor_31f5dd3702a8edf9) } + +var fileDescriptor_31f5dd3702a8edf9 = []byte{ + // 236 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8f, 0x5f, 0x4b, 0xc3, 0x30, + 0x14, 0xc5, 0xcd, 0xda, 0x38, 0x77, 0x37, 0x47, 0xb9, 0xf8, 0x10, 0x7c, 0x0a, 0x32, 0xa4, 0xf8, + 0xd0, 0xc0, 0x04, 0xdf, 0x55, 0xaa, 0x0c, 0x46, 0x0b, 0x57, 0x41, 0xf0, 0x4d, 0xe1, 0x3a, 0x02, + 0xd9, 0x22, 0x2e, 0xcd, 0xb7, 0xf0, 0x3b, 0x4b, 0xd3, 0xfa, 0x76, 0xce, 0xef, 0x1c, 0xee, 0x1f, + 0x38, 0x77, 0x7e, 0xd7, 0x05, 0xeb, 0xaa, 0xef, 0x1f, 0x1f, 0x3c, 0x4e, 0x47, 0x7b, 0xb9, 0x88, + 0x21, 0xd8, 0x3d, 0x0f, 0xf8, 0xea, 0x57, 0x80, 0xac, 0x23, 0x1f, 0x02, 0x6a, 0xc8, 0x7b, 0xae, + 0x84, 0x16, 0xe5, 0x7c, 0xbd, 0xa8, 0xc6, 0xda, 0xab, 0xdd, 0x33, 0xa5, 0x04, 0x57, 0x20, 0x1d, + 0x47, 0x76, 0x6a, 0xa2, 0x45, 0xb9, 0x5c, 0x2f, 0xab, 0xff, 0x0d, 0xdb, 0x9e, 0xd2, 0x10, 0x22, + 0x42, 0xfe, 0x65, 0x1d, 0xab, 0x4c, 0x8b, 0x72, 0x46, 0x49, 0xf7, 0xcc, 0xd9, 0x03, 0xab, 0x5c, + 0x8b, 0x32, 0xa3, 0xa4, 0xf1, 0x02, 0x64, 0xfc, 0x70, 0x1d, 0x2b, 0x99, 0x8a, 0x83, 0xb9, 0xb9, + 0x03, 0x99, 0xa6, 0xe1, 0x19, 0xe4, 0x9b, 0xe6, 0xa9, 0x2d, 0x4e, 0x70, 0x0e, 0xd3, 0xb7, 0x7b, + 0x6a, 0x36, 0xcd, 0x73, 0x21, 0x70, 0x06, 0xb2, 0x26, 0x6a, 0xa9, 0x98, 0xf4, 0xfc, 0xb1, 0x6d, + 0x5e, 0xda, 0x6d, 0x5d, 0x64, 0x0f, 0xd7, 0xef, 0xab, 0x68, 0x03, 0x1f, 0x8f, 0x95, 0xf5, 0x66, + 0x50, 0x66, 0xe7, 0x4d, 0x0c, 0x26, 0xfd, 0x69, 0xc6, 0x53, 0x3f, 0x4f, 0x93, 0xbd, 0xfd, 0x0b, + 0x00, 0x00, 0xff, 0xff, 0xa4, 0x27, 0x83, 0x63, 0x1e, 0x01, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/mysqlctl/mysqlctl.pb.go b/internal/stackql-parser-fork/go/vt/proto/mysqlctl/mysqlctl.pb.go new file mode 100644 index 00000000..2b80a997 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/mysqlctl/mysqlctl.pb.go @@ -0,0 +1,617 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: mysqlctl.proto + +package mysqlctl + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type StartRequest struct { + MysqldArgs []string `protobuf:"bytes,1,rep,name=mysqld_args,json=mysqldArgs,proto3" json:"mysqld_args,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartRequest) Reset() { *m = StartRequest{} } +func (m *StartRequest) String() string { return proto.CompactTextString(m) } +func (*StartRequest) ProtoMessage() {} +func (*StartRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cd8c110e42f9cbb9, []int{0} +} + +func (m *StartRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartRequest.Unmarshal(m, b) +} +func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic) +} +func (m *StartRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartRequest.Merge(m, src) +} +func (m *StartRequest) XXX_Size() int { + return xxx_messageInfo_StartRequest.Size(m) +} +func (m *StartRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartRequest proto.InternalMessageInfo + +func (m *StartRequest) GetMysqldArgs() []string { + if m != nil { + return m.MysqldArgs + } + return nil +} + +type StartResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartResponse) Reset() { *m = StartResponse{} } +func (m *StartResponse) String() string { return proto.CompactTextString(m) } +func (*StartResponse) ProtoMessage() {} +func (*StartResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cd8c110e42f9cbb9, []int{1} +} + +func (m *StartResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartResponse.Unmarshal(m, b) +} +func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic) +} +func (m *StartResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartResponse.Merge(m, src) +} +func (m *StartResponse) XXX_Size() int { + return xxx_messageInfo_StartResponse.Size(m) +} +func (m *StartResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartResponse proto.InternalMessageInfo + +type ShutdownRequest struct { + WaitForMysqld bool `protobuf:"varint,1,opt,name=wait_for_mysqld,json=waitForMysqld,proto3" json:"wait_for_mysqld,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} } +func (m *ShutdownRequest) String() string { return proto.CompactTextString(m) } +func (*ShutdownRequest) ProtoMessage() {} +func (*ShutdownRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cd8c110e42f9cbb9, []int{2} +} + +func (m *ShutdownRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutdownRequest.Unmarshal(m, b) +} +func (m *ShutdownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShutdownRequest.Marshal(b, m, deterministic) +} +func (m *ShutdownRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutdownRequest.Merge(m, src) +} +func (m *ShutdownRequest) XXX_Size() int { + return xxx_messageInfo_ShutdownRequest.Size(m) +} +func (m *ShutdownRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ShutdownRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutdownRequest proto.InternalMessageInfo + +func (m *ShutdownRequest) GetWaitForMysqld() bool { + if m != nil { + return m.WaitForMysqld + } + return false +} + +type ShutdownResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutdownResponse) Reset() { *m = ShutdownResponse{} } +func (m *ShutdownResponse) String() string { return proto.CompactTextString(m) } +func (*ShutdownResponse) ProtoMessage() {} +func (*ShutdownResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cd8c110e42f9cbb9, []int{3} +} + +func (m *ShutdownResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutdownResponse.Unmarshal(m, b) +} +func (m *ShutdownResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShutdownResponse.Marshal(b, m, deterministic) +} +func (m *ShutdownResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutdownResponse.Merge(m, src) +} +func (m *ShutdownResponse) XXX_Size() int { + return xxx_messageInfo_ShutdownResponse.Size(m) +} +func (m *ShutdownResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ShutdownResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutdownResponse proto.InternalMessageInfo + +type RunMysqlUpgradeRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunMysqlUpgradeRequest) Reset() { *m = RunMysqlUpgradeRequest{} } +func (m *RunMysqlUpgradeRequest) String() string { return proto.CompactTextString(m) } +func (*RunMysqlUpgradeRequest) ProtoMessage() {} +func (*RunMysqlUpgradeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cd8c110e42f9cbb9, []int{4} +} + +func (m *RunMysqlUpgradeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunMysqlUpgradeRequest.Unmarshal(m, b) +} +func (m *RunMysqlUpgradeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunMysqlUpgradeRequest.Marshal(b, m, deterministic) +} +func (m *RunMysqlUpgradeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunMysqlUpgradeRequest.Merge(m, src) +} +func (m *RunMysqlUpgradeRequest) XXX_Size() int { + return xxx_messageInfo_RunMysqlUpgradeRequest.Size(m) +} +func (m *RunMysqlUpgradeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RunMysqlUpgradeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RunMysqlUpgradeRequest proto.InternalMessageInfo + +type RunMysqlUpgradeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunMysqlUpgradeResponse) Reset() { *m = RunMysqlUpgradeResponse{} } +func (m *RunMysqlUpgradeResponse) String() string { return proto.CompactTextString(m) } +func (*RunMysqlUpgradeResponse) ProtoMessage() {} +func (*RunMysqlUpgradeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cd8c110e42f9cbb9, []int{5} +} + +func (m *RunMysqlUpgradeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunMysqlUpgradeResponse.Unmarshal(m, b) +} +func (m *RunMysqlUpgradeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunMysqlUpgradeResponse.Marshal(b, m, deterministic) +} +func (m *RunMysqlUpgradeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunMysqlUpgradeResponse.Merge(m, src) +} +func (m *RunMysqlUpgradeResponse) XXX_Size() int { + return xxx_messageInfo_RunMysqlUpgradeResponse.Size(m) +} +func (m *RunMysqlUpgradeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RunMysqlUpgradeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RunMysqlUpgradeResponse proto.InternalMessageInfo + +type ReinitConfigRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReinitConfigRequest) Reset() { *m = ReinitConfigRequest{} } +func (m *ReinitConfigRequest) String() string { return proto.CompactTextString(m) } +func (*ReinitConfigRequest) ProtoMessage() {} +func (*ReinitConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cd8c110e42f9cbb9, []int{6} +} + +func (m *ReinitConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReinitConfigRequest.Unmarshal(m, b) +} +func (m *ReinitConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReinitConfigRequest.Marshal(b, m, deterministic) +} +func (m *ReinitConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReinitConfigRequest.Merge(m, src) +} +func (m *ReinitConfigRequest) XXX_Size() int { + return xxx_messageInfo_ReinitConfigRequest.Size(m) +} +func (m *ReinitConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReinitConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReinitConfigRequest proto.InternalMessageInfo + +type ReinitConfigResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReinitConfigResponse) Reset() { *m = ReinitConfigResponse{} } +func (m *ReinitConfigResponse) String() string { return proto.CompactTextString(m) } +func (*ReinitConfigResponse) ProtoMessage() {} +func (*ReinitConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cd8c110e42f9cbb9, []int{7} +} + +func (m *ReinitConfigResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReinitConfigResponse.Unmarshal(m, b) +} +func (m *ReinitConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReinitConfigResponse.Marshal(b, m, deterministic) +} +func (m *ReinitConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReinitConfigResponse.Merge(m, src) +} +func (m *ReinitConfigResponse) XXX_Size() int { + return xxx_messageInfo_ReinitConfigResponse.Size(m) +} +func (m *ReinitConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReinitConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReinitConfigResponse proto.InternalMessageInfo + +type RefreshConfigRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RefreshConfigRequest) Reset() { *m = RefreshConfigRequest{} } +func (m *RefreshConfigRequest) String() string { return proto.CompactTextString(m) } +func (*RefreshConfigRequest) ProtoMessage() {} +func (*RefreshConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cd8c110e42f9cbb9, []int{8} +} + +func (m *RefreshConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RefreshConfigRequest.Unmarshal(m, b) +} +func (m *RefreshConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RefreshConfigRequest.Marshal(b, m, deterministic) +} +func (m *RefreshConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RefreshConfigRequest.Merge(m, src) +} +func (m *RefreshConfigRequest) XXX_Size() int { + return xxx_messageInfo_RefreshConfigRequest.Size(m) +} +func (m *RefreshConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RefreshConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RefreshConfigRequest proto.InternalMessageInfo + +type RefreshConfigResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RefreshConfigResponse) Reset() { *m = RefreshConfigResponse{} } +func (m *RefreshConfigResponse) String() string { return proto.CompactTextString(m) } +func (*RefreshConfigResponse) ProtoMessage() {} +func (*RefreshConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cd8c110e42f9cbb9, []int{9} +} + +func (m *RefreshConfigResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RefreshConfigResponse.Unmarshal(m, b) +} +func (m *RefreshConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RefreshConfigResponse.Marshal(b, m, deterministic) +} +func (m *RefreshConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RefreshConfigResponse.Merge(m, src) +} +func (m *RefreshConfigResponse) XXX_Size() int { + return xxx_messageInfo_RefreshConfigResponse.Size(m) +} +func (m *RefreshConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RefreshConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RefreshConfigResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*StartRequest)(nil), "mysqlctl.StartRequest") + proto.RegisterType((*StartResponse)(nil), "mysqlctl.StartResponse") + proto.RegisterType((*ShutdownRequest)(nil), "mysqlctl.ShutdownRequest") + proto.RegisterType((*ShutdownResponse)(nil), "mysqlctl.ShutdownResponse") + proto.RegisterType((*RunMysqlUpgradeRequest)(nil), "mysqlctl.RunMysqlUpgradeRequest") + proto.RegisterType((*RunMysqlUpgradeResponse)(nil), "mysqlctl.RunMysqlUpgradeResponse") + proto.RegisterType((*ReinitConfigRequest)(nil), "mysqlctl.ReinitConfigRequest") + proto.RegisterType((*ReinitConfigResponse)(nil), "mysqlctl.ReinitConfigResponse") + proto.RegisterType((*RefreshConfigRequest)(nil), "mysqlctl.RefreshConfigRequest") + proto.RegisterType((*RefreshConfigResponse)(nil), "mysqlctl.RefreshConfigResponse") +} + +func init() { proto.RegisterFile("mysqlctl.proto", fileDescriptor_cd8c110e42f9cbb9) } + +var fileDescriptor_cd8c110e42f9cbb9 = []byte{ + // 339 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x4d, 0x4f, 0xfa, 0x30, + 0x1c, 0xc7, 0xff, 0x84, 0xfc, 0xcd, 0xfc, 0x09, 0xce, 0x54, 0x79, 0x6a, 0xa2, 0xe0, 0x12, 0x95, + 0x13, 0x4d, 0xf4, 0xa4, 0x37, 0x25, 0xf1, 0x66, 0x4c, 0x4a, 0x4c, 0x8c, 0x17, 0x32, 0xa5, 0x8c, + 0x26, 0xb8, 0x42, 0x5b, 0x20, 0xbe, 0x05, 0x5f, 0xb5, 0xb1, 0x6b, 0xc7, 0xc6, 0xc0, 0xdb, 0xfa, + 0x7d, 0x6a, 0xf6, 0xd9, 0xe0, 0xf0, 0xf3, 0x4b, 0xcd, 0xa7, 0x1f, 0x7a, 0xda, 0x9b, 0x49, 0xa1, + 0x05, 0xf2, 0xdc, 0x39, 0x20, 0x50, 0x19, 0xe8, 0x50, 0x6a, 0xca, 0xe6, 0x0b, 0xa6, 0x34, 0x6a, + 0xc3, 0x81, 0xf1, 0x46, 0xc3, 0x50, 0x46, 0xaa, 0x59, 0xea, 0x94, 0xbb, 0xfb, 0x14, 0x12, 0xe9, + 0x5e, 0x46, 0x2a, 0xf0, 0xa1, 0x6a, 0x0b, 0x6a, 0x26, 0x62, 0xc5, 0x82, 0x5b, 0xf0, 0x07, 0x93, + 0x85, 0x1e, 0x89, 0x55, 0xec, 0x46, 0x2e, 0xc1, 0x5f, 0x85, 0x5c, 0x0f, 0xc7, 0x42, 0x0e, 0x93, + 0x6a, 0xb3, 0xd4, 0x29, 0x75, 0x3d, 0x5a, 0xfd, 0x95, 0x1f, 0x85, 0x7c, 0x32, 0x62, 0x80, 0xe0, + 0x68, 0x5d, 0xb5, 0x73, 0x4d, 0xa8, 0xd3, 0x45, 0x6c, 0x02, 0x2f, 0xb3, 0x48, 0x86, 0x23, 0x66, + 0x57, 0x83, 0x16, 0x34, 0x0a, 0x8e, 0x2d, 0xd5, 0xe0, 0x98, 0x32, 0x1e, 0x73, 0xdd, 0x17, 0xf1, + 0x98, 0x47, 0xae, 0x51, 0x87, 0x93, 0xbc, 0x6c, 0xe3, 0x46, 0x1f, 0x4b, 0xa6, 0x26, 0xf9, 0x7c, + 0x03, 0x6a, 0x1b, 0x7a, 0x52, 0xb8, 0xfe, 0x2e, 0x83, 0x67, 0x2e, 0xee, 0xeb, 0x29, 0xba, 0x83, + 0xff, 0x86, 0x00, 0xaa, 0xf7, 0x52, 0xac, 0x59, 0x86, 0xb8, 0x51, 0xd0, 0xed, 0xbd, 0xff, 0x50, + 0x1f, 0x3c, 0xf7, 0xc6, 0xa8, 0x95, 0x89, 0xe5, 0x01, 0x62, 0xbc, 0xcd, 0x4a, 0x47, 0x5e, 0xc1, + 0xdf, 0x00, 0x81, 0x3a, 0xeb, 0xc2, 0x76, 0x7a, 0xf8, 0xfc, 0x8f, 0x44, 0xba, 0xfc, 0x0c, 0x95, + 0x2c, 0x30, 0x74, 0x9a, 0x29, 0x15, 0xf9, 0xe2, 0xb3, 0x5d, 0x76, 0x3a, 0x48, 0xa1, 0x9a, 0x23, + 0x8a, 0x72, 0x95, 0xe2, 0x27, 0xc0, 0xed, 0x9d, 0xbe, 0xdb, 0x7c, 0xb8, 0x7a, 0xbb, 0x58, 0x72, + 0xcd, 0x94, 0xea, 0x71, 0x41, 0x92, 0x27, 0x12, 0x09, 0xb2, 0xd4, 0xc4, 0xfc, 0xdc, 0xc4, 0x0d, + 0xbc, 0xef, 0x99, 0xf3, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xe2, 0x15, 0x86, 0xfe, + 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MysqlCtlClient is the client API for MysqlCtl service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MysqlCtlClient interface { + Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) + Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) + RunMysqlUpgrade(ctx context.Context, in *RunMysqlUpgradeRequest, opts ...grpc.CallOption) (*RunMysqlUpgradeResponse, error) + ReinitConfig(ctx context.Context, in *ReinitConfigRequest, opts ...grpc.CallOption) (*ReinitConfigResponse, error) + RefreshConfig(ctx context.Context, in *RefreshConfigRequest, opts ...grpc.CallOption) (*RefreshConfigResponse, error) +} + +type mysqlCtlClient struct { + cc *grpc.ClientConn +} + +func NewMysqlCtlClient(cc *grpc.ClientConn) MysqlCtlClient { + return &mysqlCtlClient{cc} +} + +func (c *mysqlCtlClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { + out := new(StartResponse) + err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/Start", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *mysqlCtlClient) Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) { + out := new(ShutdownResponse) + err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *mysqlCtlClient) RunMysqlUpgrade(ctx context.Context, in *RunMysqlUpgradeRequest, opts ...grpc.CallOption) (*RunMysqlUpgradeResponse, error) { + out := new(RunMysqlUpgradeResponse) + err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/RunMysqlUpgrade", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *mysqlCtlClient) ReinitConfig(ctx context.Context, in *ReinitConfigRequest, opts ...grpc.CallOption) (*ReinitConfigResponse, error) { + out := new(ReinitConfigResponse) + err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/ReinitConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *mysqlCtlClient) RefreshConfig(ctx context.Context, in *RefreshConfigRequest, opts ...grpc.CallOption) (*RefreshConfigResponse, error) { + out := new(RefreshConfigResponse) + err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/RefreshConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MysqlCtlServer is the server API for MysqlCtl service. +type MysqlCtlServer interface { + Start(context.Context, *StartRequest) (*StartResponse, error) + Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) + RunMysqlUpgrade(context.Context, *RunMysqlUpgradeRequest) (*RunMysqlUpgradeResponse, error) + ReinitConfig(context.Context, *ReinitConfigRequest) (*ReinitConfigResponse, error) + RefreshConfig(context.Context, *RefreshConfigRequest) (*RefreshConfigResponse, error) +} + +// UnimplementedMysqlCtlServer can be embedded to have forward compatible implementations. +type UnimplementedMysqlCtlServer struct { +} + +func (*UnimplementedMysqlCtlServer) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Start not implemented") +} +func (*UnimplementedMysqlCtlServer) Shutdown(ctx context.Context, req *ShutdownRequest) (*ShutdownResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") +} +func (*UnimplementedMysqlCtlServer) RunMysqlUpgrade(ctx context.Context, req *RunMysqlUpgradeRequest) (*RunMysqlUpgradeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RunMysqlUpgrade not implemented") +} +func (*UnimplementedMysqlCtlServer) ReinitConfig(ctx context.Context, req *ReinitConfigRequest) (*ReinitConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReinitConfig not implemented") +} +func (*UnimplementedMysqlCtlServer) RefreshConfig(ctx context.Context, req *RefreshConfigRequest) (*RefreshConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RefreshConfig not implemented") +} + +func RegisterMysqlCtlServer(s *grpc.Server, srv MysqlCtlServer) { + s.RegisterService(&_MysqlCtl_serviceDesc, srv) +} + +func _MysqlCtl_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MysqlCtlServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mysqlctl.MysqlCtl/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MysqlCtlServer).Start(ctx, req.(*StartRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MysqlCtl_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ShutdownRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MysqlCtlServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mysqlctl.MysqlCtl/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MysqlCtlServer).Shutdown(ctx, req.(*ShutdownRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MysqlCtl_RunMysqlUpgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunMysqlUpgradeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MysqlCtlServer).RunMysqlUpgrade(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mysqlctl.MysqlCtl/RunMysqlUpgrade", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MysqlCtlServer).RunMysqlUpgrade(ctx, req.(*RunMysqlUpgradeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MysqlCtl_ReinitConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReinitConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MysqlCtlServer).ReinitConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mysqlctl.MysqlCtl/ReinitConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MysqlCtlServer).ReinitConfig(ctx, req.(*ReinitConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MysqlCtl_RefreshConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RefreshConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MysqlCtlServer).RefreshConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mysqlctl.MysqlCtl/RefreshConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MysqlCtlServer).RefreshConfig(ctx, req.(*RefreshConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MysqlCtl_serviceDesc = grpc.ServiceDesc{ + ServiceName: "mysqlctl.MysqlCtl", + HandlerType: (*MysqlCtlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Start", + Handler: _MysqlCtl_Start_Handler, + }, + { + MethodName: "Shutdown", + Handler: _MysqlCtl_Shutdown_Handler, + }, + { + MethodName: "RunMysqlUpgrade", + Handler: _MysqlCtl_RunMysqlUpgrade_Handler, + }, + { + MethodName: "ReinitConfig", + Handler: _MysqlCtl_ReinitConfig_Handler, + }, + { + MethodName: "RefreshConfig", + Handler: _MysqlCtl_RefreshConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "mysqlctl.proto", +} diff --git a/internal/stackql-parser-fork/go/vt/proto/query/query.pb.go b/internal/stackql-parser-fork/go/vt/proto/query/query.pb.go new file mode 100644 index 00000000..91acf143 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/query/query.pb.go @@ -0,0 +1,4509 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: query.proto + +package query + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + topodata "github.com/stackql/stackql-parser/go/vt/proto/topodata" + vtrpc "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Flags sent from the MySQL C API +type MySqlFlag int32 + +const ( + MySqlFlag_EMPTY MySqlFlag = 0 + MySqlFlag_NOT_NULL_FLAG MySqlFlag = 1 + MySqlFlag_PRI_KEY_FLAG MySqlFlag = 2 + MySqlFlag_UNIQUE_KEY_FLAG MySqlFlag = 4 + MySqlFlag_MULTIPLE_KEY_FLAG MySqlFlag = 8 + MySqlFlag_BLOB_FLAG MySqlFlag = 16 + MySqlFlag_UNSIGNED_FLAG MySqlFlag = 32 + MySqlFlag_ZEROFILL_FLAG MySqlFlag = 64 + MySqlFlag_BINARY_FLAG MySqlFlag = 128 + MySqlFlag_ENUM_FLAG MySqlFlag = 256 + MySqlFlag_AUTO_INCREMENT_FLAG MySqlFlag = 512 + MySqlFlag_TIMESTAMP_FLAG MySqlFlag = 1024 + MySqlFlag_SET_FLAG MySqlFlag = 2048 + MySqlFlag_NO_DEFAULT_VALUE_FLAG MySqlFlag = 4096 + MySqlFlag_ON_UPDATE_NOW_FLAG MySqlFlag = 8192 + MySqlFlag_NUM_FLAG MySqlFlag = 32768 + MySqlFlag_PART_KEY_FLAG MySqlFlag = 16384 + MySqlFlag_GROUP_FLAG MySqlFlag = 32768 + MySqlFlag_UNIQUE_FLAG MySqlFlag = 65536 + MySqlFlag_BINCMP_FLAG MySqlFlag = 131072 +) + +var MySqlFlag_name = map[int32]string{ + 0: "EMPTY", + 1: "NOT_NULL_FLAG", + 2: "PRI_KEY_FLAG", + 4: "UNIQUE_KEY_FLAG", + 8: "MULTIPLE_KEY_FLAG", + 16: "BLOB_FLAG", + 32: "UNSIGNED_FLAG", + 64: "ZEROFILL_FLAG", + 128: "BINARY_FLAG", + 256: "ENUM_FLAG", + 512: "AUTO_INCREMENT_FLAG", + 1024: "TIMESTAMP_FLAG", + 2048: "SET_FLAG", + 4096: "NO_DEFAULT_VALUE_FLAG", + 8192: "ON_UPDATE_NOW_FLAG", + 32768: "NUM_FLAG", + 16384: "PART_KEY_FLAG", + // Duplicate value: 32768: "GROUP_FLAG", + 65536: "UNIQUE_FLAG", + 131072: "BINCMP_FLAG", +} + +var MySqlFlag_value = map[string]int32{ + "EMPTY": 0, + "NOT_NULL_FLAG": 1, + "PRI_KEY_FLAG": 2, + "UNIQUE_KEY_FLAG": 4, + "MULTIPLE_KEY_FLAG": 8, + "BLOB_FLAG": 16, + "UNSIGNED_FLAG": 32, + "ZEROFILL_FLAG": 64, + "BINARY_FLAG": 128, + "ENUM_FLAG": 256, + "AUTO_INCREMENT_FLAG": 512, + "TIMESTAMP_FLAG": 1024, + "SET_FLAG": 2048, + "NO_DEFAULT_VALUE_FLAG": 4096, + "ON_UPDATE_NOW_FLAG": 8192, + "NUM_FLAG": 32768, + "PART_KEY_FLAG": 16384, + "GROUP_FLAG": 32768, + "UNIQUE_FLAG": 65536, + "BINCMP_FLAG": 131072, +} + +func (x MySqlFlag) String() string { + return proto.EnumName(MySqlFlag_name, int32(x)) +} + +func (MySqlFlag) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{0} +} + +// Flag allows us to qualify types by their common properties. +type Flag int32 + +const ( + Flag_NONE Flag = 0 + Flag_ISINTEGRAL Flag = 256 + Flag_ISUNSIGNED Flag = 512 + Flag_ISFLOAT Flag = 1024 + Flag_ISQUOTED Flag = 2048 + Flag_ISTEXT Flag = 4096 + Flag_ISBINARY Flag = 8192 +) + +var Flag_name = map[int32]string{ + 0: "NONE", + 256: "ISINTEGRAL", + 512: "ISUNSIGNED", + 1024: "ISFLOAT", + 2048: "ISQUOTED", + 4096: "ISTEXT", + 8192: "ISBINARY", +} + +var Flag_value = map[string]int32{ + "NONE": 0, + "ISINTEGRAL": 256, + "ISUNSIGNED": 512, + "ISFLOAT": 1024, + "ISQUOTED": 2048, + "ISTEXT": 4096, + "ISBINARY": 8192, +} + +func (x Flag) String() string { + return proto.EnumName(Flag_name, int32(x)) +} + +func (Flag) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{1} +} + +// Type defines the various supported data types in bind vars +// and query results. +type Type int32 + +const ( + // NULL_TYPE specifies a NULL type. + Type_NULL_TYPE Type = 0 + // INT8 specifies a TINYINT type. + // Properties: 1, IsNumber. + Type_INT8 Type = 257 + // UINT8 specifies a TINYINT UNSIGNED type. + // Properties: 2, IsNumber, IsUnsigned. + Type_UINT8 Type = 770 + // INT16 specifies a SMALLINT type. + // Properties: 3, IsNumber. + Type_INT16 Type = 259 + // UINT16 specifies a SMALLINT UNSIGNED type. + // Properties: 4, IsNumber, IsUnsigned. + Type_UINT16 Type = 772 + // INT24 specifies a MEDIUMINT type. + // Properties: 5, IsNumber. + Type_INT24 Type = 261 + // UINT24 specifies a MEDIUMINT UNSIGNED type. + // Properties: 6, IsNumber, IsUnsigned. + Type_UINT24 Type = 774 + // INT32 specifies a INTEGER type. + // Properties: 7, IsNumber. + Type_INT32 Type = 263 + // UINT32 specifies a INTEGER UNSIGNED type. + // Properties: 8, IsNumber, IsUnsigned. + Type_UINT32 Type = 776 + // INT64 specifies a BIGINT type. + // Properties: 9, IsNumber. + Type_INT64 Type = 265 + // UINT64 specifies a BIGINT UNSIGNED type. + // Properties: 10, IsNumber, IsUnsigned. + Type_UINT64 Type = 778 + // FLOAT32 specifies a FLOAT type. + // Properties: 11, IsFloat. + Type_FLOAT32 Type = 1035 + // FLOAT64 specifies a DOUBLE or REAL type. + // Properties: 12, IsFloat. + Type_FLOAT64 Type = 1036 + // TIMESTAMP specifies a TIMESTAMP type. + // Properties: 13, IsQuoted. + Type_TIMESTAMP Type = 2061 + // DATE specifies a DATE type. + // Properties: 14, IsQuoted. + Type_DATE Type = 2062 + // TIME specifies a TIME type. + // Properties: 15, IsQuoted. + Type_TIME Type = 2063 + // DATETIME specifies a DATETIME type. + // Properties: 16, IsQuoted. + Type_DATETIME Type = 2064 + // YEAR specifies a YEAR type. + // Properties: 17, IsNumber, IsUnsigned. + Type_YEAR Type = 785 + // DECIMAL specifies a DECIMAL or NUMERIC type. + // Properties: 18, None. + Type_DECIMAL Type = 18 + // TEXT specifies a TEXT type. + // Properties: 19, IsQuoted, IsText. + Type_TEXT Type = 6163 + // BLOB specifies a BLOB type. + // Properties: 20, IsQuoted, IsBinary. + Type_BLOB Type = 10260 + // VARCHAR specifies a VARCHAR type. + // Properties: 21, IsQuoted, IsText. + Type_VARCHAR Type = 6165 + // VARBINARY specifies a VARBINARY type. + // Properties: 22, IsQuoted, IsBinary. + Type_VARBINARY Type = 10262 + // CHAR specifies a CHAR type. + // Properties: 23, IsQuoted, IsText. + Type_CHAR Type = 6167 + // BINARY specifies a BINARY type. + // Properties: 24, IsQuoted, IsBinary. + Type_BINARY Type = 10264 + // BIT specifies a BIT type. + // Properties: 25, IsQuoted. + Type_BIT Type = 2073 + // ENUM specifies an ENUM type. + // Properties: 26, IsQuoted. + Type_ENUM Type = 2074 + // SET specifies a SET type. + // Properties: 27, IsQuoted. + Type_SET Type = 2075 + // TUPLE specifies a tuple. This cannot + // be returned in a QueryResult, but it can + // be sent as a bind var. + // Properties: 28, None. + Type_TUPLE Type = 28 + // GEOMETRY specifies a GEOMETRY type. + // Properties: 29, IsQuoted. + Type_GEOMETRY Type = 2077 + // JSON specifies a JSON type. + // Properties: 30, IsQuoted. + Type_JSON Type = 2078 + // EXPRESSION specifies a SQL expression. + // This type is for internal use only. + // Properties: 31, None. + Type_EXPRESSION Type = 31 +) + +var Type_name = map[int32]string{ + 0: "NULL_TYPE", + 257: "INT8", + 770: "UINT8", + 259: "INT16", + 772: "UINT16", + 261: "INT24", + 774: "UINT24", + 263: "INT32", + 776: "UINT32", + 265: "INT64", + 778: "UINT64", + 1035: "FLOAT32", + 1036: "FLOAT64", + 2061: "TIMESTAMP", + 2062: "DATE", + 2063: "TIME", + 2064: "DATETIME", + 785: "YEAR", + 18: "DECIMAL", + 6163: "TEXT", + 10260: "BLOB", + 6165: "VARCHAR", + 10262: "VARBINARY", + 6167: "CHAR", + 10264: "BINARY", + 2073: "BIT", + 2074: "ENUM", + 2075: "SET", + 28: "TUPLE", + 2077: "GEOMETRY", + 2078: "JSON", + 31: "EXPRESSION", +} + +var Type_value = map[string]int32{ + "NULL_TYPE": 0, + "INT8": 257, + "UINT8": 770, + "INT16": 259, + "UINT16": 772, + "INT24": 261, + "UINT24": 774, + "INT32": 263, + "UINT32": 776, + "INT64": 265, + "UINT64": 778, + "FLOAT32": 1035, + "FLOAT64": 1036, + "TIMESTAMP": 2061, + "DATE": 2062, + "TIME": 2063, + "DATETIME": 2064, + "YEAR": 785, + "DECIMAL": 18, + "TEXT": 6163, + "BLOB": 10260, + "VARCHAR": 6165, + "VARBINARY": 10262, + "CHAR": 6167, + "BINARY": 10264, + "BIT": 2073, + "ENUM": 2074, + "SET": 2075, + "TUPLE": 28, + "GEOMETRY": 2077, + "JSON": 2078, + "EXPRESSION": 31, +} + +func (x Type) String() string { + return proto.EnumName(Type_name, int32(x)) +} + +func (Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{2} +} + +// TransactionState represents the state of a distributed transaction. +type TransactionState int32 + +const ( + TransactionState_UNKNOWN TransactionState = 0 + TransactionState_PREPARE TransactionState = 1 + TransactionState_COMMIT TransactionState = 2 + TransactionState_ROLLBACK TransactionState = 3 +) + +var TransactionState_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PREPARE", + 2: "COMMIT", + 3: "ROLLBACK", +} + +var TransactionState_value = map[string]int32{ + "UNKNOWN": 0, + "PREPARE": 1, + "COMMIT": 2, + "ROLLBACK": 3, +} + +func (x TransactionState) String() string { + return proto.EnumName(TransactionState_name, int32(x)) +} + +func (TransactionState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{3} +} + +type ExecuteOptions_IncludedFields int32 + +const ( + ExecuteOptions_TYPE_AND_NAME ExecuteOptions_IncludedFields = 0 + ExecuteOptions_TYPE_ONLY ExecuteOptions_IncludedFields = 1 + ExecuteOptions_ALL ExecuteOptions_IncludedFields = 2 +) + +var ExecuteOptions_IncludedFields_name = map[int32]string{ + 0: "TYPE_AND_NAME", + 1: "TYPE_ONLY", + 2: "ALL", +} + +var ExecuteOptions_IncludedFields_value = map[string]int32{ + "TYPE_AND_NAME": 0, + "TYPE_ONLY": 1, + "ALL": 2, +} + +func (x ExecuteOptions_IncludedFields) String() string { + return proto.EnumName(ExecuteOptions_IncludedFields_name, int32(x)) +} + +func (ExecuteOptions_IncludedFields) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{6, 0} +} + +type ExecuteOptions_Workload int32 + +const ( + ExecuteOptions_UNSPECIFIED ExecuteOptions_Workload = 0 + ExecuteOptions_OLTP ExecuteOptions_Workload = 1 + ExecuteOptions_OLAP ExecuteOptions_Workload = 2 + ExecuteOptions_DBA ExecuteOptions_Workload = 3 +) + +var ExecuteOptions_Workload_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "OLTP", + 2: "OLAP", + 3: "DBA", +} + +var ExecuteOptions_Workload_value = map[string]int32{ + "UNSPECIFIED": 0, + "OLTP": 1, + "OLAP": 2, + "DBA": 3, +} + +func (x ExecuteOptions_Workload) String() string { + return proto.EnumName(ExecuteOptions_Workload_name, int32(x)) +} + +func (ExecuteOptions_Workload) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{6, 1} +} + +type ExecuteOptions_TransactionIsolation int32 + +const ( + ExecuteOptions_DEFAULT ExecuteOptions_TransactionIsolation = 0 + ExecuteOptions_REPEATABLE_READ ExecuteOptions_TransactionIsolation = 1 + ExecuteOptions_READ_COMMITTED ExecuteOptions_TransactionIsolation = 2 + ExecuteOptions_READ_UNCOMMITTED ExecuteOptions_TransactionIsolation = 3 + ExecuteOptions_SERIALIZABLE ExecuteOptions_TransactionIsolation = 4 + // This is not an "official" transaction level but it will do a + // START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY + ExecuteOptions_CONSISTENT_SNAPSHOT_READ_ONLY ExecuteOptions_TransactionIsolation = 5 + // This not an "official" transaction level, it will send queries to mysql + // without wrapping them in a transaction + ExecuteOptions_AUTOCOMMIT ExecuteOptions_TransactionIsolation = 6 +) + +var ExecuteOptions_TransactionIsolation_name = map[int32]string{ + 0: "DEFAULT", + 1: "REPEATABLE_READ", + 2: "READ_COMMITTED", + 3: "READ_UNCOMMITTED", + 4: "SERIALIZABLE", + 5: "CONSISTENT_SNAPSHOT_READ_ONLY", + 6: "AUTOCOMMIT", +} + +var ExecuteOptions_TransactionIsolation_value = map[string]int32{ + "DEFAULT": 0, + "REPEATABLE_READ": 1, + "READ_COMMITTED": 2, + "READ_UNCOMMITTED": 3, + "SERIALIZABLE": 4, + "CONSISTENT_SNAPSHOT_READ_ONLY": 5, + "AUTOCOMMIT": 6, +} + +func (x ExecuteOptions_TransactionIsolation) String() string { + return proto.EnumName(ExecuteOptions_TransactionIsolation_name, int32(x)) +} + +func (ExecuteOptions_TransactionIsolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{6, 2} +} + +// The category of one statement. +type StreamEvent_Statement_Category int32 + +const ( + StreamEvent_Statement_Error StreamEvent_Statement_Category = 0 + StreamEvent_Statement_DML StreamEvent_Statement_Category = 1 + StreamEvent_Statement_DDL StreamEvent_Statement_Category = 2 +) + +var StreamEvent_Statement_Category_name = map[int32]string{ + 0: "Error", + 1: "DML", + 2: "DDL", +} + +var StreamEvent_Statement_Category_value = map[string]int32{ + "Error": 0, + "DML": 1, + "DDL": 2, +} + +func (x StreamEvent_Statement_Category) String() string { + return proto.EnumName(StreamEvent_Statement_Category_name, int32(x)) +} + +func (StreamEvent_Statement_Category) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{11, 0, 0} +} + +// Target describes what the client expects the tablet is. +// If the tablet does not match, an error is returned. +type Target struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + TabletType topodata.TabletType `protobuf:"varint,3,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + // cell is used for routing queries between vtgate and vttablets. It + // is not used when Target is part of the Session sent by the client. + Cell string `protobuf:"bytes,4,opt,name=cell,proto3" json:"cell,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Target) Reset() { *m = Target{} } +func (m *Target) String() string { return proto.CompactTextString(m) } +func (*Target) ProtoMessage() {} +func (*Target) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{0} +} + +func (m *Target) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Target.Unmarshal(m, b) +} +func (m *Target) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Target.Marshal(b, m, deterministic) +} +func (m *Target) XXX_Merge(src proto.Message) { + xxx_messageInfo_Target.Merge(m, src) +} +func (m *Target) XXX_Size() int { + return xxx_messageInfo_Target.Size(m) +} +func (m *Target) XXX_DiscardUnknown() { + xxx_messageInfo_Target.DiscardUnknown(m) +} + +var xxx_messageInfo_Target proto.InternalMessageInfo + +func (m *Target) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *Target) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *Target) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *Target) GetCell() string { + if m != nil { + return m.Cell + } + return "" +} + +// VTGateCallerID is sent by VTGate to VTTablet to describe the +// caller. If possible, this information is secure. For instance, +// if using unique certificates that guarantee that VTGate->VTTablet +// traffic cannot be spoofed, then VTTablet can trust this information, +// and VTTablet will use it for tablet ACLs, for instance. +// Because of this security guarantee, this is different than the CallerID +// structure, which is not secure at all, because it is provided +// by the Vitess client. +type VTGateCallerID struct { + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Groups []string `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VTGateCallerID) Reset() { *m = VTGateCallerID{} } +func (m *VTGateCallerID) String() string { return proto.CompactTextString(m) } +func (*VTGateCallerID) ProtoMessage() {} +func (*VTGateCallerID) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{1} +} + +func (m *VTGateCallerID) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VTGateCallerID.Unmarshal(m, b) +} +func (m *VTGateCallerID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VTGateCallerID.Marshal(b, m, deterministic) +} +func (m *VTGateCallerID) XXX_Merge(src proto.Message) { + xxx_messageInfo_VTGateCallerID.Merge(m, src) +} +func (m *VTGateCallerID) XXX_Size() int { + return xxx_messageInfo_VTGateCallerID.Size(m) +} +func (m *VTGateCallerID) XXX_DiscardUnknown() { + xxx_messageInfo_VTGateCallerID.DiscardUnknown(m) +} + +var xxx_messageInfo_VTGateCallerID proto.InternalMessageInfo + +func (m *VTGateCallerID) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *VTGateCallerID) GetGroups() []string { + if m != nil { + return m.Groups + } + return nil +} + +// EventToken is a structure that describes a point in time in a +// replication stream on one shard. The most recent known replication +// position can be retrieved from vttablet when executing a query. It +// is also sent with the replication streams from the binlog service. +type EventToken struct { + // timestamp is the MySQL timestamp of the statements. Seconds since Epoch. + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The shard name that applied the statements. Note this is not set when + // streaming from a vttablet. It is only used on the client -> vtgate link. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // The position on the replication stream after this statement was applied. + // It is not the transaction ID / GTID, but the position / GTIDSet. + Position string `protobuf:"bytes,3,opt,name=position,proto3" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EventToken) Reset() { *m = EventToken{} } +func (m *EventToken) String() string { return proto.CompactTextString(m) } +func (*EventToken) ProtoMessage() {} +func (*EventToken) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{2} +} + +func (m *EventToken) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EventToken.Unmarshal(m, b) +} +func (m *EventToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EventToken.Marshal(b, m, deterministic) +} +func (m *EventToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventToken.Merge(m, src) +} +func (m *EventToken) XXX_Size() int { + return xxx_messageInfo_EventToken.Size(m) +} +func (m *EventToken) XXX_DiscardUnknown() { + xxx_messageInfo_EventToken.DiscardUnknown(m) +} + +var xxx_messageInfo_EventToken proto.InternalMessageInfo + +func (m *EventToken) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *EventToken) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *EventToken) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +// Value represents a typed value. +type Value struct { + Type Type `protobuf:"varint,1,opt,name=type,proto3,enum=query.Type" json:"type,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{3} +} + +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +func (m *Value) GetType() Type { + if m != nil { + return m.Type + } + return Type_NULL_TYPE +} + +func (m *Value) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// BindVariable represents a single bind variable in a Query. +type BindVariable struct { + Type Type `protobuf:"varint,1,opt,name=type,proto3,enum=query.Type" json:"type,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // values are set if type is TUPLE. + Values []*Value `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BindVariable) Reset() { *m = BindVariable{} } +func (m *BindVariable) String() string { return proto.CompactTextString(m) } +func (*BindVariable) ProtoMessage() {} +func (*BindVariable) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{4} +} + +func (m *BindVariable) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BindVariable.Unmarshal(m, b) +} +func (m *BindVariable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BindVariable.Marshal(b, m, deterministic) +} +func (m *BindVariable) XXX_Merge(src proto.Message) { + xxx_messageInfo_BindVariable.Merge(m, src) +} +func (m *BindVariable) XXX_Size() int { + return xxx_messageInfo_BindVariable.Size(m) +} +func (m *BindVariable) XXX_DiscardUnknown() { + xxx_messageInfo_BindVariable.DiscardUnknown(m) +} + +var xxx_messageInfo_BindVariable proto.InternalMessageInfo + +func (m *BindVariable) GetType() Type { + if m != nil { + return m.Type + } + return Type_NULL_TYPE +} + +func (m *BindVariable) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *BindVariable) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +// BoundQuery is a query with its bind variables +type BoundQuery struct { + // sql is the SQL query to execute + Sql string `protobuf:"bytes,1,opt,name=sql,proto3" json:"sql,omitempty"` + // bind_variables is a map of all bind variables to expand in the query. + // nil values are not allowed. Use NULL_TYPE to express a NULL value. + BindVariables map[string]*BindVariable `protobuf:"bytes,2,rep,name=bind_variables,json=bindVariables,proto3" json:"bind_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoundQuery) Reset() { *m = BoundQuery{} } +func (m *BoundQuery) String() string { return proto.CompactTextString(m) } +func (*BoundQuery) ProtoMessage() {} +func (*BoundQuery) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{5} +} + +func (m *BoundQuery) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoundQuery.Unmarshal(m, b) +} +func (m *BoundQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoundQuery.Marshal(b, m, deterministic) +} +func (m *BoundQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoundQuery.Merge(m, src) +} +func (m *BoundQuery) XXX_Size() int { + return xxx_messageInfo_BoundQuery.Size(m) +} +func (m *BoundQuery) XXX_DiscardUnknown() { + xxx_messageInfo_BoundQuery.DiscardUnknown(m) +} + +var xxx_messageInfo_BoundQuery proto.InternalMessageInfo + +func (m *BoundQuery) GetSql() string { + if m != nil { + return m.Sql + } + return "" +} + +func (m *BoundQuery) GetBindVariables() map[string]*BindVariable { + if m != nil { + return m.BindVariables + } + return nil +} + +// ExecuteOptions is passed around for all Execute calls. +type ExecuteOptions struct { + // Controls what fields are returned in Field message responses from mysql, i.e. + // field name, table name, etc. This is an optimization for high-QPS queries where + // the client knows what it's getting + IncludedFields ExecuteOptions_IncludedFields `protobuf:"varint,4,opt,name=included_fields,json=includedFields,proto3,enum=query.ExecuteOptions_IncludedFields" json:"included_fields,omitempty"` + // client_rows_found specifies if rows_affected should return + // rows found instead of rows affected. Behavior is defined + // by MySQL's CLIENT_FOUND_ROWS flag. + ClientFoundRows bool `protobuf:"varint,5,opt,name=client_found_rows,json=clientFoundRows,proto3" json:"client_found_rows,omitempty"` + // workload specifies the type of workload: + // OLTP: DMLs allowed, results have row count limit, and + // query timeouts are shorter. + // OLAP: DMLS not allowed, no limit on row count, timeouts + // can be as high as desired. + // DBA: no limit on rowcount or timeout, all queries allowed + // but intended for long DMLs and DDLs. + Workload ExecuteOptions_Workload `protobuf:"varint,6,opt,name=workload,proto3,enum=query.ExecuteOptions_Workload" json:"workload,omitempty"` + // sql_select_limit sets an implicit limit on all select statements. Since + // vitess also sets a rowcount limit on queries, the smallest value wins. + SqlSelectLimit int64 `protobuf:"varint,8,opt,name=sql_select_limit,json=sqlSelectLimit,proto3" json:"sql_select_limit,omitempty"` + TransactionIsolation ExecuteOptions_TransactionIsolation `protobuf:"varint,9,opt,name=transaction_isolation,json=transactionIsolation,proto3,enum=query.ExecuteOptions_TransactionIsolation" json:"transaction_isolation,omitempty"` + // skip_query_plan_cache specifies if the query plan should be cached by vitess. + // By default all query plans are cached. + SkipQueryPlanCache bool `protobuf:"varint,10,opt,name=skip_query_plan_cache,json=skipQueryPlanCache,proto3" json:"skip_query_plan_cache,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteOptions) Reset() { *m = ExecuteOptions{} } +func (m *ExecuteOptions) String() string { return proto.CompactTextString(m) } +func (*ExecuteOptions) ProtoMessage() {} +func (*ExecuteOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{6} +} + +func (m *ExecuteOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteOptions.Unmarshal(m, b) +} +func (m *ExecuteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteOptions.Marshal(b, m, deterministic) +} +func (m *ExecuteOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteOptions.Merge(m, src) +} +func (m *ExecuteOptions) XXX_Size() int { + return xxx_messageInfo_ExecuteOptions.Size(m) +} +func (m *ExecuteOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteOptions proto.InternalMessageInfo + +func (m *ExecuteOptions) GetIncludedFields() ExecuteOptions_IncludedFields { + if m != nil { + return m.IncludedFields + } + return ExecuteOptions_TYPE_AND_NAME +} + +func (m *ExecuteOptions) GetClientFoundRows() bool { + if m != nil { + return m.ClientFoundRows + } + return false +} + +func (m *ExecuteOptions) GetWorkload() ExecuteOptions_Workload { + if m != nil { + return m.Workload + } + return ExecuteOptions_UNSPECIFIED +} + +func (m *ExecuteOptions) GetSqlSelectLimit() int64 { + if m != nil { + return m.SqlSelectLimit + } + return 0 +} + +func (m *ExecuteOptions) GetTransactionIsolation() ExecuteOptions_TransactionIsolation { + if m != nil { + return m.TransactionIsolation + } + return ExecuteOptions_DEFAULT +} + +func (m *ExecuteOptions) GetSkipQueryPlanCache() bool { + if m != nil { + return m.SkipQueryPlanCache + } + return false +} + +// Field describes a single column returned by a query +type Field struct { + // name of the field as returned by mysql C API + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // vitess-defined type. Conversion function is in sqltypes package. + Type Type `protobuf:"varint,2,opt,name=type,proto3,enum=query.Type" json:"type,omitempty"` + // Remaining fields from mysql C API. + // These fields are only populated when ExecuteOptions.included_fields + // is set to IncludedFields.ALL. + Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"` + OrgTable string `protobuf:"bytes,4,opt,name=org_table,json=orgTable,proto3" json:"org_table,omitempty"` + Database string `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` + OrgName string `protobuf:"bytes,6,opt,name=org_name,json=orgName,proto3" json:"org_name,omitempty"` + // column_length is really a uint32. All 32 bits can be used. + ColumnLength uint32 `protobuf:"varint,7,opt,name=column_length,json=columnLength,proto3" json:"column_length,omitempty"` + // charset is actually a uint16. Only the lower 16 bits are used. + Charset uint32 `protobuf:"varint,8,opt,name=charset,proto3" json:"charset,omitempty"` + // decimals is actually a uint8. Only the lower 8 bits are used. + Decimals uint32 `protobuf:"varint,9,opt,name=decimals,proto3" json:"decimals,omitempty"` + // flags is actually a uint16. Only the lower 16 bits are used. + Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (m *Field) String() string { return proto.CompactTextString(m) } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{7} +} + +func (m *Field) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Field.Unmarshal(m, b) +} +func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Field.Marshal(b, m, deterministic) +} +func (m *Field) XXX_Merge(src proto.Message) { + xxx_messageInfo_Field.Merge(m, src) +} +func (m *Field) XXX_Size() int { + return xxx_messageInfo_Field.Size(m) +} +func (m *Field) XXX_DiscardUnknown() { + xxx_messageInfo_Field.DiscardUnknown(m) +} + +var xxx_messageInfo_Field proto.InternalMessageInfo + +func (m *Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Field) GetType() Type { + if m != nil { + return m.Type + } + return Type_NULL_TYPE +} + +func (m *Field) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *Field) GetOrgTable() string { + if m != nil { + return m.OrgTable + } + return "" +} + +func (m *Field) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *Field) GetOrgName() string { + if m != nil { + return m.OrgName + } + return "" +} + +func (m *Field) GetColumnLength() uint32 { + if m != nil { + return m.ColumnLength + } + return 0 +} + +func (m *Field) GetCharset() uint32 { + if m != nil { + return m.Charset + } + return 0 +} + +func (m *Field) GetDecimals() uint32 { + if m != nil { + return m.Decimals + } + return 0 +} + +func (m *Field) GetFlags() uint32 { + if m != nil { + return m.Flags + } + return 0 +} + +// Row is a database row. +type Row struct { + // lengths contains the length of each value in values. + // A length of -1 means that the field is NULL. While + // reading values, you have to accummulate the length + // to know the offset where the next value begins in values. + Lengths []int64 `protobuf:"zigzag64,1,rep,packed,name=lengths,proto3" json:"lengths,omitempty"` + // values contains a concatenation of all values in the row. + Values []byte `protobuf:"bytes,2,opt,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Row) Reset() { *m = Row{} } +func (m *Row) String() string { return proto.CompactTextString(m) } +func (*Row) ProtoMessage() {} +func (*Row) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{8} +} + +func (m *Row) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Row.Unmarshal(m, b) +} +func (m *Row) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Row.Marshal(b, m, deterministic) +} +func (m *Row) XXX_Merge(src proto.Message) { + xxx_messageInfo_Row.Merge(m, src) +} +func (m *Row) XXX_Size() int { + return xxx_messageInfo_Row.Size(m) +} +func (m *Row) XXX_DiscardUnknown() { + xxx_messageInfo_Row.DiscardUnknown(m) +} + +var xxx_messageInfo_Row proto.InternalMessageInfo + +func (m *Row) GetLengths() []int64 { + if m != nil { + return m.Lengths + } + return nil +} + +func (m *Row) GetValues() []byte { + if m != nil { + return m.Values + } + return nil +} + +// QueryResult is returned by Execute and ExecuteStream. +// +// As returned by Execute, len(fields) is always equal to len(row) +// (for each row in rows). +// +// As returned by StreamExecute, the first QueryResult has the fields +// set, and subsequent QueryResult have rows set. And as Execute, +// len(QueryResult[0].fields) is always equal to len(row) (for each +// row in rows for each QueryResult in QueryResult[1:]). +type QueryResult struct { + Fields []*Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` + RowsAffected uint64 `protobuf:"varint,2,opt,name=rows_affected,json=rowsAffected,proto3" json:"rows_affected,omitempty"` + InsertId uint64 `protobuf:"varint,3,opt,name=insert_id,json=insertId,proto3" json:"insert_id,omitempty"` + Rows []*Row `protobuf:"bytes,4,rep,name=rows,proto3" json:"rows,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} +func (*QueryResult) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{9} +} + +func (m *QueryResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryResult.Unmarshal(m, b) +} +func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) +} +func (m *QueryResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResult.Merge(m, src) +} +func (m *QueryResult) XXX_Size() int { + return xxx_messageInfo_QueryResult.Size(m) +} +func (m *QueryResult) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResult.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResult proto.InternalMessageInfo + +func (m *QueryResult) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *QueryResult) GetRowsAffected() uint64 { + if m != nil { + return m.RowsAffected + } + return 0 +} + +func (m *QueryResult) GetInsertId() uint64 { + if m != nil { + return m.InsertId + } + return 0 +} + +func (m *QueryResult) GetRows() []*Row { + if m != nil { + return m.Rows + } + return nil +} + +// QueryWarning is used to convey out of band query execution warnings +// by storing in the vtgate.Session +type QueryWarning struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryWarning) Reset() { *m = QueryWarning{} } +func (m *QueryWarning) String() string { return proto.CompactTextString(m) } +func (*QueryWarning) ProtoMessage() {} +func (*QueryWarning) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{10} +} + +func (m *QueryWarning) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryWarning.Unmarshal(m, b) +} +func (m *QueryWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryWarning.Marshal(b, m, deterministic) +} +func (m *QueryWarning) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryWarning.Merge(m, src) +} +func (m *QueryWarning) XXX_Size() int { + return xxx_messageInfo_QueryWarning.Size(m) +} +func (m *QueryWarning) XXX_DiscardUnknown() { + xxx_messageInfo_QueryWarning.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryWarning proto.InternalMessageInfo + +func (m *QueryWarning) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *QueryWarning) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// StreamEvent describes a set of transformations that happened as a +// single transactional unit on a server. It is streamed back by the +// Update Stream calls. +type StreamEvent struct { + // The statements in this transaction. + Statements []*StreamEvent_Statement `protobuf:"bytes,1,rep,name=statements,proto3" json:"statements,omitempty"` + // The Event Token for this event. + EventToken *EventToken `protobuf:"bytes,2,opt,name=event_token,json=eventToken,proto3" json:"event_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamEvent) Reset() { *m = StreamEvent{} } +func (m *StreamEvent) String() string { return proto.CompactTextString(m) } +func (*StreamEvent) ProtoMessage() {} +func (*StreamEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{11} +} + +func (m *StreamEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamEvent.Unmarshal(m, b) +} +func (m *StreamEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamEvent.Marshal(b, m, deterministic) +} +func (m *StreamEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamEvent.Merge(m, src) +} +func (m *StreamEvent) XXX_Size() int { + return xxx_messageInfo_StreamEvent.Size(m) +} +func (m *StreamEvent) XXX_DiscardUnknown() { + xxx_messageInfo_StreamEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamEvent proto.InternalMessageInfo + +func (m *StreamEvent) GetStatements() []*StreamEvent_Statement { + if m != nil { + return m.Statements + } + return nil +} + +func (m *StreamEvent) GetEventToken() *EventToken { + if m != nil { + return m.EventToken + } + return nil +} + +// One individual Statement in a transaction. +type StreamEvent_Statement struct { + Category StreamEvent_Statement_Category `protobuf:"varint,1,opt,name=category,proto3,enum=query.StreamEvent_Statement_Category" json:"category,omitempty"` + // table_name, primary_key_fields and primary_key_values are set for DML. + TableName string `protobuf:"bytes,2,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + PrimaryKeyFields []*Field `protobuf:"bytes,3,rep,name=primary_key_fields,json=primaryKeyFields,proto3" json:"primary_key_fields,omitempty"` + PrimaryKeyValues []*Row `protobuf:"bytes,4,rep,name=primary_key_values,json=primaryKeyValues,proto3" json:"primary_key_values,omitempty"` + // sql is set for all queries. + // FIXME(alainjobart) we may not need it for DMLs. + Sql []byte `protobuf:"bytes,5,opt,name=sql,proto3" json:"sql,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamEvent_Statement) Reset() { *m = StreamEvent_Statement{} } +func (m *StreamEvent_Statement) String() string { return proto.CompactTextString(m) } +func (*StreamEvent_Statement) ProtoMessage() {} +func (*StreamEvent_Statement) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{11, 0} +} + +func (m *StreamEvent_Statement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamEvent_Statement.Unmarshal(m, b) +} +func (m *StreamEvent_Statement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamEvent_Statement.Marshal(b, m, deterministic) +} +func (m *StreamEvent_Statement) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamEvent_Statement.Merge(m, src) +} +func (m *StreamEvent_Statement) XXX_Size() int { + return xxx_messageInfo_StreamEvent_Statement.Size(m) +} +func (m *StreamEvent_Statement) XXX_DiscardUnknown() { + xxx_messageInfo_StreamEvent_Statement.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamEvent_Statement proto.InternalMessageInfo + +func (m *StreamEvent_Statement) GetCategory() StreamEvent_Statement_Category { + if m != nil { + return m.Category + } + return StreamEvent_Statement_Error +} + +func (m *StreamEvent_Statement) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *StreamEvent_Statement) GetPrimaryKeyFields() []*Field { + if m != nil { + return m.PrimaryKeyFields + } + return nil +} + +func (m *StreamEvent_Statement) GetPrimaryKeyValues() []*Row { + if m != nil { + return m.PrimaryKeyValues + } + return nil +} + +func (m *StreamEvent_Statement) GetSql() []byte { + if m != nil { + return m.Sql + } + return nil +} + +// ExecuteRequest is the payload to Execute +type ExecuteRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Query *BoundQuery `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` + TransactionId int64 `protobuf:"varint,5,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` + ReservedId int64 `protobuf:"varint,7,opt,name=reserved_id,json=reservedId,proto3" json:"reserved_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} } +func (m *ExecuteRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteRequest) ProtoMessage() {} +func (*ExecuteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{12} +} + +func (m *ExecuteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteRequest.Unmarshal(m, b) +} +func (m *ExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteRequest.Marshal(b, m, deterministic) +} +func (m *ExecuteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteRequest.Merge(m, src) +} +func (m *ExecuteRequest) XXX_Size() int { + return xxx_messageInfo_ExecuteRequest.Size(m) +} +func (m *ExecuteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteRequest proto.InternalMessageInfo + +func (m *ExecuteRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *ExecuteRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *ExecuteRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ExecuteRequest) GetQuery() *BoundQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *ExecuteRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *ExecuteRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *ExecuteRequest) GetReservedId() int64 { + if m != nil { + return m.ReservedId + } + return 0 +} + +// ExecuteResponse is the returned value from Execute +type ExecuteResponse struct { + Result *QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} } +func (m *ExecuteResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteResponse) ProtoMessage() {} +func (*ExecuteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{13} +} + +func (m *ExecuteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteResponse.Unmarshal(m, b) +} +func (m *ExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteResponse.Marshal(b, m, deterministic) +} +func (m *ExecuteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteResponse.Merge(m, src) +} +func (m *ExecuteResponse) XXX_Size() int { + return xxx_messageInfo_ExecuteResponse.Size(m) +} +func (m *ExecuteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteResponse proto.InternalMessageInfo + +func (m *ExecuteResponse) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +// ResultWithError represents a query response +// in the form of result or error but not both. +// TODO: To be used in ExecuteBatchResponse and BeginExecuteBatchResponse. +type ResultWithError struct { + // error contains an query level error, only set if result is unset. + Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + // result contains the query result, only set if error is unset. + Result *QueryResult `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResultWithError) Reset() { *m = ResultWithError{} } +func (m *ResultWithError) String() string { return proto.CompactTextString(m) } +func (*ResultWithError) ProtoMessage() {} +func (*ResultWithError) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{14} +} + +func (m *ResultWithError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResultWithError.Unmarshal(m, b) +} +func (m *ResultWithError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResultWithError.Marshal(b, m, deterministic) +} +func (m *ResultWithError) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResultWithError.Merge(m, src) +} +func (m *ResultWithError) XXX_Size() int { + return xxx_messageInfo_ResultWithError.Size(m) +} +func (m *ResultWithError) XXX_DiscardUnknown() { + xxx_messageInfo_ResultWithError.DiscardUnknown(m) +} + +var xxx_messageInfo_ResultWithError proto.InternalMessageInfo + +func (m *ResultWithError) GetError() *vtrpc.RPCError { + if m != nil { + return m.Error + } + return nil +} + +func (m *ResultWithError) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +// ExecuteBatchRequest is the payload to ExecuteBatch +type ExecuteBatchRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Queries []*BoundQuery `protobuf:"bytes,4,rep,name=queries,proto3" json:"queries,omitempty"` + AsTransaction bool `protobuf:"varint,5,opt,name=as_transaction,json=asTransaction,proto3" json:"as_transaction,omitempty"` + TransactionId int64 `protobuf:"varint,6,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,7,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteBatchRequest) Reset() { *m = ExecuteBatchRequest{} } +func (m *ExecuteBatchRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteBatchRequest) ProtoMessage() {} +func (*ExecuteBatchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{15} +} + +func (m *ExecuteBatchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteBatchRequest.Unmarshal(m, b) +} +func (m *ExecuteBatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteBatchRequest.Marshal(b, m, deterministic) +} +func (m *ExecuteBatchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteBatchRequest.Merge(m, src) +} +func (m *ExecuteBatchRequest) XXX_Size() int { + return xxx_messageInfo_ExecuteBatchRequest.Size(m) +} +func (m *ExecuteBatchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteBatchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteBatchRequest proto.InternalMessageInfo + +func (m *ExecuteBatchRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *ExecuteBatchRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *ExecuteBatchRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ExecuteBatchRequest) GetQueries() []*BoundQuery { + if m != nil { + return m.Queries + } + return nil +} + +func (m *ExecuteBatchRequest) GetAsTransaction() bool { + if m != nil { + return m.AsTransaction + } + return false +} + +func (m *ExecuteBatchRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *ExecuteBatchRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +// ExecuteBatchResponse is the returned value from ExecuteBatch +type ExecuteBatchResponse struct { + Results []*QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteBatchResponse) Reset() { *m = ExecuteBatchResponse{} } +func (m *ExecuteBatchResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteBatchResponse) ProtoMessage() {} +func (*ExecuteBatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{16} +} + +func (m *ExecuteBatchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteBatchResponse.Unmarshal(m, b) +} +func (m *ExecuteBatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteBatchResponse.Marshal(b, m, deterministic) +} +func (m *ExecuteBatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteBatchResponse.Merge(m, src) +} +func (m *ExecuteBatchResponse) XXX_Size() int { + return xxx_messageInfo_ExecuteBatchResponse.Size(m) +} +func (m *ExecuteBatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteBatchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteBatchResponse proto.InternalMessageInfo + +func (m *ExecuteBatchResponse) GetResults() []*QueryResult { + if m != nil { + return m.Results + } + return nil +} + +// StreamExecuteRequest is the payload to StreamExecute +type StreamExecuteRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Query *BoundQuery `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,5,opt,name=options,proto3" json:"options,omitempty"` + TransactionId int64 `protobuf:"varint,6,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamExecuteRequest) Reset() { *m = StreamExecuteRequest{} } +func (m *StreamExecuteRequest) String() string { return proto.CompactTextString(m) } +func (*StreamExecuteRequest) ProtoMessage() {} +func (*StreamExecuteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{17} +} + +func (m *StreamExecuteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamExecuteRequest.Unmarshal(m, b) +} +func (m *StreamExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamExecuteRequest.Marshal(b, m, deterministic) +} +func (m *StreamExecuteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamExecuteRequest.Merge(m, src) +} +func (m *StreamExecuteRequest) XXX_Size() int { + return xxx_messageInfo_StreamExecuteRequest.Size(m) +} +func (m *StreamExecuteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StreamExecuteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamExecuteRequest proto.InternalMessageInfo + +func (m *StreamExecuteRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *StreamExecuteRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *StreamExecuteRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *StreamExecuteRequest) GetQuery() *BoundQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *StreamExecuteRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *StreamExecuteRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +// StreamExecuteResponse is the returned value from StreamExecute +type StreamExecuteResponse struct { + Result *QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamExecuteResponse) Reset() { *m = StreamExecuteResponse{} } +func (m *StreamExecuteResponse) String() string { return proto.CompactTextString(m) } +func (*StreamExecuteResponse) ProtoMessage() {} +func (*StreamExecuteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{18} +} + +func (m *StreamExecuteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamExecuteResponse.Unmarshal(m, b) +} +func (m *StreamExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamExecuteResponse.Marshal(b, m, deterministic) +} +func (m *StreamExecuteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamExecuteResponse.Merge(m, src) +} +func (m *StreamExecuteResponse) XXX_Size() int { + return xxx_messageInfo_StreamExecuteResponse.Size(m) +} +func (m *StreamExecuteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StreamExecuteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamExecuteResponse proto.InternalMessageInfo + +func (m *StreamExecuteResponse) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +// BeginRequest is the payload to Begin +type BeginRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BeginRequest) Reset() { *m = BeginRequest{} } +func (m *BeginRequest) String() string { return proto.CompactTextString(m) } +func (*BeginRequest) ProtoMessage() {} +func (*BeginRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{19} +} + +func (m *BeginRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BeginRequest.Unmarshal(m, b) +} +func (m *BeginRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BeginRequest.Marshal(b, m, deterministic) +} +func (m *BeginRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BeginRequest.Merge(m, src) +} +func (m *BeginRequest) XXX_Size() int { + return xxx_messageInfo_BeginRequest.Size(m) +} +func (m *BeginRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BeginRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BeginRequest proto.InternalMessageInfo + +func (m *BeginRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *BeginRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *BeginRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *BeginRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +// BeginResponse is the returned value from Begin +type BeginResponse struct { + TransactionId int64 `protobuf:"varint,1,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,2,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BeginResponse) Reset() { *m = BeginResponse{} } +func (m *BeginResponse) String() string { return proto.CompactTextString(m) } +func (*BeginResponse) ProtoMessage() {} +func (*BeginResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{20} +} + +func (m *BeginResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BeginResponse.Unmarshal(m, b) +} +func (m *BeginResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BeginResponse.Marshal(b, m, deterministic) +} +func (m *BeginResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BeginResponse.Merge(m, src) +} +func (m *BeginResponse) XXX_Size() int { + return xxx_messageInfo_BeginResponse.Size(m) +} +func (m *BeginResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BeginResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BeginResponse proto.InternalMessageInfo + +func (m *BeginResponse) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *BeginResponse) GetTabletAlias() *topodata.TabletAlias { + if m != nil { + return m.TabletAlias + } + return nil +} + +// CommitRequest is the payload to Commit +type CommitRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} +func (*CommitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{21} +} + +func (m *CommitRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CommitRequest.Unmarshal(m, b) +} +func (m *CommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic) +} +func (m *CommitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitRequest.Merge(m, src) +} +func (m *CommitRequest) XXX_Size() int { + return xxx_messageInfo_CommitRequest.Size(m) +} +func (m *CommitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CommitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitRequest proto.InternalMessageInfo + +func (m *CommitRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *CommitRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *CommitRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *CommitRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +// CommitResponse is the returned value from Commit +type CommitResponse struct { + ReservedId int64 `protobuf:"varint,1,opt,name=reserved_id,json=reservedId,proto3" json:"reserved_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{22} +} + +func (m *CommitResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CommitResponse.Unmarshal(m, b) +} +func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) +} +func (m *CommitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitResponse.Merge(m, src) +} +func (m *CommitResponse) XXX_Size() int { + return xxx_messageInfo_CommitResponse.Size(m) +} +func (m *CommitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CommitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitResponse proto.InternalMessageInfo + +func (m *CommitResponse) GetReservedId() int64 { + if m != nil { + return m.ReservedId + } + return 0 +} + +// RollbackRequest is the payload to Rollback +type RollbackRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } +func (m *RollbackRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackRequest) ProtoMessage() {} +func (*RollbackRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{23} +} + +func (m *RollbackRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RollbackRequest.Unmarshal(m, b) +} +func (m *RollbackRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RollbackRequest.Marshal(b, m, deterministic) +} +func (m *RollbackRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackRequest.Merge(m, src) +} +func (m *RollbackRequest) XXX_Size() int { + return xxx_messageInfo_RollbackRequest.Size(m) +} +func (m *RollbackRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackRequest proto.InternalMessageInfo + +func (m *RollbackRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *RollbackRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *RollbackRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *RollbackRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +// RollbackResponse is the returned value from Rollback +type RollbackResponse struct { + ReservedId int64 `protobuf:"varint,1,opt,name=reserved_id,json=reservedId,proto3" json:"reserved_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } +func (m *RollbackResponse) String() string { return proto.CompactTextString(m) } +func (*RollbackResponse) ProtoMessage() {} +func (*RollbackResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{24} +} + +func (m *RollbackResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RollbackResponse.Unmarshal(m, b) +} +func (m *RollbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RollbackResponse.Marshal(b, m, deterministic) +} +func (m *RollbackResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackResponse.Merge(m, src) +} +func (m *RollbackResponse) XXX_Size() int { + return xxx_messageInfo_RollbackResponse.Size(m) +} +func (m *RollbackResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackResponse proto.InternalMessageInfo + +func (m *RollbackResponse) GetReservedId() int64 { + if m != nil { + return m.ReservedId + } + return 0 +} + +// PrepareRequest is the payload to Prepare +type PrepareRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + Dtid string `protobuf:"bytes,5,opt,name=dtid,proto3" json:"dtid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareRequest) Reset() { *m = PrepareRequest{} } +func (m *PrepareRequest) String() string { return proto.CompactTextString(m) } +func (*PrepareRequest) ProtoMessage() {} +func (*PrepareRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{25} +} + +func (m *PrepareRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareRequest.Unmarshal(m, b) +} +func (m *PrepareRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareRequest.Marshal(b, m, deterministic) +} +func (m *PrepareRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareRequest.Merge(m, src) +} +func (m *PrepareRequest) XXX_Size() int { + return xxx_messageInfo_PrepareRequest.Size(m) +} +func (m *PrepareRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareRequest proto.InternalMessageInfo + +func (m *PrepareRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *PrepareRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *PrepareRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *PrepareRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *PrepareRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// PrepareResponse is the returned value from Prepare +type PrepareResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareResponse) Reset() { *m = PrepareResponse{} } +func (m *PrepareResponse) String() string { return proto.CompactTextString(m) } +func (*PrepareResponse) ProtoMessage() {} +func (*PrepareResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{26} +} + +func (m *PrepareResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareResponse.Unmarshal(m, b) +} +func (m *PrepareResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareResponse.Marshal(b, m, deterministic) +} +func (m *PrepareResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareResponse.Merge(m, src) +} +func (m *PrepareResponse) XXX_Size() int { + return xxx_messageInfo_PrepareResponse.Size(m) +} +func (m *PrepareResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareResponse proto.InternalMessageInfo + +// CommitPreparedRequest is the payload to CommitPrepared +type CommitPreparedRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Dtid string `protobuf:"bytes,4,opt,name=dtid,proto3" json:"dtid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CommitPreparedRequest) Reset() { *m = CommitPreparedRequest{} } +func (m *CommitPreparedRequest) String() string { return proto.CompactTextString(m) } +func (*CommitPreparedRequest) ProtoMessage() {} +func (*CommitPreparedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{27} +} + +func (m *CommitPreparedRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CommitPreparedRequest.Unmarshal(m, b) +} +func (m *CommitPreparedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CommitPreparedRequest.Marshal(b, m, deterministic) +} +func (m *CommitPreparedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitPreparedRequest.Merge(m, src) +} +func (m *CommitPreparedRequest) XXX_Size() int { + return xxx_messageInfo_CommitPreparedRequest.Size(m) +} +func (m *CommitPreparedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CommitPreparedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitPreparedRequest proto.InternalMessageInfo + +func (m *CommitPreparedRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *CommitPreparedRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *CommitPreparedRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *CommitPreparedRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// CommitPreparedResponse is the returned value from CommitPrepared +type CommitPreparedResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CommitPreparedResponse) Reset() { *m = CommitPreparedResponse{} } +func (m *CommitPreparedResponse) String() string { return proto.CompactTextString(m) } +func (*CommitPreparedResponse) ProtoMessage() {} +func (*CommitPreparedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{28} +} + +func (m *CommitPreparedResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CommitPreparedResponse.Unmarshal(m, b) +} +func (m *CommitPreparedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CommitPreparedResponse.Marshal(b, m, deterministic) +} +func (m *CommitPreparedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitPreparedResponse.Merge(m, src) +} +func (m *CommitPreparedResponse) XXX_Size() int { + return xxx_messageInfo_CommitPreparedResponse.Size(m) +} +func (m *CommitPreparedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CommitPreparedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitPreparedResponse proto.InternalMessageInfo + +// RollbackPreparedRequest is the payload to RollbackPrepared +type RollbackPreparedRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + Dtid string `protobuf:"bytes,5,opt,name=dtid,proto3" json:"dtid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RollbackPreparedRequest) Reset() { *m = RollbackPreparedRequest{} } +func (m *RollbackPreparedRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackPreparedRequest) ProtoMessage() {} +func (*RollbackPreparedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{29} +} + +func (m *RollbackPreparedRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RollbackPreparedRequest.Unmarshal(m, b) +} +func (m *RollbackPreparedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RollbackPreparedRequest.Marshal(b, m, deterministic) +} +func (m *RollbackPreparedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackPreparedRequest.Merge(m, src) +} +func (m *RollbackPreparedRequest) XXX_Size() int { + return xxx_messageInfo_RollbackPreparedRequest.Size(m) +} +func (m *RollbackPreparedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackPreparedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackPreparedRequest proto.InternalMessageInfo + +func (m *RollbackPreparedRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *RollbackPreparedRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *RollbackPreparedRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *RollbackPreparedRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *RollbackPreparedRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// RollbackPreparedResponse is the returned value from RollbackPrepared +type RollbackPreparedResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RollbackPreparedResponse) Reset() { *m = RollbackPreparedResponse{} } +func (m *RollbackPreparedResponse) String() string { return proto.CompactTextString(m) } +func (*RollbackPreparedResponse) ProtoMessage() {} +func (*RollbackPreparedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{30} +} + +func (m *RollbackPreparedResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RollbackPreparedResponse.Unmarshal(m, b) +} +func (m *RollbackPreparedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RollbackPreparedResponse.Marshal(b, m, deterministic) +} +func (m *RollbackPreparedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackPreparedResponse.Merge(m, src) +} +func (m *RollbackPreparedResponse) XXX_Size() int { + return xxx_messageInfo_RollbackPreparedResponse.Size(m) +} +func (m *RollbackPreparedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackPreparedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackPreparedResponse proto.InternalMessageInfo + +// CreateTransactionRequest is the payload to CreateTransaction +type CreateTransactionRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Dtid string `protobuf:"bytes,4,opt,name=dtid,proto3" json:"dtid,omitempty"` + Participants []*Target `protobuf:"bytes,5,rep,name=participants,proto3" json:"participants,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTransactionRequest) Reset() { *m = CreateTransactionRequest{} } +func (m *CreateTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTransactionRequest) ProtoMessage() {} +func (*CreateTransactionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{31} +} + +func (m *CreateTransactionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTransactionRequest.Unmarshal(m, b) +} +func (m *CreateTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTransactionRequest.Marshal(b, m, deterministic) +} +func (m *CreateTransactionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTransactionRequest.Merge(m, src) +} +func (m *CreateTransactionRequest) XXX_Size() int { + return xxx_messageInfo_CreateTransactionRequest.Size(m) +} +func (m *CreateTransactionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTransactionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTransactionRequest proto.InternalMessageInfo + +func (m *CreateTransactionRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *CreateTransactionRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *CreateTransactionRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *CreateTransactionRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +func (m *CreateTransactionRequest) GetParticipants() []*Target { + if m != nil { + return m.Participants + } + return nil +} + +// CreateTransactionResponse is the returned value from CreateTransaction +type CreateTransactionResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTransactionResponse) Reset() { *m = CreateTransactionResponse{} } +func (m *CreateTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*CreateTransactionResponse) ProtoMessage() {} +func (*CreateTransactionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{32} +} + +func (m *CreateTransactionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTransactionResponse.Unmarshal(m, b) +} +func (m *CreateTransactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTransactionResponse.Marshal(b, m, deterministic) +} +func (m *CreateTransactionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTransactionResponse.Merge(m, src) +} +func (m *CreateTransactionResponse) XXX_Size() int { + return xxx_messageInfo_CreateTransactionResponse.Size(m) +} +func (m *CreateTransactionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTransactionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTransactionResponse proto.InternalMessageInfo + +// StartCommitRequest is the payload to StartCommit +type StartCommitRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + Dtid string `protobuf:"bytes,5,opt,name=dtid,proto3" json:"dtid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartCommitRequest) Reset() { *m = StartCommitRequest{} } +func (m *StartCommitRequest) String() string { return proto.CompactTextString(m) } +func (*StartCommitRequest) ProtoMessage() {} +func (*StartCommitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{33} +} + +func (m *StartCommitRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartCommitRequest.Unmarshal(m, b) +} +func (m *StartCommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartCommitRequest.Marshal(b, m, deterministic) +} +func (m *StartCommitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartCommitRequest.Merge(m, src) +} +func (m *StartCommitRequest) XXX_Size() int { + return xxx_messageInfo_StartCommitRequest.Size(m) +} +func (m *StartCommitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartCommitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartCommitRequest proto.InternalMessageInfo + +func (m *StartCommitRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *StartCommitRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *StartCommitRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *StartCommitRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *StartCommitRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// StartCommitResponse is the returned value from StartCommit +type StartCommitResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartCommitResponse) Reset() { *m = StartCommitResponse{} } +func (m *StartCommitResponse) String() string { return proto.CompactTextString(m) } +func (*StartCommitResponse) ProtoMessage() {} +func (*StartCommitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{34} +} + +func (m *StartCommitResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartCommitResponse.Unmarshal(m, b) +} +func (m *StartCommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartCommitResponse.Marshal(b, m, deterministic) +} +func (m *StartCommitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartCommitResponse.Merge(m, src) +} +func (m *StartCommitResponse) XXX_Size() int { + return xxx_messageInfo_StartCommitResponse.Size(m) +} +func (m *StartCommitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartCommitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartCommitResponse proto.InternalMessageInfo + +// SetRollbackRequest is the payload to SetRollback +type SetRollbackRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + Dtid string `protobuf:"bytes,5,opt,name=dtid,proto3" json:"dtid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetRollbackRequest) Reset() { *m = SetRollbackRequest{} } +func (m *SetRollbackRequest) String() string { return proto.CompactTextString(m) } +func (*SetRollbackRequest) ProtoMessage() {} +func (*SetRollbackRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{35} +} + +func (m *SetRollbackRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetRollbackRequest.Unmarshal(m, b) +} +func (m *SetRollbackRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetRollbackRequest.Marshal(b, m, deterministic) +} +func (m *SetRollbackRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetRollbackRequest.Merge(m, src) +} +func (m *SetRollbackRequest) XXX_Size() int { + return xxx_messageInfo_SetRollbackRequest.Size(m) +} +func (m *SetRollbackRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetRollbackRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetRollbackRequest proto.InternalMessageInfo + +func (m *SetRollbackRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *SetRollbackRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *SetRollbackRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *SetRollbackRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *SetRollbackRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// SetRollbackResponse is the returned value from SetRollback +type SetRollbackResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetRollbackResponse) Reset() { *m = SetRollbackResponse{} } +func (m *SetRollbackResponse) String() string { return proto.CompactTextString(m) } +func (*SetRollbackResponse) ProtoMessage() {} +func (*SetRollbackResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{36} +} + +func (m *SetRollbackResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetRollbackResponse.Unmarshal(m, b) +} +func (m *SetRollbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetRollbackResponse.Marshal(b, m, deterministic) +} +func (m *SetRollbackResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetRollbackResponse.Merge(m, src) +} +func (m *SetRollbackResponse) XXX_Size() int { + return xxx_messageInfo_SetRollbackResponse.Size(m) +} +func (m *SetRollbackResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetRollbackResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetRollbackResponse proto.InternalMessageInfo + +// ConcludeTransactionRequest is the payload to ConcludeTransaction +type ConcludeTransactionRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Dtid string `protobuf:"bytes,4,opt,name=dtid,proto3" json:"dtid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConcludeTransactionRequest) Reset() { *m = ConcludeTransactionRequest{} } +func (m *ConcludeTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*ConcludeTransactionRequest) ProtoMessage() {} +func (*ConcludeTransactionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{37} +} + +func (m *ConcludeTransactionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConcludeTransactionRequest.Unmarshal(m, b) +} +func (m *ConcludeTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConcludeTransactionRequest.Marshal(b, m, deterministic) +} +func (m *ConcludeTransactionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConcludeTransactionRequest.Merge(m, src) +} +func (m *ConcludeTransactionRequest) XXX_Size() int { + return xxx_messageInfo_ConcludeTransactionRequest.Size(m) +} +func (m *ConcludeTransactionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConcludeTransactionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConcludeTransactionRequest proto.InternalMessageInfo + +func (m *ConcludeTransactionRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *ConcludeTransactionRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *ConcludeTransactionRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ConcludeTransactionRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// ConcludeTransactionResponse is the returned value from ConcludeTransaction +type ConcludeTransactionResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConcludeTransactionResponse) Reset() { *m = ConcludeTransactionResponse{} } +func (m *ConcludeTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*ConcludeTransactionResponse) ProtoMessage() {} +func (*ConcludeTransactionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{38} +} + +func (m *ConcludeTransactionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConcludeTransactionResponse.Unmarshal(m, b) +} +func (m *ConcludeTransactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConcludeTransactionResponse.Marshal(b, m, deterministic) +} +func (m *ConcludeTransactionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConcludeTransactionResponse.Merge(m, src) +} +func (m *ConcludeTransactionResponse) XXX_Size() int { + return xxx_messageInfo_ConcludeTransactionResponse.Size(m) +} +func (m *ConcludeTransactionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConcludeTransactionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConcludeTransactionResponse proto.InternalMessageInfo + +// ReadTransactionRequest is the payload to ReadTransaction +type ReadTransactionRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Dtid string `protobuf:"bytes,4,opt,name=dtid,proto3" json:"dtid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadTransactionRequest) Reset() { *m = ReadTransactionRequest{} } +func (m *ReadTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*ReadTransactionRequest) ProtoMessage() {} +func (*ReadTransactionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{39} +} + +func (m *ReadTransactionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadTransactionRequest.Unmarshal(m, b) +} +func (m *ReadTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadTransactionRequest.Marshal(b, m, deterministic) +} +func (m *ReadTransactionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadTransactionRequest.Merge(m, src) +} +func (m *ReadTransactionRequest) XXX_Size() int { + return xxx_messageInfo_ReadTransactionRequest.Size(m) +} +func (m *ReadTransactionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadTransactionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadTransactionRequest proto.InternalMessageInfo + +func (m *ReadTransactionRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *ReadTransactionRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *ReadTransactionRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ReadTransactionRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// ReadTransactionResponse is the returned value from ReadTransaction +type ReadTransactionResponse struct { + Metadata *TransactionMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadTransactionResponse) Reset() { *m = ReadTransactionResponse{} } +func (m *ReadTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*ReadTransactionResponse) ProtoMessage() {} +func (*ReadTransactionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{40} +} + +func (m *ReadTransactionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadTransactionResponse.Unmarshal(m, b) +} +func (m *ReadTransactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadTransactionResponse.Marshal(b, m, deterministic) +} +func (m *ReadTransactionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadTransactionResponse.Merge(m, src) +} +func (m *ReadTransactionResponse) XXX_Size() int { + return xxx_messageInfo_ReadTransactionResponse.Size(m) +} +func (m *ReadTransactionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadTransactionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadTransactionResponse proto.InternalMessageInfo + +func (m *ReadTransactionResponse) GetMetadata() *TransactionMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +// BeginExecuteRequest is the payload to BeginExecute +type BeginExecuteRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Query *BoundQuery `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,5,opt,name=options,proto3" json:"options,omitempty"` + ReservedId int64 `protobuf:"varint,6,opt,name=reserved_id,json=reservedId,proto3" json:"reserved_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BeginExecuteRequest) Reset() { *m = BeginExecuteRequest{} } +func (m *BeginExecuteRequest) String() string { return proto.CompactTextString(m) } +func (*BeginExecuteRequest) ProtoMessage() {} +func (*BeginExecuteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{41} +} + +func (m *BeginExecuteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BeginExecuteRequest.Unmarshal(m, b) +} +func (m *BeginExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BeginExecuteRequest.Marshal(b, m, deterministic) +} +func (m *BeginExecuteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BeginExecuteRequest.Merge(m, src) +} +func (m *BeginExecuteRequest) XXX_Size() int { + return xxx_messageInfo_BeginExecuteRequest.Size(m) +} +func (m *BeginExecuteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BeginExecuteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BeginExecuteRequest proto.InternalMessageInfo + +func (m *BeginExecuteRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *BeginExecuteRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *BeginExecuteRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *BeginExecuteRequest) GetQuery() *BoundQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *BeginExecuteRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *BeginExecuteRequest) GetReservedId() int64 { + if m != nil { + return m.ReservedId + } + return 0 +} + +// BeginExecuteResponse is the returned value from BeginExecute +type BeginExecuteResponse struct { + // error contains an application level error if necessary. Note the + // transaction_id may be set, even when an error is returned, if the begin + // worked but the execute failed. + Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Result *QueryResult `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + // transaction_id might be non-zero even if an error is present. + TransactionId int64 `protobuf:"varint,3,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,4,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BeginExecuteResponse) Reset() { *m = BeginExecuteResponse{} } +func (m *BeginExecuteResponse) String() string { return proto.CompactTextString(m) } +func (*BeginExecuteResponse) ProtoMessage() {} +func (*BeginExecuteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{42} +} + +func (m *BeginExecuteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BeginExecuteResponse.Unmarshal(m, b) +} +func (m *BeginExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BeginExecuteResponse.Marshal(b, m, deterministic) +} +func (m *BeginExecuteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BeginExecuteResponse.Merge(m, src) +} +func (m *BeginExecuteResponse) XXX_Size() int { + return xxx_messageInfo_BeginExecuteResponse.Size(m) +} +func (m *BeginExecuteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BeginExecuteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BeginExecuteResponse proto.InternalMessageInfo + +func (m *BeginExecuteResponse) GetError() *vtrpc.RPCError { + if m != nil { + return m.Error + } + return nil +} + +func (m *BeginExecuteResponse) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *BeginExecuteResponse) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *BeginExecuteResponse) GetTabletAlias() *topodata.TabletAlias { + if m != nil { + return m.TabletAlias + } + return nil +} + +// BeginExecuteBatchRequest is the payload to BeginExecuteBatch +type BeginExecuteBatchRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Queries []*BoundQuery `protobuf:"bytes,4,rep,name=queries,proto3" json:"queries,omitempty"` + AsTransaction bool `protobuf:"varint,5,opt,name=as_transaction,json=asTransaction,proto3" json:"as_transaction,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BeginExecuteBatchRequest) Reset() { *m = BeginExecuteBatchRequest{} } +func (m *BeginExecuteBatchRequest) String() string { return proto.CompactTextString(m) } +func (*BeginExecuteBatchRequest) ProtoMessage() {} +func (*BeginExecuteBatchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{43} +} + +func (m *BeginExecuteBatchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BeginExecuteBatchRequest.Unmarshal(m, b) +} +func (m *BeginExecuteBatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BeginExecuteBatchRequest.Marshal(b, m, deterministic) +} +func (m *BeginExecuteBatchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BeginExecuteBatchRequest.Merge(m, src) +} +func (m *BeginExecuteBatchRequest) XXX_Size() int { + return xxx_messageInfo_BeginExecuteBatchRequest.Size(m) +} +func (m *BeginExecuteBatchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BeginExecuteBatchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BeginExecuteBatchRequest proto.InternalMessageInfo + +func (m *BeginExecuteBatchRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *BeginExecuteBatchRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *BeginExecuteBatchRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *BeginExecuteBatchRequest) GetQueries() []*BoundQuery { + if m != nil { + return m.Queries + } + return nil +} + +func (m *BeginExecuteBatchRequest) GetAsTransaction() bool { + if m != nil { + return m.AsTransaction + } + return false +} + +func (m *BeginExecuteBatchRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +// BeginExecuteBatchResponse is the returned value from BeginExecuteBatch +type BeginExecuteBatchResponse struct { + // error contains an application level error if necessary. Note the + // transaction_id may be set, even when an error is returned, if the begin + // worked but the execute failed. + Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Results []*QueryResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"` + // transaction_id might be non-zero even if an error is present. + TransactionId int64 `protobuf:"varint,3,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,4,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BeginExecuteBatchResponse) Reset() { *m = BeginExecuteBatchResponse{} } +func (m *BeginExecuteBatchResponse) String() string { return proto.CompactTextString(m) } +func (*BeginExecuteBatchResponse) ProtoMessage() {} +func (*BeginExecuteBatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{44} +} + +func (m *BeginExecuteBatchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BeginExecuteBatchResponse.Unmarshal(m, b) +} +func (m *BeginExecuteBatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BeginExecuteBatchResponse.Marshal(b, m, deterministic) +} +func (m *BeginExecuteBatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BeginExecuteBatchResponse.Merge(m, src) +} +func (m *BeginExecuteBatchResponse) XXX_Size() int { + return xxx_messageInfo_BeginExecuteBatchResponse.Size(m) +} +func (m *BeginExecuteBatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BeginExecuteBatchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BeginExecuteBatchResponse proto.InternalMessageInfo + +func (m *BeginExecuteBatchResponse) GetError() *vtrpc.RPCError { + if m != nil { + return m.Error + } + return nil +} + +func (m *BeginExecuteBatchResponse) GetResults() []*QueryResult { + if m != nil { + return m.Results + } + return nil +} + +func (m *BeginExecuteBatchResponse) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *BeginExecuteBatchResponse) GetTabletAlias() *topodata.TabletAlias { + if m != nil { + return m.TabletAlias + } + return nil +} + +// MessageStreamRequest is the request payload for MessageStream. +type MessageStreamRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + // name is the message table name. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageStreamRequest) Reset() { *m = MessageStreamRequest{} } +func (m *MessageStreamRequest) String() string { return proto.CompactTextString(m) } +func (*MessageStreamRequest) ProtoMessage() {} +func (*MessageStreamRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{45} +} + +func (m *MessageStreamRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageStreamRequest.Unmarshal(m, b) +} +func (m *MessageStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageStreamRequest.Marshal(b, m, deterministic) +} +func (m *MessageStreamRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageStreamRequest.Merge(m, src) +} +func (m *MessageStreamRequest) XXX_Size() int { + return xxx_messageInfo_MessageStreamRequest.Size(m) +} +func (m *MessageStreamRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MessageStreamRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageStreamRequest proto.InternalMessageInfo + +func (m *MessageStreamRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *MessageStreamRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *MessageStreamRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *MessageStreamRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// MessageStreamResponse is a response for MessageStream. +type MessageStreamResponse struct { + Result *QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageStreamResponse) Reset() { *m = MessageStreamResponse{} } +func (m *MessageStreamResponse) String() string { return proto.CompactTextString(m) } +func (*MessageStreamResponse) ProtoMessage() {} +func (*MessageStreamResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{46} +} + +func (m *MessageStreamResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageStreamResponse.Unmarshal(m, b) +} +func (m *MessageStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageStreamResponse.Marshal(b, m, deterministic) +} +func (m *MessageStreamResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageStreamResponse.Merge(m, src) +} +func (m *MessageStreamResponse) XXX_Size() int { + return xxx_messageInfo_MessageStreamResponse.Size(m) +} +func (m *MessageStreamResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MessageStreamResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageStreamResponse proto.InternalMessageInfo + +func (m *MessageStreamResponse) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +// MessageAckRequest is the request payload for MessageAck. +type MessageAckRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + // name is the message table name. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Ids []*Value `protobuf:"bytes,5,rep,name=ids,proto3" json:"ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageAckRequest) Reset() { *m = MessageAckRequest{} } +func (m *MessageAckRequest) String() string { return proto.CompactTextString(m) } +func (*MessageAckRequest) ProtoMessage() {} +func (*MessageAckRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{47} +} + +func (m *MessageAckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageAckRequest.Unmarshal(m, b) +} +func (m *MessageAckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageAckRequest.Marshal(b, m, deterministic) +} +func (m *MessageAckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageAckRequest.Merge(m, src) +} +func (m *MessageAckRequest) XXX_Size() int { + return xxx_messageInfo_MessageAckRequest.Size(m) +} +func (m *MessageAckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MessageAckRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageAckRequest proto.InternalMessageInfo + +func (m *MessageAckRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *MessageAckRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *MessageAckRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *MessageAckRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MessageAckRequest) GetIds() []*Value { + if m != nil { + return m.Ids + } + return nil +} + +// MessageAckResponse is the response for MessageAck. +type MessageAckResponse struct { + // result contains the result of the ack operation. + // Since this acts like a DML, only + // RowsAffected is returned in the result. + Result *QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageAckResponse) Reset() { *m = MessageAckResponse{} } +func (m *MessageAckResponse) String() string { return proto.CompactTextString(m) } +func (*MessageAckResponse) ProtoMessage() {} +func (*MessageAckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{48} +} + +func (m *MessageAckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageAckResponse.Unmarshal(m, b) +} +func (m *MessageAckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageAckResponse.Marshal(b, m, deterministic) +} +func (m *MessageAckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageAckResponse.Merge(m, src) +} +func (m *MessageAckResponse) XXX_Size() int { + return xxx_messageInfo_MessageAckResponse.Size(m) +} +func (m *MessageAckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MessageAckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageAckResponse proto.InternalMessageInfo + +func (m *MessageAckResponse) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +// ReserveExecuteRequest is the payload to ReserveExecute +type ReserveExecuteRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Query *BoundQuery `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` + TransactionId int64 `protobuf:"varint,5,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` + PreQueries []string `protobuf:"bytes,7,rep,name=pre_queries,json=preQueries,proto3" json:"pre_queries,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReserveExecuteRequest) Reset() { *m = ReserveExecuteRequest{} } +func (m *ReserveExecuteRequest) String() string { return proto.CompactTextString(m) } +func (*ReserveExecuteRequest) ProtoMessage() {} +func (*ReserveExecuteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{49} +} + +func (m *ReserveExecuteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReserveExecuteRequest.Unmarshal(m, b) +} +func (m *ReserveExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReserveExecuteRequest.Marshal(b, m, deterministic) +} +func (m *ReserveExecuteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReserveExecuteRequest.Merge(m, src) +} +func (m *ReserveExecuteRequest) XXX_Size() int { + return xxx_messageInfo_ReserveExecuteRequest.Size(m) +} +func (m *ReserveExecuteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReserveExecuteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReserveExecuteRequest proto.InternalMessageInfo + +func (m *ReserveExecuteRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *ReserveExecuteRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *ReserveExecuteRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ReserveExecuteRequest) GetQuery() *BoundQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *ReserveExecuteRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *ReserveExecuteRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *ReserveExecuteRequest) GetPreQueries() []string { + if m != nil { + return m.PreQueries + } + return nil +} + +// ReserveExecuteResponse is the returned value from ReserveExecute +type ReserveExecuteResponse struct { + Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Result *QueryResult `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + // The following fields might be non-zero even if an error is present. + ReservedId int64 `protobuf:"varint,3,opt,name=reserved_id,json=reservedId,proto3" json:"reserved_id,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,4,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReserveExecuteResponse) Reset() { *m = ReserveExecuteResponse{} } +func (m *ReserveExecuteResponse) String() string { return proto.CompactTextString(m) } +func (*ReserveExecuteResponse) ProtoMessage() {} +func (*ReserveExecuteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{50} +} + +func (m *ReserveExecuteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReserveExecuteResponse.Unmarshal(m, b) +} +func (m *ReserveExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReserveExecuteResponse.Marshal(b, m, deterministic) +} +func (m *ReserveExecuteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReserveExecuteResponse.Merge(m, src) +} +func (m *ReserveExecuteResponse) XXX_Size() int { + return xxx_messageInfo_ReserveExecuteResponse.Size(m) +} +func (m *ReserveExecuteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReserveExecuteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReserveExecuteResponse proto.InternalMessageInfo + +func (m *ReserveExecuteResponse) GetError() *vtrpc.RPCError { + if m != nil { + return m.Error + } + return nil +} + +func (m *ReserveExecuteResponse) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *ReserveExecuteResponse) GetReservedId() int64 { + if m != nil { + return m.ReservedId + } + return 0 +} + +func (m *ReserveExecuteResponse) GetTabletAlias() *topodata.TabletAlias { + if m != nil { + return m.TabletAlias + } + return nil +} + +// ReserveBeginExecuteRequest is the payload to ReserveBeginExecute +type ReserveBeginExecuteRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Query *BoundQuery `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,5,opt,name=options,proto3" json:"options,omitempty"` + PreQueries []string `protobuf:"bytes,6,rep,name=pre_queries,json=preQueries,proto3" json:"pre_queries,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReserveBeginExecuteRequest) Reset() { *m = ReserveBeginExecuteRequest{} } +func (m *ReserveBeginExecuteRequest) String() string { return proto.CompactTextString(m) } +func (*ReserveBeginExecuteRequest) ProtoMessage() {} +func (*ReserveBeginExecuteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{51} +} + +func (m *ReserveBeginExecuteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReserveBeginExecuteRequest.Unmarshal(m, b) +} +func (m *ReserveBeginExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReserveBeginExecuteRequest.Marshal(b, m, deterministic) +} +func (m *ReserveBeginExecuteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReserveBeginExecuteRequest.Merge(m, src) +} +func (m *ReserveBeginExecuteRequest) XXX_Size() int { + return xxx_messageInfo_ReserveBeginExecuteRequest.Size(m) +} +func (m *ReserveBeginExecuteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReserveBeginExecuteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReserveBeginExecuteRequest proto.InternalMessageInfo + +func (m *ReserveBeginExecuteRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *ReserveBeginExecuteRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *ReserveBeginExecuteRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ReserveBeginExecuteRequest) GetQuery() *BoundQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *ReserveBeginExecuteRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *ReserveBeginExecuteRequest) GetPreQueries() []string { + if m != nil { + return m.PreQueries + } + return nil +} + +// ReserveBeginExecuteResponse is the returned value from ReserveBeginExecute +type ReserveBeginExecuteResponse struct { + // error contains an application level error if necessary. Note the + // transaction_id may be set, even when an error is returned, if the begin + // worked but the execute failed. + Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Result *QueryResult `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + // The following fields might be non-zero even if an error is present. + TransactionId int64 `protobuf:"varint,3,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + ReservedId int64 `protobuf:"varint,4,opt,name=reserved_id,json=reservedId,proto3" json:"reserved_id,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,5,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReserveBeginExecuteResponse) Reset() { *m = ReserveBeginExecuteResponse{} } +func (m *ReserveBeginExecuteResponse) String() string { return proto.CompactTextString(m) } +func (*ReserveBeginExecuteResponse) ProtoMessage() {} +func (*ReserveBeginExecuteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{52} +} + +func (m *ReserveBeginExecuteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReserveBeginExecuteResponse.Unmarshal(m, b) +} +func (m *ReserveBeginExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReserveBeginExecuteResponse.Marshal(b, m, deterministic) +} +func (m *ReserveBeginExecuteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReserveBeginExecuteResponse.Merge(m, src) +} +func (m *ReserveBeginExecuteResponse) XXX_Size() int { + return xxx_messageInfo_ReserveBeginExecuteResponse.Size(m) +} +func (m *ReserveBeginExecuteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReserveBeginExecuteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReserveBeginExecuteResponse proto.InternalMessageInfo + +func (m *ReserveBeginExecuteResponse) GetError() *vtrpc.RPCError { + if m != nil { + return m.Error + } + return nil +} + +func (m *ReserveBeginExecuteResponse) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *ReserveBeginExecuteResponse) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *ReserveBeginExecuteResponse) GetReservedId() int64 { + if m != nil { + return m.ReservedId + } + return 0 +} + +func (m *ReserveBeginExecuteResponse) GetTabletAlias() *topodata.TabletAlias { + if m != nil { + return m.TabletAlias + } + return nil +} + +// ReleaseRequest is the payload to Release +type ReleaseRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + ReservedId int64 `protobuf:"varint,5,opt,name=reserved_id,json=reservedId,proto3" json:"reserved_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReleaseRequest) Reset() { *m = ReleaseRequest{} } +func (m *ReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*ReleaseRequest) ProtoMessage() {} +func (*ReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{53} +} + +func (m *ReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReleaseRequest.Unmarshal(m, b) +} +func (m *ReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReleaseRequest.Marshal(b, m, deterministic) +} +func (m *ReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReleaseRequest.Merge(m, src) +} +func (m *ReleaseRequest) XXX_Size() int { + return xxx_messageInfo_ReleaseRequest.Size(m) +} +func (m *ReleaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReleaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReleaseRequest proto.InternalMessageInfo + +func (m *ReleaseRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *ReleaseRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *ReleaseRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ReleaseRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *ReleaseRequest) GetReservedId() int64 { + if m != nil { + return m.ReservedId + } + return 0 +} + +// ReleaseResponse is the returned value from Release +type ReleaseResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReleaseResponse) Reset() { *m = ReleaseResponse{} } +func (m *ReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*ReleaseResponse) ProtoMessage() {} +func (*ReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{54} +} + +func (m *ReleaseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReleaseResponse.Unmarshal(m, b) +} +func (m *ReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReleaseResponse.Marshal(b, m, deterministic) +} +func (m *ReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReleaseResponse.Merge(m, src) +} +func (m *ReleaseResponse) XXX_Size() int { + return xxx_messageInfo_ReleaseResponse.Size(m) +} +func (m *ReleaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReleaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReleaseResponse proto.InternalMessageInfo + +// StreamHealthRequest is the payload for StreamHealth +type StreamHealthRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamHealthRequest) Reset() { *m = StreamHealthRequest{} } +func (m *StreamHealthRequest) String() string { return proto.CompactTextString(m) } +func (*StreamHealthRequest) ProtoMessage() {} +func (*StreamHealthRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{55} +} + +func (m *StreamHealthRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamHealthRequest.Unmarshal(m, b) +} +func (m *StreamHealthRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamHealthRequest.Marshal(b, m, deterministic) +} +func (m *StreamHealthRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamHealthRequest.Merge(m, src) +} +func (m *StreamHealthRequest) XXX_Size() int { + return xxx_messageInfo_StreamHealthRequest.Size(m) +} +func (m *StreamHealthRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StreamHealthRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamHealthRequest proto.InternalMessageInfo + +// RealtimeStats contains information about the tablet status. +// It is only valid for a single tablet. +type RealtimeStats struct { + // health_error is the last error we got from health check, + // or empty is the server is healthy. This is used for subset selection, + // we do not send queries to servers that are not healthy. + HealthError string `protobuf:"bytes,1,opt,name=health_error,json=healthError,proto3" json:"health_error,omitempty"` + // seconds_behind_master is populated for replicas only. It indicates + // how far behind on (MySQL) replication a replica currently is. It is used + // by clients for subset selection (so we don't try to send traffic + // to tablets that are too far behind). + // NOTE: This field must not be evaluated if "health_error" is not empty. + // TODO(mberlin): Let's switch it to int64 instead? + SecondsBehindMaster uint32 `protobuf:"varint,2,opt,name=seconds_behind_master,json=secondsBehindMaster,proto3" json:"seconds_behind_master,omitempty"` + // bin_log_players_count is the number of currently running binlog players. + // if the value is 0, it means that filtered replication is currently not + // running on the tablet. If >0, filtered replication is running. + // NOTE: This field must not be evaluated if "health_error" is not empty. + BinlogPlayersCount int32 `protobuf:"varint,3,opt,name=binlog_players_count,json=binlogPlayersCount,proto3" json:"binlog_players_count,omitempty"` + // seconds_behind_master_filtered_replication is populated for the receiving + // master of an ongoing filtered replication only. + // It specifies how far the receiving master lags behind the sending master. + // NOTE: This field must not be evaluated if "health_error" is not empty. + // NOTE: This field must not be evaluated if "bin_log_players_count" is 0. + SecondsBehindMasterFilteredReplication int64 `protobuf:"varint,4,opt,name=seconds_behind_master_filtered_replication,json=secondsBehindMasterFilteredReplication,proto3" json:"seconds_behind_master_filtered_replication,omitempty"` + // cpu_usage is used for load-based balancing + CpuUsage float64 `protobuf:"fixed64,5,opt,name=cpu_usage,json=cpuUsage,proto3" json:"cpu_usage,omitempty"` + // qps is the average QPS (queries per second) rate in the last XX seconds + // where XX is usually 60 (See query_service_stats.go). + Qps float64 `protobuf:"fixed64,6,opt,name=qps,proto3" json:"qps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RealtimeStats) Reset() { *m = RealtimeStats{} } +func (m *RealtimeStats) String() string { return proto.CompactTextString(m) } +func (*RealtimeStats) ProtoMessage() {} +func (*RealtimeStats) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{56} +} + +func (m *RealtimeStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RealtimeStats.Unmarshal(m, b) +} +func (m *RealtimeStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RealtimeStats.Marshal(b, m, deterministic) +} +func (m *RealtimeStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_RealtimeStats.Merge(m, src) +} +func (m *RealtimeStats) XXX_Size() int { + return xxx_messageInfo_RealtimeStats.Size(m) +} +func (m *RealtimeStats) XXX_DiscardUnknown() { + xxx_messageInfo_RealtimeStats.DiscardUnknown(m) +} + +var xxx_messageInfo_RealtimeStats proto.InternalMessageInfo + +func (m *RealtimeStats) GetHealthError() string { + if m != nil { + return m.HealthError + } + return "" +} + +func (m *RealtimeStats) GetSecondsBehindMaster() uint32 { + if m != nil { + return m.SecondsBehindMaster + } + return 0 +} + +func (m *RealtimeStats) GetBinlogPlayersCount() int32 { + if m != nil { + return m.BinlogPlayersCount + } + return 0 +} + +func (m *RealtimeStats) GetSecondsBehindMasterFilteredReplication() int64 { + if m != nil { + return m.SecondsBehindMasterFilteredReplication + } + return 0 +} + +func (m *RealtimeStats) GetCpuUsage() float64 { + if m != nil { + return m.CpuUsage + } + return 0 +} + +func (m *RealtimeStats) GetQps() float64 { + if m != nil { + return m.Qps + } + return 0 +} + +// AggregateStats contains information about the health of a group of +// tablets for a Target. It is used to propagate stats from a vtgate +// to another, or from the Gateway layer of a vtgate to the routing +// layer. +type AggregateStats struct { + // healthy_tablet_count is the number of healthy tablets in the group. + HealthyTabletCount int32 `protobuf:"varint,1,opt,name=healthy_tablet_count,json=healthyTabletCount,proto3" json:"healthy_tablet_count,omitempty"` + // unhealthy_tablet_count is the number of unhealthy tablets in the group. + UnhealthyTabletCount int32 `protobuf:"varint,2,opt,name=unhealthy_tablet_count,json=unhealthyTabletCount,proto3" json:"unhealthy_tablet_count,omitempty"` + // seconds_behind_master_min is the minimum of the + // seconds_behind_master values of the healthy tablets. It is unset + // if the tablet type is master. + SecondsBehindMasterMin uint32 `protobuf:"varint,3,opt,name=seconds_behind_master_min,json=secondsBehindMasterMin,proto3" json:"seconds_behind_master_min,omitempty"` + // seconds_behind_master_max is the maximum of the + // seconds_behind_master values of the healthy tablets. It is unset + // if the tablet type is master. + SecondsBehindMasterMax uint32 `protobuf:"varint,4,opt,name=seconds_behind_master_max,json=secondsBehindMasterMax,proto3" json:"seconds_behind_master_max,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AggregateStats) Reset() { *m = AggregateStats{} } +func (m *AggregateStats) String() string { return proto.CompactTextString(m) } +func (*AggregateStats) ProtoMessage() {} +func (*AggregateStats) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{57} +} + +func (m *AggregateStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AggregateStats.Unmarshal(m, b) +} +func (m *AggregateStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AggregateStats.Marshal(b, m, deterministic) +} +func (m *AggregateStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregateStats.Merge(m, src) +} +func (m *AggregateStats) XXX_Size() int { + return xxx_messageInfo_AggregateStats.Size(m) +} +func (m *AggregateStats) XXX_DiscardUnknown() { + xxx_messageInfo_AggregateStats.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregateStats proto.InternalMessageInfo + +func (m *AggregateStats) GetHealthyTabletCount() int32 { + if m != nil { + return m.HealthyTabletCount + } + return 0 +} + +func (m *AggregateStats) GetUnhealthyTabletCount() int32 { + if m != nil { + return m.UnhealthyTabletCount + } + return 0 +} + +func (m *AggregateStats) GetSecondsBehindMasterMin() uint32 { + if m != nil { + return m.SecondsBehindMasterMin + } + return 0 +} + +func (m *AggregateStats) GetSecondsBehindMasterMax() uint32 { + if m != nil { + return m.SecondsBehindMasterMax + } + return 0 +} + +// StreamHealthResponse is streamed by StreamHealth on a regular basis. +// It is expected to be used between a vtgate and vttablet: +// - target describes the tablet. +// - realtime_stats is set. +// - aggregate_stats is not set (deprecated) +type StreamHealthResponse struct { + // target is the current server type. Only queries with that exact Target + // record will be accepted (the cell may not match, however). + Target *Target `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + // serving is true iff the tablet is serving. A tablet may not be serving + // if filtered replication is enabled on a master for instance, + // or if a replica should not be used because the keyspace is being resharded. + Serving bool `protobuf:"varint,2,opt,name=serving,proto3" json:"serving,omitempty"` + // tablet_externally_reparented_timestamp can be interpreted as the + // last time we knew that this tablet was the MASTER of this shard + // (if StreamHealthResponse describes a group of tablets, between + // two vtgates, only one master will be present in the group, and + // this is this master's value). + // + // It is used by vtgate when determining the current MASTER of a shard. + // If vtgate sees more than one MASTER tablet, this timestamp is used + // as tiebreaker where the MASTER with the highest timestamp wins. + // Another usage of this timestamp is in go/vt/vtgate/buffer to detect the end + // of a reparent (failover) and stop buffering. + // + // In practice, this field is set to: + // a) the last time the RPC tabletmanager.TabletExternallyReparented was + // called on this tablet (usually done by an external failover tool e.g. + // Orchestrator). The failover tool can call this as long as we are the + // master i.e. even ages after the last reparent occurred. + // OR + // b) the last time an active reparent was executed through a vtctl command + // (InitShardMaster, PlannedReparentShard, EmergencyReparentShard) + // OR + // c) the last time vttablet was started and it initialized its tablet type + // as MASTER because it was recorded as the shard's current master in the + // topology (see go/vt/vttablet/tabletmanager/init_tablet.go) + // OR + // d) 0 if the vttablet was never a MASTER. + TabletExternallyReparentedTimestamp int64 `protobuf:"varint,3,opt,name=tablet_externally_reparented_timestamp,json=tabletExternallyReparentedTimestamp,proto3" json:"tablet_externally_reparented_timestamp,omitempty"` + // realtime_stats contains information about the tablet status. + // It is only filled in if the information is about a tablet. + RealtimeStats *RealtimeStats `protobuf:"bytes,4,opt,name=realtime_stats,json=realtimeStats,proto3" json:"realtime_stats,omitempty"` + // tablet_alias is the alias of the sending tablet. The discovery/healthcheck.go + // code uses it to verify that it's talking to the correct tablet and that it + // hasn't changed in the meantime e.g. due to tablet restarts where ports or + // ips have been reused but assigned differently. + TabletAlias *topodata.TabletAlias `protobuf:"bytes,5,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamHealthResponse) Reset() { *m = StreamHealthResponse{} } +func (m *StreamHealthResponse) String() string { return proto.CompactTextString(m) } +func (*StreamHealthResponse) ProtoMessage() {} +func (*StreamHealthResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{58} +} + +func (m *StreamHealthResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamHealthResponse.Unmarshal(m, b) +} +func (m *StreamHealthResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamHealthResponse.Marshal(b, m, deterministic) +} +func (m *StreamHealthResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamHealthResponse.Merge(m, src) +} +func (m *StreamHealthResponse) XXX_Size() int { + return xxx_messageInfo_StreamHealthResponse.Size(m) +} +func (m *StreamHealthResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StreamHealthResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamHealthResponse proto.InternalMessageInfo + +func (m *StreamHealthResponse) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *StreamHealthResponse) GetServing() bool { + if m != nil { + return m.Serving + } + return false +} + +func (m *StreamHealthResponse) GetTabletExternallyReparentedTimestamp() int64 { + if m != nil { + return m.TabletExternallyReparentedTimestamp + } + return 0 +} + +func (m *StreamHealthResponse) GetRealtimeStats() *RealtimeStats { + if m != nil { + return m.RealtimeStats + } + return nil +} + +func (m *StreamHealthResponse) GetTabletAlias() *topodata.TabletAlias { + if m != nil { + return m.TabletAlias + } + return nil +} + +// TransactionMetadata contains the metadata for a distributed transaction. +type TransactionMetadata struct { + Dtid string `protobuf:"bytes,1,opt,name=dtid,proto3" json:"dtid,omitempty"` + State TransactionState `protobuf:"varint,2,opt,name=state,proto3,enum=query.TransactionState" json:"state,omitempty"` + TimeCreated int64 `protobuf:"varint,3,opt,name=time_created,json=timeCreated,proto3" json:"time_created,omitempty"` + Participants []*Target `protobuf:"bytes,4,rep,name=participants,proto3" json:"participants,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TransactionMetadata) Reset() { *m = TransactionMetadata{} } +func (m *TransactionMetadata) String() string { return proto.CompactTextString(m) } +func (*TransactionMetadata) ProtoMessage() {} +func (*TransactionMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_5c6ac9b241082464, []int{59} +} + +func (m *TransactionMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TransactionMetadata.Unmarshal(m, b) +} +func (m *TransactionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TransactionMetadata.Marshal(b, m, deterministic) +} +func (m *TransactionMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransactionMetadata.Merge(m, src) +} +func (m *TransactionMetadata) XXX_Size() int { + return xxx_messageInfo_TransactionMetadata.Size(m) +} +func (m *TransactionMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_TransactionMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_TransactionMetadata proto.InternalMessageInfo + +func (m *TransactionMetadata) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +func (m *TransactionMetadata) GetState() TransactionState { + if m != nil { + return m.State + } + return TransactionState_UNKNOWN +} + +func (m *TransactionMetadata) GetTimeCreated() int64 { + if m != nil { + return m.TimeCreated + } + return 0 +} + +func (m *TransactionMetadata) GetParticipants() []*Target { + if m != nil { + return m.Participants + } + return nil +} + +func init() { + proto.RegisterEnum("query.MySqlFlag", MySqlFlag_name, MySqlFlag_value) + proto.RegisterEnum("query.Flag", Flag_name, Flag_value) + proto.RegisterEnum("query.Type", Type_name, Type_value) + proto.RegisterEnum("query.TransactionState", TransactionState_name, TransactionState_value) + proto.RegisterEnum("query.ExecuteOptions_IncludedFields", ExecuteOptions_IncludedFields_name, ExecuteOptions_IncludedFields_value) + proto.RegisterEnum("query.ExecuteOptions_Workload", ExecuteOptions_Workload_name, ExecuteOptions_Workload_value) + proto.RegisterEnum("query.ExecuteOptions_TransactionIsolation", ExecuteOptions_TransactionIsolation_name, ExecuteOptions_TransactionIsolation_value) + proto.RegisterEnum("query.StreamEvent_Statement_Category", StreamEvent_Statement_Category_name, StreamEvent_Statement_Category_value) + proto.RegisterType((*Target)(nil), "query.Target") + proto.RegisterType((*VTGateCallerID)(nil), "query.VTGateCallerID") + proto.RegisterType((*EventToken)(nil), "query.EventToken") + proto.RegisterType((*Value)(nil), "query.Value") + proto.RegisterType((*BindVariable)(nil), "query.BindVariable") + proto.RegisterType((*BoundQuery)(nil), "query.BoundQuery") + proto.RegisterMapType((map[string]*BindVariable)(nil), "query.BoundQuery.BindVariablesEntry") + proto.RegisterType((*ExecuteOptions)(nil), "query.ExecuteOptions") + proto.RegisterType((*Field)(nil), "query.Field") + proto.RegisterType((*Row)(nil), "query.Row") + proto.RegisterType((*QueryResult)(nil), "query.QueryResult") + proto.RegisterType((*QueryWarning)(nil), "query.QueryWarning") + proto.RegisterType((*StreamEvent)(nil), "query.StreamEvent") + proto.RegisterType((*StreamEvent_Statement)(nil), "query.StreamEvent.Statement") + proto.RegisterType((*ExecuteRequest)(nil), "query.ExecuteRequest") + proto.RegisterType((*ExecuteResponse)(nil), "query.ExecuteResponse") + proto.RegisterType((*ResultWithError)(nil), "query.ResultWithError") + proto.RegisterType((*ExecuteBatchRequest)(nil), "query.ExecuteBatchRequest") + proto.RegisterType((*ExecuteBatchResponse)(nil), "query.ExecuteBatchResponse") + proto.RegisterType((*StreamExecuteRequest)(nil), "query.StreamExecuteRequest") + proto.RegisterType((*StreamExecuteResponse)(nil), "query.StreamExecuteResponse") + proto.RegisterType((*BeginRequest)(nil), "query.BeginRequest") + proto.RegisterType((*BeginResponse)(nil), "query.BeginResponse") + proto.RegisterType((*CommitRequest)(nil), "query.CommitRequest") + proto.RegisterType((*CommitResponse)(nil), "query.CommitResponse") + proto.RegisterType((*RollbackRequest)(nil), "query.RollbackRequest") + proto.RegisterType((*RollbackResponse)(nil), "query.RollbackResponse") + proto.RegisterType((*PrepareRequest)(nil), "query.PrepareRequest") + proto.RegisterType((*PrepareResponse)(nil), "query.PrepareResponse") + proto.RegisterType((*CommitPreparedRequest)(nil), "query.CommitPreparedRequest") + proto.RegisterType((*CommitPreparedResponse)(nil), "query.CommitPreparedResponse") + proto.RegisterType((*RollbackPreparedRequest)(nil), "query.RollbackPreparedRequest") + proto.RegisterType((*RollbackPreparedResponse)(nil), "query.RollbackPreparedResponse") + proto.RegisterType((*CreateTransactionRequest)(nil), "query.CreateTransactionRequest") + proto.RegisterType((*CreateTransactionResponse)(nil), "query.CreateTransactionResponse") + proto.RegisterType((*StartCommitRequest)(nil), "query.StartCommitRequest") + proto.RegisterType((*StartCommitResponse)(nil), "query.StartCommitResponse") + proto.RegisterType((*SetRollbackRequest)(nil), "query.SetRollbackRequest") + proto.RegisterType((*SetRollbackResponse)(nil), "query.SetRollbackResponse") + proto.RegisterType((*ConcludeTransactionRequest)(nil), "query.ConcludeTransactionRequest") + proto.RegisterType((*ConcludeTransactionResponse)(nil), "query.ConcludeTransactionResponse") + proto.RegisterType((*ReadTransactionRequest)(nil), "query.ReadTransactionRequest") + proto.RegisterType((*ReadTransactionResponse)(nil), "query.ReadTransactionResponse") + proto.RegisterType((*BeginExecuteRequest)(nil), "query.BeginExecuteRequest") + proto.RegisterType((*BeginExecuteResponse)(nil), "query.BeginExecuteResponse") + proto.RegisterType((*BeginExecuteBatchRequest)(nil), "query.BeginExecuteBatchRequest") + proto.RegisterType((*BeginExecuteBatchResponse)(nil), "query.BeginExecuteBatchResponse") + proto.RegisterType((*MessageStreamRequest)(nil), "query.MessageStreamRequest") + proto.RegisterType((*MessageStreamResponse)(nil), "query.MessageStreamResponse") + proto.RegisterType((*MessageAckRequest)(nil), "query.MessageAckRequest") + proto.RegisterType((*MessageAckResponse)(nil), "query.MessageAckResponse") + proto.RegisterType((*ReserveExecuteRequest)(nil), "query.ReserveExecuteRequest") + proto.RegisterType((*ReserveExecuteResponse)(nil), "query.ReserveExecuteResponse") + proto.RegisterType((*ReserveBeginExecuteRequest)(nil), "query.ReserveBeginExecuteRequest") + proto.RegisterType((*ReserveBeginExecuteResponse)(nil), "query.ReserveBeginExecuteResponse") + proto.RegisterType((*ReleaseRequest)(nil), "query.ReleaseRequest") + proto.RegisterType((*ReleaseResponse)(nil), "query.ReleaseResponse") + proto.RegisterType((*StreamHealthRequest)(nil), "query.StreamHealthRequest") + proto.RegisterType((*RealtimeStats)(nil), "query.RealtimeStats") + proto.RegisterType((*AggregateStats)(nil), "query.AggregateStats") + proto.RegisterType((*StreamHealthResponse)(nil), "query.StreamHealthResponse") + proto.RegisterType((*TransactionMetadata)(nil), "query.TransactionMetadata") +} + +func init() { proto.RegisterFile("query.proto", fileDescriptor_5c6ac9b241082464) } + +var fileDescriptor_5c6ac9b241082464 = []byte{ + // 3141 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4b, 0x70, 0x1b, 0x47, + 0x7a, 0xd6, 0xe0, 0x45, 0xe0, 0x07, 0x01, 0x36, 0x9b, 0xa4, 0x04, 0x51, 0x7e, 0xd0, 0x63, 0xcb, + 0x66, 0x98, 0x84, 0x92, 0x28, 0x59, 0x51, 0x6c, 0x27, 0xd1, 0x10, 0x1c, 0xca, 0x90, 0x80, 0x01, + 0xd4, 0x18, 0x48, 0x96, 0x2a, 0x55, 0x53, 0x43, 0xa0, 0x05, 0x4e, 0x71, 0x80, 0x81, 0x66, 0x86, + 0x94, 0x78, 0x53, 0xe2, 0x38, 0xce, 0x3b, 0xce, 0xd3, 0x71, 0x5c, 0x71, 0xa5, 0x2a, 0x87, 0x54, + 0x2e, 0x39, 0xef, 0x6d, 0xab, 0xf6, 0xe0, 0xc3, 0x1e, 0xb6, 0x6a, 0x8f, 0xbb, 0x7b, 0xd8, 0xda, + 0xc3, 0xd6, 0xee, 0xc9, 0xb5, 0xb5, 0x87, 0x3d, 0xec, 0x61, 0x6b, 0xab, 0x1f, 0x33, 0x00, 0x48, + 0x58, 0xa2, 0xe5, 0x75, 0x6d, 0x49, 0xd6, 0xad, 0xff, 0x47, 0x3f, 0xbe, 0xaf, 0xff, 0xf9, 0xfb, + 0x31, 0x0d, 0xf9, 0xbb, 0xbb, 0xd4, 0xdf, 0x5f, 0x1d, 0xf8, 0x5e, 0xe8, 0xe1, 0x34, 0x17, 0x16, + 0x8b, 0xa1, 0x37, 0xf0, 0x3a, 0x76, 0x68, 0x0b, 0xf5, 0x62, 0x7e, 0x2f, 0xf4, 0x07, 0x6d, 0x21, + 0xa8, 0xef, 0x29, 0x90, 0x31, 0x6d, 0xbf, 0x4b, 0x43, 0xbc, 0x08, 0xd9, 0x1d, 0xba, 0x1f, 0x0c, + 0xec, 0x36, 0x2d, 0x29, 0x4b, 0xca, 0x72, 0x8e, 0xc4, 0x32, 0x9e, 0x87, 0x74, 0xb0, 0x6d, 0xfb, + 0x9d, 0x52, 0x82, 0x1b, 0x84, 0x80, 0x5f, 0x87, 0x7c, 0x68, 0x6f, 0xb9, 0x34, 0xb4, 0xc2, 0xfd, + 0x01, 0x2d, 0x25, 0x97, 0x94, 0xe5, 0xe2, 0xda, 0xfc, 0x6a, 0xdc, 0x9f, 0xc9, 0x8d, 0xe6, 0xfe, + 0x80, 0x12, 0x08, 0xe3, 0x32, 0xc6, 0x90, 0x6a, 0x53, 0xd7, 0x2d, 0xa5, 0x78, 0x5b, 0xbc, 0xac, + 0x6e, 0x40, 0xf1, 0x86, 0x79, 0xc5, 0x0e, 0x69, 0xd9, 0x76, 0x5d, 0xea, 0x57, 0x36, 0xd8, 0x70, + 0x76, 0x03, 0xea, 0xf7, 0xed, 0x5e, 0x3c, 0x9c, 0x48, 0xc6, 0xc7, 0x21, 0xd3, 0xf5, 0xbd, 0xdd, + 0x41, 0x50, 0x4a, 0x2c, 0x25, 0x97, 0x73, 0x44, 0x4a, 0xea, 0x9f, 0x02, 0xe8, 0x7b, 0xb4, 0x1f, + 0x9a, 0xde, 0x0e, 0xed, 0xe3, 0xe7, 0x20, 0x17, 0x3a, 0x3d, 0x1a, 0x84, 0x76, 0x6f, 0xc0, 0x9b, + 0x48, 0x92, 0xa1, 0xe2, 0x73, 0x20, 0x2d, 0x42, 0x76, 0xe0, 0x05, 0x4e, 0xe8, 0x78, 0x7d, 0x8e, + 0x27, 0x47, 0x62, 0x59, 0xfd, 0x63, 0x48, 0xdf, 0xb0, 0xdd, 0x5d, 0x8a, 0x5f, 0x84, 0x14, 0x07, + 0xac, 0x70, 0xc0, 0xf9, 0x55, 0x41, 0x3a, 0xc7, 0xc9, 0x0d, 0xac, 0xed, 0x3d, 0xe6, 0xc9, 0xdb, + 0x9e, 0x26, 0x42, 0x50, 0x77, 0x60, 0x7a, 0xdd, 0xe9, 0x77, 0x6e, 0xd8, 0xbe, 0xc3, 0xc8, 0x78, + 0xcc, 0x66, 0xf0, 0x2b, 0x90, 0xe1, 0x85, 0xa0, 0x94, 0x5c, 0x4a, 0x2e, 0xe7, 0xd7, 0xa6, 0x65, + 0x45, 0x3e, 0x36, 0x22, 0x6d, 0xea, 0xb7, 0x14, 0x80, 0x75, 0x6f, 0xb7, 0xdf, 0xb9, 0xce, 0x8c, + 0x18, 0x41, 0x32, 0xb8, 0xeb, 0x4a, 0x22, 0x59, 0x11, 0x5f, 0x83, 0xe2, 0x96, 0xd3, 0xef, 0x58, + 0x7b, 0x72, 0x38, 0x82, 0xcb, 0xfc, 0xda, 0x2b, 0xb2, 0xb9, 0x61, 0xe5, 0xd5, 0xd1, 0x51, 0x07, + 0x7a, 0x3f, 0xf4, 0xf7, 0x49, 0x61, 0x6b, 0x54, 0xb7, 0xd8, 0x02, 0x7c, 0xd8, 0x89, 0x75, 0xba, + 0x43, 0xf7, 0xa3, 0x4e, 0x77, 0xe8, 0x3e, 0xfe, 0x9d, 0x51, 0x44, 0xf9, 0xb5, 0xb9, 0xa8, 0xaf, + 0x91, 0xba, 0x12, 0xe6, 0x1b, 0x89, 0x4b, 0x8a, 0xfa, 0x8d, 0x34, 0x14, 0xf5, 0xfb, 0xb4, 0xbd, + 0x1b, 0xd2, 0xfa, 0x80, 0xcd, 0x41, 0x80, 0x6b, 0x30, 0xe3, 0xf4, 0xdb, 0xee, 0x6e, 0x87, 0x76, + 0xac, 0x3b, 0x0e, 0x75, 0x3b, 0x01, 0x8f, 0xa3, 0x62, 0x3c, 0xee, 0x71, 0xff, 0xd5, 0x8a, 0x74, + 0xde, 0xe4, 0xbe, 0xa4, 0xe8, 0x8c, 0xc9, 0x78, 0x05, 0x66, 0xdb, 0xae, 0x43, 0xfb, 0xa1, 0x75, + 0x87, 0xe1, 0xb5, 0x7c, 0xef, 0x5e, 0x50, 0x4a, 0x2f, 0x29, 0xcb, 0x59, 0x32, 0x23, 0x0c, 0x9b, + 0x4c, 0x4f, 0xbc, 0x7b, 0x01, 0x7e, 0x03, 0xb2, 0xf7, 0x3c, 0x7f, 0xc7, 0xf5, 0xec, 0x4e, 0x29, + 0xc3, 0xfb, 0x7c, 0x61, 0x72, 0x9f, 0x37, 0xa5, 0x17, 0x89, 0xfd, 0xf1, 0x32, 0xa0, 0xe0, 0xae, + 0x6b, 0x05, 0xd4, 0xa5, 0xed, 0xd0, 0x72, 0x9d, 0x9e, 0x13, 0x96, 0xb2, 0x3c, 0x24, 0x8b, 0xc1, + 0x5d, 0xb7, 0xc9, 0xd5, 0x55, 0xa6, 0xc5, 0x16, 0x2c, 0x84, 0xbe, 0xdd, 0x0f, 0xec, 0x36, 0x6b, + 0xcc, 0x72, 0x02, 0xcf, 0xb5, 0x79, 0x38, 0xe6, 0x78, 0x97, 0x2b, 0x93, 0xbb, 0x34, 0x87, 0x55, + 0x2a, 0x51, 0x0d, 0x32, 0x1f, 0x4e, 0xd0, 0xe2, 0x73, 0xb0, 0x10, 0xec, 0x38, 0x03, 0x8b, 0xb7, + 0x63, 0x0d, 0x5c, 0xbb, 0x6f, 0xb5, 0xed, 0xf6, 0x36, 0x2d, 0x01, 0x87, 0x8d, 0x99, 0x91, 0xcf, + 0x7b, 0xc3, 0xb5, 0xfb, 0x65, 0x66, 0x51, 0xdf, 0x84, 0xe2, 0x38, 0x8f, 0x78, 0x16, 0x0a, 0xe6, + 0xad, 0x86, 0x6e, 0x69, 0xc6, 0x86, 0x65, 0x68, 0x35, 0x1d, 0x1d, 0xc3, 0x05, 0xc8, 0x71, 0x55, + 0xdd, 0xa8, 0xde, 0x42, 0x0a, 0x9e, 0x82, 0xa4, 0x56, 0xad, 0xa2, 0x84, 0x7a, 0x09, 0xb2, 0x11, + 0x21, 0x78, 0x06, 0xf2, 0x2d, 0xa3, 0xd9, 0xd0, 0xcb, 0x95, 0xcd, 0x8a, 0xbe, 0x81, 0x8e, 0xe1, + 0x2c, 0xa4, 0xea, 0x55, 0xb3, 0x81, 0x14, 0x51, 0xd2, 0x1a, 0x28, 0xc1, 0x6a, 0x6e, 0xac, 0x6b, + 0x28, 0xa9, 0xfe, 0xaf, 0x02, 0xf3, 0x93, 0x80, 0xe1, 0x3c, 0x4c, 0x6d, 0xe8, 0x9b, 0x5a, 0xab, + 0x6a, 0xa2, 0x63, 0x78, 0x0e, 0x66, 0x88, 0xde, 0xd0, 0x35, 0x53, 0x5b, 0xaf, 0xea, 0x16, 0xd1, + 0xb5, 0x0d, 0xa4, 0x60, 0x0c, 0x45, 0x56, 0xb2, 0xca, 0xf5, 0x5a, 0xad, 0x62, 0x9a, 0xfa, 0x06, + 0x4a, 0xe0, 0x79, 0x40, 0x5c, 0xd7, 0x32, 0x86, 0xda, 0x24, 0x46, 0x30, 0xdd, 0xd4, 0x49, 0x45, + 0xab, 0x56, 0x6e, 0xb3, 0x06, 0x50, 0x0a, 0xbf, 0x04, 0xcf, 0x97, 0xeb, 0x46, 0xb3, 0xd2, 0x34, + 0x75, 0xc3, 0xb4, 0x9a, 0x86, 0xd6, 0x68, 0xbe, 0x5d, 0x37, 0x79, 0xcb, 0x02, 0x5c, 0x1a, 0x17, + 0x01, 0xb4, 0x96, 0x59, 0x17, 0xed, 0xa0, 0xcc, 0xd5, 0x54, 0x56, 0x41, 0x89, 0xab, 0xa9, 0x6c, + 0x02, 0x25, 0xaf, 0xa6, 0xb2, 0x49, 0x94, 0x52, 0x3f, 0x4c, 0x40, 0x9a, 0x73, 0xc5, 0xd2, 0xdd, + 0x48, 0x12, 0xe3, 0xe5, 0xf8, 0xd3, 0x4f, 0x3c, 0xe4, 0xd3, 0xe7, 0x19, 0x53, 0x26, 0x21, 0x21, + 0xe0, 0x53, 0x90, 0xf3, 0xfc, 0xae, 0x25, 0x2c, 0x22, 0x7d, 0x66, 0x3d, 0xbf, 0xcb, 0xf3, 0x2c, + 0x4b, 0x5d, 0x2c, 0xeb, 0x6e, 0xd9, 0x01, 0xe5, 0x11, 0x9c, 0x23, 0xb1, 0x8c, 0x4f, 0x02, 0xf3, + 0xb3, 0xf8, 0x38, 0x32, 0xdc, 0x36, 0xe5, 0xf9, 0x5d, 0x83, 0x0d, 0xe5, 0x65, 0x28, 0xb4, 0x3d, + 0x77, 0xb7, 0xd7, 0xb7, 0x5c, 0xda, 0xef, 0x86, 0xdb, 0xa5, 0xa9, 0x25, 0x65, 0xb9, 0x40, 0xa6, + 0x85, 0xb2, 0xca, 0x75, 0xb8, 0x04, 0x53, 0xed, 0x6d, 0xdb, 0x0f, 0xa8, 0x88, 0xda, 0x02, 0x89, + 0x44, 0xde, 0x2b, 0x6d, 0x3b, 0x3d, 0xdb, 0x0d, 0x78, 0x84, 0x16, 0x48, 0x2c, 0x33, 0x10, 0x77, + 0x5c, 0xbb, 0x1b, 0xf0, 0xc8, 0x2a, 0x10, 0x21, 0xa8, 0x7f, 0x00, 0x49, 0xe2, 0xdd, 0x63, 0x4d, + 0x8a, 0x0e, 0x83, 0x92, 0xb2, 0x94, 0x5c, 0xc6, 0x24, 0x12, 0x59, 0x76, 0x97, 0x09, 0x4e, 0xe4, + 0xbd, 0x28, 0xa5, 0x7d, 0xac, 0x40, 0x9e, 0x07, 0x26, 0xa1, 0xc1, 0xae, 0x1b, 0xb2, 0x44, 0x28, + 0x33, 0x80, 0x32, 0x96, 0x08, 0x39, 0xed, 0x44, 0xda, 0x18, 0x3e, 0xf6, 0x51, 0x5b, 0xf6, 0x9d, + 0x3b, 0xb4, 0x1d, 0x52, 0x91, 0xef, 0x53, 0x64, 0x9a, 0x29, 0x35, 0xa9, 0x63, 0xc4, 0x3a, 0xfd, + 0x80, 0xfa, 0xa1, 0xe5, 0x74, 0x38, 0xe5, 0x29, 0x92, 0x15, 0x8a, 0x4a, 0x07, 0xbf, 0x00, 0x29, + 0x9e, 0x16, 0x52, 0xbc, 0x17, 0x90, 0xbd, 0x10, 0xef, 0x1e, 0xe1, 0xfa, 0xab, 0xa9, 0x6c, 0x1a, + 0x65, 0xd4, 0xb7, 0x60, 0x9a, 0x0f, 0xee, 0xa6, 0xed, 0xf7, 0x9d, 0x7e, 0x97, 0xaf, 0x72, 0x5e, + 0x47, 0x4c, 0x7b, 0x81, 0xf0, 0x32, 0xc3, 0xdc, 0xa3, 0x41, 0x60, 0x77, 0xa9, 0x5c, 0x75, 0x22, + 0x51, 0xfd, 0xef, 0x24, 0xe4, 0x9b, 0xa1, 0x4f, 0xed, 0x1e, 0x5f, 0xc0, 0xf0, 0x5b, 0x00, 0x41, + 0x68, 0x87, 0xb4, 0x47, 0xfb, 0x61, 0x84, 0xef, 0x39, 0xd9, 0xf3, 0x88, 0xdf, 0x6a, 0x33, 0x72, + 0x22, 0x23, 0xfe, 0x78, 0x0d, 0xf2, 0x94, 0x99, 0xad, 0x90, 0x2d, 0x84, 0x32, 0xd9, 0xce, 0x46, + 0x99, 0x23, 0x5e, 0x21, 0x09, 0xd0, 0xb8, 0xbc, 0xf8, 0x49, 0x02, 0x72, 0x71, 0x6b, 0x58, 0x83, + 0x6c, 0xdb, 0x0e, 0x69, 0xd7, 0xf3, 0xf7, 0xe5, 0xfa, 0x74, 0xfa, 0x61, 0xbd, 0xaf, 0x96, 0xa5, + 0x33, 0x89, 0xab, 0xe1, 0xe7, 0x41, 0x2c, 0xfa, 0x22, 0xea, 0x04, 0xde, 0x1c, 0xd7, 0xf0, 0xb8, + 0x7b, 0x03, 0xf0, 0xc0, 0x77, 0x7a, 0xb6, 0xbf, 0x6f, 0xed, 0xd0, 0xfd, 0x28, 0x97, 0x27, 0x27, + 0xcc, 0x24, 0x92, 0x7e, 0xd7, 0xe8, 0xbe, 0xcc, 0x3e, 0x97, 0xc6, 0xeb, 0xca, 0x68, 0x39, 0x3c, + 0x3f, 0x23, 0x35, 0xf9, 0xea, 0x18, 0x44, 0xeb, 0x60, 0x9a, 0x07, 0x16, 0x2b, 0xaa, 0xaf, 0x41, + 0x36, 0x1a, 0x3c, 0xce, 0x41, 0x5a, 0xf7, 0x7d, 0xcf, 0x47, 0xc7, 0x78, 0x12, 0xaa, 0x55, 0x45, + 0x1e, 0xdb, 0xd8, 0x60, 0x79, 0xec, 0x47, 0x89, 0x78, 0x31, 0x22, 0xf4, 0xee, 0x2e, 0x0d, 0x42, + 0xfc, 0x27, 0x30, 0x47, 0x79, 0x08, 0x39, 0x7b, 0xd4, 0x6a, 0xf3, 0x9d, 0x0b, 0x0b, 0x20, 0x85, + 0xf3, 0x3d, 0xb3, 0x2a, 0x36, 0x5a, 0xd1, 0x8e, 0x86, 0xcc, 0xc6, 0xbe, 0x52, 0xd5, 0xc1, 0x3a, + 0xcc, 0x39, 0xbd, 0x1e, 0xed, 0x38, 0x76, 0x38, 0xda, 0x80, 0x98, 0xb0, 0x85, 0x68, 0x61, 0x1f, + 0xdb, 0x18, 0x91, 0xd9, 0xb8, 0x46, 0xdc, 0xcc, 0x69, 0xc8, 0x84, 0x7c, 0x13, 0xc7, 0x63, 0x37, + 0xbf, 0x56, 0x88, 0x12, 0x0a, 0x57, 0x12, 0x69, 0xc4, 0xaf, 0x81, 0xd8, 0x12, 0xf2, 0xd4, 0x31, + 0x0c, 0x88, 0xe1, 0x4a, 0x4f, 0x84, 0x1d, 0x9f, 0x86, 0xe2, 0xd8, 0x1a, 0xd4, 0xe1, 0x84, 0x25, + 0x49, 0x61, 0x74, 0x41, 0xe9, 0xe0, 0x33, 0x30, 0xe5, 0x89, 0xf5, 0x87, 0x27, 0x95, 0xe1, 0x88, + 0xc7, 0x17, 0x27, 0x12, 0x79, 0xe1, 0x17, 0x21, 0xef, 0xd3, 0x80, 0xfa, 0x7b, 0xb4, 0xc3, 0x1a, + 0x9d, 0xe2, 0x8d, 0x42, 0xa4, 0xaa, 0x74, 0xd4, 0x3f, 0x82, 0x99, 0x98, 0xe2, 0x60, 0xe0, 0xf5, + 0x03, 0x8a, 0x57, 0x20, 0xe3, 0xf3, 0xef, 0x5d, 0xd2, 0x8a, 0x65, 0x1f, 0x23, 0x99, 0x80, 0x48, + 0x0f, 0xb5, 0x03, 0x33, 0x42, 0x73, 0xd3, 0x09, 0xb7, 0xf9, 0x4c, 0xe2, 0xd3, 0x90, 0xa6, 0xac, + 0x70, 0x60, 0x52, 0x48, 0xa3, 0xcc, 0xed, 0x44, 0x58, 0x47, 0x7a, 0x49, 0x3c, 0xb2, 0x97, 0x9f, + 0x25, 0x60, 0x4e, 0x8e, 0x72, 0xdd, 0x0e, 0xdb, 0xdb, 0x4f, 0x68, 0x34, 0xfc, 0x2e, 0x4c, 0x31, + 0xbd, 0x13, 0x7f, 0x39, 0x13, 0xe2, 0x21, 0xf2, 0x60, 0x11, 0x61, 0x07, 0xd6, 0xc8, 0xf4, 0xcb, + 0x4d, 0x52, 0xc1, 0x0e, 0x46, 0x56, 0xe8, 0x09, 0x81, 0x93, 0x79, 0x44, 0xe0, 0x4c, 0x1d, 0x25, + 0x70, 0xd4, 0x0d, 0x98, 0x1f, 0x67, 0x5c, 0x06, 0xc7, 0xef, 0xc1, 0x94, 0x98, 0x94, 0x28, 0x47, + 0x4e, 0x9a, 0xb7, 0xc8, 0x45, 0xfd, 0x34, 0x01, 0xf3, 0x32, 0x7d, 0x7d, 0x3d, 0xbe, 0xe3, 0x11, + 0x9e, 0xd3, 0x47, 0xfa, 0x40, 0x8f, 0x36, 0x7f, 0x6a, 0x19, 0x16, 0x0e, 0xf0, 0xf8, 0x18, 0x1f, + 0xeb, 0x67, 0x0a, 0x4c, 0xaf, 0xd3, 0xae, 0xd3, 0x7f, 0x42, 0x67, 0x61, 0x84, 0xdc, 0xd4, 0x91, + 0x82, 0x78, 0x00, 0x05, 0x89, 0x57, 0xb2, 0x75, 0x98, 0x6d, 0x65, 0xd2, 0xd7, 0x72, 0x09, 0xa6, + 0xe5, 0x31, 0xdb, 0x76, 0x1d, 0x3b, 0x88, 0xf1, 0x1c, 0x38, 0x67, 0x6b, 0xcc, 0x48, 0xe4, 0x89, + 0x9c, 0x0b, 0xea, 0x8f, 0x15, 0x28, 0x94, 0xbd, 0x5e, 0xcf, 0x09, 0x9f, 0x50, 0x8e, 0x0f, 0x33, + 0x94, 0x9a, 0x14, 0x8f, 0xe7, 0xa0, 0x18, 0xc1, 0x94, 0xd4, 0x1e, 0x58, 0x69, 0x94, 0x43, 0x2b, + 0xcd, 0x4f, 0x14, 0x98, 0x21, 0x9e, 0xeb, 0x6e, 0xd9, 0xed, 0x9d, 0xa7, 0x9b, 0x9c, 0xf3, 0x80, + 0x86, 0x40, 0x8f, 0x4a, 0xcf, 0x2f, 0x15, 0x28, 0x36, 0x7c, 0x3a, 0xb0, 0x7d, 0xfa, 0x54, 0xb3, + 0xc3, 0xb6, 0xe9, 0x9d, 0x50, 0x6e, 0x70, 0x72, 0x84, 0x97, 0xd5, 0x59, 0x98, 0x89, 0xb1, 0x0b, + 0xc2, 0xd4, 0xef, 0x2b, 0xb0, 0x20, 0x42, 0x4c, 0x5a, 0x3a, 0x4f, 0x28, 0x2d, 0x11, 0xde, 0xd4, + 0x08, 0xde, 0x12, 0x1c, 0x3f, 0x88, 0x4d, 0xc2, 0x7e, 0x37, 0x01, 0x27, 0xa2, 0xe0, 0x79, 0xc2, + 0x81, 0x7f, 0x89, 0x78, 0x58, 0x84, 0xd2, 0x61, 0x12, 0x24, 0x43, 0x1f, 0x24, 0xa0, 0x54, 0xf6, + 0xa9, 0x1d, 0xd2, 0x91, 0x7d, 0xd0, 0xd3, 0x13, 0x1b, 0xf8, 0x1c, 0x4c, 0x0f, 0x6c, 0x3f, 0x74, + 0xda, 0xce, 0xc0, 0x66, 0x47, 0xd1, 0x34, 0xdf, 0x66, 0x1d, 0x68, 0x60, 0xcc, 0x45, 0x3d, 0x05, + 0x27, 0x27, 0x30, 0x22, 0xf9, 0xfa, 0x95, 0x02, 0xb8, 0x19, 0xda, 0x7e, 0xf8, 0x35, 0x58, 0x97, + 0x26, 0x06, 0xd3, 0x02, 0xcc, 0x8d, 0xe1, 0x1f, 0xe5, 0x85, 0x86, 0x5f, 0x8b, 0x25, 0xe9, 0x73, + 0x79, 0x19, 0xc5, 0x2f, 0x79, 0xf9, 0xa1, 0x02, 0x8b, 0x65, 0x4f, 0x5c, 0x3e, 0x3e, 0x95, 0x5f, + 0x98, 0xfa, 0x3c, 0x9c, 0x9a, 0x08, 0x50, 0x12, 0xf0, 0x03, 0x05, 0x8e, 0x13, 0x6a, 0x77, 0x9e, + 0x4e, 0xf0, 0xd7, 0xe1, 0xc4, 0x21, 0x70, 0x72, 0x8f, 0x72, 0x11, 0xb2, 0x3d, 0x1a, 0xda, 0x6c, + 0x87, 0x2b, 0x21, 0x2d, 0x46, 0xed, 0x0e, 0xbd, 0x6b, 0xd2, 0x83, 0xc4, 0xbe, 0xea, 0x37, 0x13, + 0x30, 0xc7, 0xf7, 0xd9, 0xcf, 0x0e, 0x79, 0x47, 0xba, 0x85, 0xc9, 0x1c, 0xda, 0xfc, 0x7d, 0x5b, + 0x81, 0xf9, 0x71, 0x06, 0xe3, 0x03, 0xcb, 0x6f, 0xfa, 0x32, 0x65, 0x42, 0xc6, 0x48, 0x1e, 0xe5, + 0x0c, 0x94, 0x3a, 0xf2, 0x19, 0xe8, 0x3b, 0x09, 0x28, 0x8d, 0x82, 0x79, 0x76, 0x65, 0x33, 0x7e, + 0x65, 0xf3, 0x45, 0x2f, 0xf1, 0xd4, 0xef, 0x2a, 0x70, 0x72, 0x02, 0xa1, 0x5f, 0x2c, 0x44, 0x46, + 0x2e, 0x6e, 0x12, 0x8f, 0xbc, 0xb8, 0xf9, 0xea, 0x83, 0xe4, 0x7b, 0x0a, 0xcc, 0xd7, 0xc4, 0x55, + 0xbc, 0xb8, 0xd8, 0x78, 0x72, 0x53, 0x2c, 0xbf, 0x6d, 0x4f, 0x0d, 0xff, 0x35, 0xa9, 0x65, 0x58, + 0x38, 0x00, 0xed, 0x31, 0x2e, 0x6b, 0x7e, 0xa1, 0xc0, 0xac, 0x6c, 0x45, 0x7b, 0x62, 0x77, 0x27, + 0x13, 0xd8, 0xc1, 0x2f, 0x40, 0xd2, 0xe9, 0x44, 0xdb, 0xda, 0xf1, 0x5f, 0xe9, 0xcc, 0xa0, 0x5e, + 0x06, 0x3c, 0x8a, 0xfb, 0x31, 0xa8, 0xfb, 0x69, 0x02, 0x16, 0x88, 0x48, 0xae, 0xcf, 0x7e, 0x1f, + 0x7c, 0xd9, 0xdf, 0x07, 0x03, 0x9f, 0x5a, 0x51, 0x0a, 0x9c, 0xe2, 0x6f, 0x3f, 0x60, 0xe0, 0xd3, + 0xeb, 0x42, 0xa3, 0x7e, 0xca, 0xf7, 0x4a, 0xe3, 0x54, 0x7f, 0x75, 0x4b, 0xd7, 0x81, 0x75, 0x34, + 0x79, 0x70, 0x1d, 0xfd, 0x12, 0xf9, 0xe8, 0xd3, 0x04, 0x2c, 0x4a, 0x20, 0xcf, 0xb6, 0x32, 0x47, + 0x8f, 0x88, 0xcc, 0xa1, 0x88, 0xf8, 0xb9, 0x02, 0xa7, 0x26, 0x12, 0xf9, 0x5b, 0xdf, 0xd1, 0x1c, + 0x88, 0x9e, 0xd4, 0x23, 0xa3, 0x27, 0x7d, 0xe4, 0xe8, 0x79, 0x3f, 0x01, 0x45, 0x42, 0x5d, 0x6a, + 0x07, 0x4f, 0xf9, 0xe5, 0xdd, 0x01, 0x0e, 0xd3, 0x87, 0x76, 0xb2, 0xb3, 0x30, 0x13, 0x13, 0x21, + 0xcf, 0x53, 0xfc, 0xfc, 0xcd, 0xd6, 0xc1, 0xb7, 0xa9, 0xed, 0x86, 0xd1, 0x4e, 0x50, 0xfd, 0x9f, + 0x04, 0x14, 0x08, 0xd3, 0x38, 0x3d, 0xda, 0x0c, 0xed, 0x30, 0xc0, 0x2f, 0xc1, 0xf4, 0x36, 0x77, + 0xb1, 0x86, 0x11, 0x92, 0x23, 0x79, 0xa1, 0x13, 0x3f, 0x17, 0xd7, 0x60, 0x21, 0xa0, 0x6d, 0xaf, + 0xdf, 0x09, 0xac, 0x2d, 0xba, 0xed, 0xf4, 0x3b, 0x56, 0xcf, 0x0e, 0x42, 0xea, 0x73, 0x5a, 0x0a, + 0x64, 0x4e, 0x1a, 0xd7, 0xb9, 0xad, 0xc6, 0x4d, 0xf8, 0x2c, 0xcc, 0x6f, 0x39, 0x7d, 0xd7, 0xeb, + 0x5a, 0x03, 0xd7, 0xde, 0xa7, 0x7e, 0x60, 0xb5, 0xbd, 0xdd, 0xbe, 0xe0, 0x23, 0x4d, 0xb0, 0xb0, + 0x35, 0x84, 0xa9, 0xcc, 0x2c, 0xf8, 0x36, 0xac, 0x4c, 0xec, 0xc5, 0xba, 0xe3, 0xb8, 0x21, 0xf5, + 0x69, 0xc7, 0xf2, 0xe9, 0xc0, 0x75, 0xda, 0xe2, 0x99, 0x90, 0x20, 0xea, 0xd5, 0x09, 0x5d, 0x6f, + 0x4a, 0x77, 0x32, 0xf4, 0xc6, 0xa7, 0x20, 0xd7, 0x1e, 0xec, 0x5a, 0xbb, 0xfc, 0x4d, 0x02, 0xe3, + 0x4f, 0x21, 0xd9, 0xf6, 0x60, 0xb7, 0xc5, 0x64, 0x8c, 0x20, 0x79, 0x77, 0x20, 0x92, 0xb3, 0x42, + 0x58, 0x51, 0xfd, 0x4c, 0x81, 0xa2, 0xd6, 0xed, 0xfa, 0xb4, 0x6b, 0x87, 0x92, 0xa6, 0xb3, 0x30, + 0x2f, 0x28, 0xd9, 0xb7, 0x64, 0xb8, 0x0a, 0x3c, 0x8a, 0xc0, 0x23, 0x6d, 0x22, 0x56, 0x05, 0x9e, + 0x0b, 0x70, 0x7c, 0xb7, 0x3f, 0xb1, 0x4e, 0x82, 0xd7, 0x99, 0x8f, 0xad, 0xa3, 0xb5, 0xfe, 0x10, + 0x4e, 0x4e, 0x66, 0xa1, 0xe7, 0x88, 0xa7, 0x7a, 0x05, 0x72, 0x7c, 0x02, 0xe8, 0x9a, 0xd3, 0x7f, + 0x48, 0x55, 0xfb, 0x3e, 0xe7, 0xeb, 0x73, 0xaa, 0xda, 0xf7, 0xd5, 0xff, 0x8b, 0x7f, 0x19, 0x46, + 0xe1, 0x12, 0x27, 0x8e, 0x28, 0x90, 0x95, 0x87, 0x05, 0x72, 0x09, 0xa6, 0x58, 0x30, 0x3a, 0xfd, + 0x2e, 0x07, 0x97, 0x25, 0x91, 0x88, 0x9b, 0xf0, 0xaa, 0xc4, 0x4e, 0xef, 0x87, 0xd4, 0xef, 0xdb, + 0xae, 0xbb, 0x6f, 0x89, 0xdb, 0xc5, 0x7e, 0x48, 0x3b, 0xd6, 0xf0, 0xe9, 0xa2, 0x48, 0x1f, 0x2f, + 0x0b, 0x6f, 0x3d, 0x76, 0x26, 0xb1, 0xaf, 0x19, 0x3f, 0x6a, 0x7c, 0x13, 0x8a, 0xbe, 0x0c, 0x62, + 0x2b, 0x60, 0xd3, 0x23, 0x53, 0xee, 0x7c, 0xf4, 0x28, 0x62, 0x34, 0xc2, 0x49, 0xc1, 0x1f, 0x0b, + 0xf8, 0xc7, 0x4e, 0x38, 0x57, 0x53, 0xd9, 0x0c, 0x9a, 0x52, 0xff, 0x5f, 0x81, 0xb9, 0x09, 0x47, + 0xf3, 0xf8, 0xdc, 0xaf, 0x8c, 0x5c, 0x2b, 0xfe, 0x3e, 0xa4, 0xf9, 0x7b, 0x15, 0xf9, 0x02, 0xea, + 0xc4, 0xe1, 0x93, 0x3d, 0x7f, 0x5b, 0x42, 0x84, 0x17, 0xfb, 0x16, 0x39, 0xa6, 0x36, 0xbf, 0x57, + 0x8c, 0x32, 0x6a, 0x9e, 0xe9, 0xc4, 0x55, 0xe3, 0xe1, 0x8b, 0xca, 0xd4, 0x23, 0x2f, 0x2a, 0x57, + 0xfe, 0x39, 0x09, 0xb9, 0xda, 0x7e, 0xf3, 0xae, 0xbb, 0xe9, 0xda, 0x5d, 0xfe, 0xf8, 0xa3, 0xd6, + 0x30, 0x6f, 0xa1, 0x63, 0x78, 0x16, 0x0a, 0x46, 0xdd, 0xb4, 0x8c, 0x56, 0xb5, 0x6a, 0x6d, 0x56, + 0xb5, 0x2b, 0x48, 0xc1, 0x08, 0xa6, 0x1b, 0xa4, 0x62, 0x5d, 0xd3, 0x6f, 0x09, 0x4d, 0x02, 0xcf, + 0xc1, 0x4c, 0xcb, 0xa8, 0x5c, 0x6f, 0xe9, 0x43, 0x65, 0x0a, 0x2f, 0xc0, 0x6c, 0xad, 0x55, 0x35, + 0x2b, 0x8d, 0xea, 0x88, 0x3a, 0x8b, 0x0b, 0x90, 0x5b, 0xaf, 0xd6, 0xd7, 0x85, 0x88, 0x58, 0xfb, + 0x2d, 0xa3, 0x59, 0xb9, 0x62, 0xe8, 0x1b, 0x42, 0xb5, 0xc4, 0x54, 0xb7, 0x75, 0x52, 0xdf, 0xac, + 0x44, 0x5d, 0x5e, 0xc6, 0x08, 0xf2, 0xeb, 0x15, 0x43, 0x23, 0xb2, 0x95, 0x07, 0x0a, 0x2e, 0x42, + 0x4e, 0x37, 0x5a, 0x35, 0x29, 0x27, 0x70, 0x09, 0xe6, 0xb4, 0x96, 0x59, 0xb7, 0x2a, 0x46, 0x99, + 0xe8, 0x35, 0xdd, 0x30, 0xa5, 0x25, 0x85, 0xe7, 0xa0, 0x68, 0x56, 0x6a, 0x7a, 0xd3, 0xd4, 0x6a, + 0x0d, 0xa9, 0x64, 0xa3, 0xc8, 0x36, 0xf5, 0xc8, 0x07, 0xe1, 0x45, 0x58, 0x30, 0xea, 0x96, 0x7c, + 0x48, 0x67, 0xdd, 0xd0, 0xaa, 0x2d, 0x5d, 0xda, 0x96, 0xf0, 0x09, 0xc0, 0x75, 0xc3, 0x6a, 0x35, + 0x36, 0x34, 0x53, 0xb7, 0x8c, 0xfa, 0x4d, 0x69, 0xb8, 0x8c, 0x8b, 0x90, 0x1d, 0x8e, 0xe0, 0x01, + 0x63, 0xa1, 0xd0, 0xd0, 0x88, 0x39, 0x04, 0xfb, 0xe0, 0x01, 0x23, 0x0b, 0xae, 0x90, 0x7a, 0xab, + 0x31, 0x74, 0x9b, 0x85, 0xbc, 0x24, 0x4b, 0xaa, 0x52, 0x4c, 0xb5, 0x5e, 0x31, 0xca, 0xf1, 0xf8, + 0x1e, 0x64, 0x17, 0x13, 0x48, 0x59, 0xd9, 0x81, 0x14, 0x9f, 0x8e, 0x2c, 0xa4, 0x8c, 0xba, 0xa1, + 0xa3, 0x63, 0x78, 0x06, 0xa0, 0xd2, 0xac, 0x18, 0xa6, 0x7e, 0x85, 0x68, 0x55, 0x06, 0x9b, 0x2b, + 0x22, 0x02, 0x19, 0xda, 0x69, 0x98, 0xaa, 0x34, 0x37, 0xab, 0x75, 0xcd, 0x94, 0x30, 0x2b, 0xcd, + 0xeb, 0xad, 0xba, 0xc9, 0x8c, 0x08, 0xe7, 0x21, 0x53, 0x69, 0x9a, 0xfa, 0x3b, 0x26, 0xc3, 0xc5, + 0x6d, 0x82, 0x55, 0xf4, 0xe0, 0xf2, 0xca, 0x47, 0x49, 0x48, 0xf1, 0x37, 0xc9, 0x05, 0xc8, 0xf1, + 0xd9, 0x36, 0x6f, 0x35, 0x58, 0x97, 0x39, 0x48, 0x55, 0x0c, 0xf3, 0x12, 0xfa, 0xb3, 0x04, 0x06, + 0x48, 0xb7, 0x78, 0xf9, 0xcf, 0x33, 0xac, 0x5c, 0x31, 0xcc, 0x73, 0x17, 0xd1, 0xbb, 0x09, 0xd6, + 0x6c, 0x4b, 0x08, 0x7f, 0x11, 0x19, 0xd6, 0x2e, 0xa0, 0xf7, 0x62, 0xc3, 0xda, 0x05, 0xf4, 0x97, + 0x91, 0xe1, 0xfc, 0x1a, 0x7a, 0x3f, 0x36, 0x9c, 0x5f, 0x43, 0x7f, 0x15, 0x19, 0x2e, 0x5e, 0x40, + 0x7f, 0x1d, 0x1b, 0x2e, 0x5e, 0x40, 0x7f, 0x93, 0x61, 0x58, 0x38, 0x92, 0xf3, 0x6b, 0xe8, 0x6f, + 0xb3, 0xb1, 0x74, 0xf1, 0x02, 0xfa, 0xbb, 0x2c, 0x9b, 0xff, 0x78, 0x56, 0xd1, 0xdf, 0x23, 0x36, + 0x4c, 0x36, 0x41, 0xe8, 0x1f, 0x78, 0x91, 0x99, 0xd0, 0x3f, 0x22, 0x86, 0x91, 0x69, 0xb9, 0xf8, + 0x01, 0xb7, 0xdc, 0xd2, 0x35, 0x82, 0xfe, 0x29, 0x23, 0xde, 0x4d, 0x96, 0x2b, 0x35, 0xad, 0x8a, + 0x30, 0xaf, 0xc1, 0x58, 0xf9, 0x97, 0xb3, 0xac, 0xc8, 0xc2, 0x13, 0xfd, 0x6b, 0x83, 0x75, 0x78, + 0x43, 0x23, 0xe5, 0xb7, 0x35, 0x82, 0xfe, 0xed, 0x2c, 0xeb, 0xf0, 0x86, 0x46, 0x24, 0x5f, 0xff, + 0xde, 0x60, 0x8e, 0xdc, 0xf4, 0xe1, 0x59, 0x36, 0x68, 0xa9, 0xff, 0x8f, 0x06, 0xce, 0x42, 0x72, + 0xbd, 0x62, 0xa2, 0x8f, 0x78, 0x6f, 0x2c, 0x44, 0xd1, 0x7f, 0x22, 0xa6, 0x6c, 0xea, 0x26, 0xfa, + 0x98, 0x29, 0xd3, 0x66, 0xab, 0x51, 0xd5, 0xd1, 0x73, 0x6c, 0x70, 0x57, 0xf4, 0x7a, 0x4d, 0x37, + 0xc9, 0x2d, 0xf4, 0x5f, 0xdc, 0xfd, 0x6a, 0xb3, 0x6e, 0xa0, 0x4f, 0x10, 0x2e, 0x02, 0xe8, 0xef, + 0x34, 0x88, 0xde, 0x6c, 0x56, 0xea, 0x06, 0x7a, 0x71, 0x65, 0x13, 0xd0, 0xc1, 0x74, 0xc0, 0x00, + 0xb4, 0x8c, 0x6b, 0x46, 0xfd, 0xa6, 0x81, 0x8e, 0x31, 0xa1, 0x41, 0xf4, 0x86, 0x46, 0x74, 0xa4, + 0x60, 0x80, 0x8c, 0x7c, 0x8d, 0x99, 0xc0, 0xd3, 0x90, 0x25, 0xf5, 0x6a, 0x75, 0x5d, 0x2b, 0x5f, + 0x43, 0xc9, 0xf5, 0xd7, 0x61, 0xc6, 0xf1, 0x56, 0xf7, 0x9c, 0x90, 0x06, 0x81, 0x78, 0xf5, 0x7e, + 0x5b, 0x95, 0x92, 0xe3, 0x9d, 0x11, 0xa5, 0x33, 0x5d, 0xef, 0xcc, 0x5e, 0x78, 0x86, 0x5b, 0xcf, + 0xf0, 0x8c, 0xb1, 0x95, 0xe1, 0xc2, 0xf9, 0x5f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x48, 0x08, 0xc2, + 0xc3, 0x53, 0x2f, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/queryservice/queryservice.pb.go b/internal/stackql-parser-fork/go/vt/proto/queryservice/queryservice.pb.go new file mode 100644 index 00000000..aa972fa4 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/queryservice/queryservice.pb.go @@ -0,0 +1,1235 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: queryservice.proto + +package queryservice + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + binlogdata "github.com/stackql/stackql-parser/go/vt/proto/binlogdata" + query "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("queryservice.proto", fileDescriptor_4bd2dde8711f22e3) } + +var fileDescriptor_4bd2dde8711f22e3 = []byte{ + // 598 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x55, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0x85, 0x43, 0x1b, 0xb4, 0x09, 0xa1, 0x6c, 0x29, 0x50, 0x27, 0xa4, 0x4d, 0x6e, 0x08, 0x29, + 0x41, 0x80, 0x84, 0x54, 0x89, 0x43, 0x13, 0x51, 0x81, 0x10, 0x5f, 0x2e, 0x54, 0x08, 0x24, 0xa4, + 0x8d, 0x33, 0x0a, 0x56, 0x1d, 0x6f, 0xea, 0x5d, 0xa7, 0xf0, 0xdb, 0xb9, 0x54, 0xb1, 0x3d, 0xe3, + 0xdd, 0x8d, 0x9d, 0x5b, 0xe7, 0xbd, 0x99, 0xd7, 0xc9, 0x8c, 0xe7, 0x2d, 0xe3, 0x57, 0x29, 0x24, + 0xff, 0x14, 0x24, 0xab, 0x30, 0x80, 0xe1, 0x32, 0x91, 0x5a, 0xf2, 0x96, 0x89, 0x79, 0xcd, 0x2c, + 0xca, 0x29, 0x6f, 0x6f, 0x1a, 0xc6, 0x91, 0x9c, 0xcf, 0x84, 0x16, 0x39, 0xf2, 0xe2, 0x7f, 0x9b, + 0xed, 0x7c, 0x5d, 0x67, 0xf0, 0x13, 0xd6, 0x78, 0xfb, 0x17, 0x82, 0x54, 0x03, 0x3f, 0x18, 0xe6, + 0x45, 0x45, 0xec, 0xc3, 0x55, 0x0a, 0x4a, 0x7b, 0x0f, 0x5d, 0x58, 0x2d, 0x65, 0xac, 0x60, 0x70, + 0x8b, 0xbf, 0x67, 0xad, 0x02, 0x1c, 0x0b, 0x1d, 0xfc, 0xe1, 0x9e, 0x9d, 0x99, 0x81, 0xa8, 0xd2, + 0xa9, 0xe4, 0x48, 0xea, 0x13, 0xbb, 0x7b, 0xae, 0x13, 0x10, 0x0b, 0x6c, 0x06, 0xf3, 0x2d, 0x14, + 0xc5, 0xba, 0xd5, 0x24, 0xaa, 0x3d, 0xbf, 0xcd, 0x5f, 0xb1, 0x9d, 0x31, 0xcc, 0xc3, 0x98, 0xef, + 0x17, 0xa9, 0x59, 0x84, 0xf5, 0x0f, 0x6c, 0x90, 0xba, 0x78, 0xcd, 0x76, 0x27, 0x72, 0xb1, 0x08, + 0x35, 0xc7, 0x8c, 0x3c, 0xc4, 0xba, 0x03, 0x07, 0xa5, 0xc2, 0x37, 0xec, 0x8e, 0x2f, 0xa3, 0x68, + 0x2a, 0x82, 0x4b, 0x8e, 0xf3, 0x42, 0x00, 0x8b, 0x1f, 0x6d, 0xe0, 0x54, 0x7e, 0xc2, 0x1a, 0x5f, + 0x12, 0x58, 0x8a, 0xa4, 0x5c, 0x42, 0x11, 0xbb, 0x4b, 0x20, 0x98, 0x6a, 0x3f, 0xb3, 0x76, 0xde, + 0x4e, 0x41, 0xcd, 0x78, 0xd7, 0xea, 0x12, 0x61, 0x54, 0x7a, 0x52, 0xc3, 0x92, 0xe0, 0x77, 0xb6, + 0x87, 0x2d, 0x92, 0x64, 0xcf, 0xe9, 0xdd, 0x15, 0x3d, 0xaa, 0xe5, 0x49, 0xf6, 0x07, 0xbb, 0x3f, + 0x49, 0x40, 0x68, 0xf8, 0x96, 0x88, 0x58, 0x89, 0x40, 0x87, 0x32, 0xe6, 0x58, 0xb7, 0xc1, 0xa0, + 0xf0, 0x71, 0x7d, 0x02, 0x29, 0x9f, 0xb1, 0xe6, 0xb9, 0x16, 0x89, 0x2e, 0x56, 0x77, 0x48, 0x1f, + 0x07, 0x61, 0xa8, 0xe6, 0x55, 0x51, 0x96, 0x0e, 0x68, 0xda, 0x23, 0xe9, 0x94, 0xd8, 0x86, 0x8e, + 0x49, 0x91, 0xce, 0x6f, 0xb6, 0x3f, 0x91, 0x71, 0x10, 0xa5, 0x33, 0xeb, 0xb7, 0xf6, 0x69, 0xf0, + 0x1b, 0x1c, 0xea, 0x0e, 0xb6, 0xa5, 0x90, 0xbe, 0xcf, 0xee, 0xf9, 0x20, 0x66, 0xa6, 0x36, 0x2e, + 0xd5, 0xc1, 0x51, 0xb7, 0x57, 0x47, 0x9b, 0xa7, 0x9c, 0x1d, 0x03, 0x9e, 0x9f, 0x67, 0x5e, 0x88, + 0x73, 0x7d, 0x9d, 0x4a, 0xce, 0x5c, 0xb4, 0xc9, 0xe4, 0xd6, 0x70, 0x54, 0x51, 0x63, 0xf9, 0xc3, + 0x71, 0x7d, 0x82, 0x69, 0x12, 0x1f, 0x41, 0x29, 0x31, 0x87, 0xfc, 0xf0, 0xc9, 0x24, 0x2c, 0xd4, + 0x35, 0x09, 0x87, 0x34, 0x4c, 0x62, 0xc2, 0x58, 0x41, 0x9e, 0x06, 0x97, 0xfc, 0xb1, 0x9d, 0x7f, + 0x5a, 0xae, 0xfb, 0xb0, 0x82, 0x31, 0xef, 0xcf, 0x87, 0xb5, 0xed, 0x02, 0xce, 0xae, 0x4b, 0xd3, + 0x36, 0x61, 0xf7, 0xfe, 0x5c, 0xd6, 0xfc, 0x7c, 0x0a, 0xce, 0xda, 0x48, 0xdf, 0xae, 0xab, 0x5a, + 0xcc, 0x60, 0x5b, 0x8a, 0x69, 0x36, 0x3e, 0x44, 0x20, 0x54, 0x69, 0x36, 0x45, 0xec, 0x9a, 0x0d, + 0xc1, 0x54, 0xfb, 0x81, 0xb5, 0xf2, 0x39, 0xbe, 0x03, 0x11, 0xe9, 0xd2, 0xf1, 0x4d, 0xd0, 0xfd, + 0x4c, 0x6c, 0xce, 0x18, 0xff, 0x19, 0x6b, 0x5c, 0x14, 0x8b, 0xf4, 0x86, 0xc6, 0x13, 0x75, 0x61, + 0xef, 0xb1, 0x53, 0xc9, 0x19, 0x3a, 0x3e, 0x6b, 0x22, 0x2c, 0xaf, 0x15, 0xef, 0x55, 0xe5, 0xcb, + 0x6b, 0x55, 0x7a, 0x55, 0x1d, 0x6f, 0x68, 0xfe, 0x62, 0xed, 0xf2, 0x5f, 0xa5, 0x91, 0x56, 0xbc, + 0x5f, 0xdd, 0xc6, 0x9a, 0x2b, 0xe7, 0xbf, 0x25, 0xa5, 0x14, 0x1f, 0x3f, 0xfb, 0xf9, 0x74, 0x15, + 0x6a, 0x50, 0x6a, 0x18, 0xca, 0x51, 0xfe, 0xd7, 0x68, 0x2e, 0x47, 0x2b, 0x3d, 0xca, 0x5e, 0xe7, + 0x91, 0xf9, 0x92, 0x4f, 0x77, 0x33, 0xec, 0xe5, 0x4d, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe2, 0x30, + 0x29, 0x02, 0xf4, 0x07, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Execute executes the specified SQL query (might be in a + // transaction context, if Query.transaction_id is set). + Execute(ctx context.Context, in *query.ExecuteRequest, opts ...grpc.CallOption) (*query.ExecuteResponse, error) + // ExecuteBatch executes a list of queries, and returns the result + // for each query. + ExecuteBatch(ctx context.Context, in *query.ExecuteBatchRequest, opts ...grpc.CallOption) (*query.ExecuteBatchResponse, error) + // StreamExecute executes a streaming query. Use this method if the + // query returns a large number of rows. The first QueryResult will + // contain the Fields, subsequent QueryResult messages will contain + // the rows. + StreamExecute(ctx context.Context, in *query.StreamExecuteRequest, opts ...grpc.CallOption) (Query_StreamExecuteClient, error) + // Begin a transaction. + Begin(ctx context.Context, in *query.BeginRequest, opts ...grpc.CallOption) (*query.BeginResponse, error) + // Commit a transaction. + Commit(ctx context.Context, in *query.CommitRequest, opts ...grpc.CallOption) (*query.CommitResponse, error) + // Rollback a transaction. + Rollback(ctx context.Context, in *query.RollbackRequest, opts ...grpc.CallOption) (*query.RollbackResponse, error) + // Prepare preares a transaction. + Prepare(ctx context.Context, in *query.PrepareRequest, opts ...grpc.CallOption) (*query.PrepareResponse, error) + // CommitPrepared commits a prepared transaction. + CommitPrepared(ctx context.Context, in *query.CommitPreparedRequest, opts ...grpc.CallOption) (*query.CommitPreparedResponse, error) + // RollbackPrepared rolls back a prepared transaction. + RollbackPrepared(ctx context.Context, in *query.RollbackPreparedRequest, opts ...grpc.CallOption) (*query.RollbackPreparedResponse, error) + // CreateTransaction creates the metadata for a 2pc transaction. + CreateTransaction(ctx context.Context, in *query.CreateTransactionRequest, opts ...grpc.CallOption) (*query.CreateTransactionResponse, error) + // StartCommit initiates a commit for a 2pc transaction. + StartCommit(ctx context.Context, in *query.StartCommitRequest, opts ...grpc.CallOption) (*query.StartCommitResponse, error) + // SetRollback marks the 2pc transaction for rollback. + SetRollback(ctx context.Context, in *query.SetRollbackRequest, opts ...grpc.CallOption) (*query.SetRollbackResponse, error) + // ConcludeTransaction marks the 2pc transaction as resolved. + ConcludeTransaction(ctx context.Context, in *query.ConcludeTransactionRequest, opts ...grpc.CallOption) (*query.ConcludeTransactionResponse, error) + // ReadTransaction returns the 2pc transaction info. + ReadTransaction(ctx context.Context, in *query.ReadTransactionRequest, opts ...grpc.CallOption) (*query.ReadTransactionResponse, error) + // BeginExecute executes a begin and the specified SQL query. + BeginExecute(ctx context.Context, in *query.BeginExecuteRequest, opts ...grpc.CallOption) (*query.BeginExecuteResponse, error) + // BeginExecuteBatch executes a begin and a list of queries. + BeginExecuteBatch(ctx context.Context, in *query.BeginExecuteBatchRequest, opts ...grpc.CallOption) (*query.BeginExecuteBatchResponse, error) + // MessageStream streams messages from a message table. + MessageStream(ctx context.Context, in *query.MessageStreamRequest, opts ...grpc.CallOption) (Query_MessageStreamClient, error) + // MessageAck acks messages for a table. + MessageAck(ctx context.Context, in *query.MessageAckRequest, opts ...grpc.CallOption) (*query.MessageAckResponse, error) + ReserveExecute(ctx context.Context, in *query.ReserveExecuteRequest, opts ...grpc.CallOption) (*query.ReserveExecuteResponse, error) + ReserveBeginExecute(ctx context.Context, in *query.ReserveBeginExecuteRequest, opts ...grpc.CallOption) (*query.ReserveBeginExecuteResponse, error) + Release(ctx context.Context, in *query.ReleaseRequest, opts ...grpc.CallOption) (*query.ReleaseResponse, error) + // StreamHealth runs a streaming RPC to the tablet, that returns the + // current health of the tablet on a regular basis. + StreamHealth(ctx context.Context, in *query.StreamHealthRequest, opts ...grpc.CallOption) (Query_StreamHealthClient, error) + // VStream streams vreplication events. + VStream(ctx context.Context, in *binlogdata.VStreamRequest, opts ...grpc.CallOption) (Query_VStreamClient, error) + // VStreamRows streams rows from the specified starting point. + VStreamRows(ctx context.Context, in *binlogdata.VStreamRowsRequest, opts ...grpc.CallOption) (Query_VStreamRowsClient, error) + // VStreamResults streams results along with the gtid of the snapshot. + VStreamResults(ctx context.Context, in *binlogdata.VStreamResultsRequest, opts ...grpc.CallOption) (Query_VStreamResultsClient, error) +} + +type queryClient struct { + cc *grpc.ClientConn +} + +func NewQueryClient(cc *grpc.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Execute(ctx context.Context, in *query.ExecuteRequest, opts ...grpc.CallOption) (*query.ExecuteResponse, error) { + out := new(query.ExecuteResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/Execute", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ExecuteBatch(ctx context.Context, in *query.ExecuteBatchRequest, opts ...grpc.CallOption) (*query.ExecuteBatchResponse, error) { + out := new(query.ExecuteBatchResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/ExecuteBatch", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) StreamExecute(ctx context.Context, in *query.StreamExecuteRequest, opts ...grpc.CallOption) (Query_StreamExecuteClient, error) { + stream, err := c.cc.NewStream(ctx, &_Query_serviceDesc.Streams[0], "/queryservice.Query/StreamExecute", opts...) + if err != nil { + return nil, err + } + x := &queryStreamExecuteClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_StreamExecuteClient interface { + Recv() (*query.StreamExecuteResponse, error) + grpc.ClientStream +} + +type queryStreamExecuteClient struct { + grpc.ClientStream +} + +func (x *queryStreamExecuteClient) Recv() (*query.StreamExecuteResponse, error) { + m := new(query.StreamExecuteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *queryClient) Begin(ctx context.Context, in *query.BeginRequest, opts ...grpc.CallOption) (*query.BeginResponse, error) { + out := new(query.BeginResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/Begin", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Commit(ctx context.Context, in *query.CommitRequest, opts ...grpc.CallOption) (*query.CommitResponse, error) { + out := new(query.CommitResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/Commit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Rollback(ctx context.Context, in *query.RollbackRequest, opts ...grpc.CallOption) (*query.RollbackResponse, error) { + out := new(query.RollbackResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/Rollback", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Prepare(ctx context.Context, in *query.PrepareRequest, opts ...grpc.CallOption) (*query.PrepareResponse, error) { + out := new(query.PrepareResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/Prepare", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) CommitPrepared(ctx context.Context, in *query.CommitPreparedRequest, opts ...grpc.CallOption) (*query.CommitPreparedResponse, error) { + out := new(query.CommitPreparedResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/CommitPrepared", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RollbackPrepared(ctx context.Context, in *query.RollbackPreparedRequest, opts ...grpc.CallOption) (*query.RollbackPreparedResponse, error) { + out := new(query.RollbackPreparedResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/RollbackPrepared", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) CreateTransaction(ctx context.Context, in *query.CreateTransactionRequest, opts ...grpc.CallOption) (*query.CreateTransactionResponse, error) { + out := new(query.CreateTransactionResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/CreateTransaction", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) StartCommit(ctx context.Context, in *query.StartCommitRequest, opts ...grpc.CallOption) (*query.StartCommitResponse, error) { + out := new(query.StartCommitResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/StartCommit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) SetRollback(ctx context.Context, in *query.SetRollbackRequest, opts ...grpc.CallOption) (*query.SetRollbackResponse, error) { + out := new(query.SetRollbackResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/SetRollback", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ConcludeTransaction(ctx context.Context, in *query.ConcludeTransactionRequest, opts ...grpc.CallOption) (*query.ConcludeTransactionResponse, error) { + out := new(query.ConcludeTransactionResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/ConcludeTransaction", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ReadTransaction(ctx context.Context, in *query.ReadTransactionRequest, opts ...grpc.CallOption) (*query.ReadTransactionResponse, error) { + out := new(query.ReadTransactionResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/ReadTransaction", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) BeginExecute(ctx context.Context, in *query.BeginExecuteRequest, opts ...grpc.CallOption) (*query.BeginExecuteResponse, error) { + out := new(query.BeginExecuteResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/BeginExecute", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) BeginExecuteBatch(ctx context.Context, in *query.BeginExecuteBatchRequest, opts ...grpc.CallOption) (*query.BeginExecuteBatchResponse, error) { + out := new(query.BeginExecuteBatchResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/BeginExecuteBatch", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) MessageStream(ctx context.Context, in *query.MessageStreamRequest, opts ...grpc.CallOption) (Query_MessageStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Query_serviceDesc.Streams[1], "/queryservice.Query/MessageStream", opts...) + if err != nil { + return nil, err + } + x := &queryMessageStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_MessageStreamClient interface { + Recv() (*query.MessageStreamResponse, error) + grpc.ClientStream +} + +type queryMessageStreamClient struct { + grpc.ClientStream +} + +func (x *queryMessageStreamClient) Recv() (*query.MessageStreamResponse, error) { + m := new(query.MessageStreamResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *queryClient) MessageAck(ctx context.Context, in *query.MessageAckRequest, opts ...grpc.CallOption) (*query.MessageAckResponse, error) { + out := new(query.MessageAckResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/MessageAck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ReserveExecute(ctx context.Context, in *query.ReserveExecuteRequest, opts ...grpc.CallOption) (*query.ReserveExecuteResponse, error) { + out := new(query.ReserveExecuteResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/ReserveExecute", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ReserveBeginExecute(ctx context.Context, in *query.ReserveBeginExecuteRequest, opts ...grpc.CallOption) (*query.ReserveBeginExecuteResponse, error) { + out := new(query.ReserveBeginExecuteResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/ReserveBeginExecute", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Release(ctx context.Context, in *query.ReleaseRequest, opts ...grpc.CallOption) (*query.ReleaseResponse, error) { + out := new(query.ReleaseResponse) + err := c.cc.Invoke(ctx, "/queryservice.Query/Release", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) StreamHealth(ctx context.Context, in *query.StreamHealthRequest, opts ...grpc.CallOption) (Query_StreamHealthClient, error) { + stream, err := c.cc.NewStream(ctx, &_Query_serviceDesc.Streams[2], "/queryservice.Query/StreamHealth", opts...) + if err != nil { + return nil, err + } + x := &queryStreamHealthClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_StreamHealthClient interface { + Recv() (*query.StreamHealthResponse, error) + grpc.ClientStream +} + +type queryStreamHealthClient struct { + grpc.ClientStream +} + +func (x *queryStreamHealthClient) Recv() (*query.StreamHealthResponse, error) { + m := new(query.StreamHealthResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *queryClient) VStream(ctx context.Context, in *binlogdata.VStreamRequest, opts ...grpc.CallOption) (Query_VStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Query_serviceDesc.Streams[3], "/queryservice.Query/VStream", opts...) + if err != nil { + return nil, err + } + x := &queryVStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_VStreamClient interface { + Recv() (*binlogdata.VStreamResponse, error) + grpc.ClientStream +} + +type queryVStreamClient struct { + grpc.ClientStream +} + +func (x *queryVStreamClient) Recv() (*binlogdata.VStreamResponse, error) { + m := new(binlogdata.VStreamResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *queryClient) VStreamRows(ctx context.Context, in *binlogdata.VStreamRowsRequest, opts ...grpc.CallOption) (Query_VStreamRowsClient, error) { + stream, err := c.cc.NewStream(ctx, &_Query_serviceDesc.Streams[4], "/queryservice.Query/VStreamRows", opts...) + if err != nil { + return nil, err + } + x := &queryVStreamRowsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_VStreamRowsClient interface { + Recv() (*binlogdata.VStreamRowsResponse, error) + grpc.ClientStream +} + +type queryVStreamRowsClient struct { + grpc.ClientStream +} + +func (x *queryVStreamRowsClient) Recv() (*binlogdata.VStreamRowsResponse, error) { + m := new(binlogdata.VStreamRowsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *queryClient) VStreamResults(ctx context.Context, in *binlogdata.VStreamResultsRequest, opts ...grpc.CallOption) (Query_VStreamResultsClient, error) { + stream, err := c.cc.NewStream(ctx, &_Query_serviceDesc.Streams[5], "/queryservice.Query/VStreamResults", opts...) + if err != nil { + return nil, err + } + x := &queryVStreamResultsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_VStreamResultsClient interface { + Recv() (*binlogdata.VStreamResultsResponse, error) + grpc.ClientStream +} + +type queryVStreamResultsClient struct { + grpc.ClientStream +} + +func (x *queryVStreamResultsClient) Recv() (*binlogdata.VStreamResultsResponse, error) { + m := new(binlogdata.VStreamResultsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Execute executes the specified SQL query (might be in a + // transaction context, if Query.transaction_id is set). + Execute(context.Context, *query.ExecuteRequest) (*query.ExecuteResponse, error) + // ExecuteBatch executes a list of queries, and returns the result + // for each query. + ExecuteBatch(context.Context, *query.ExecuteBatchRequest) (*query.ExecuteBatchResponse, error) + // StreamExecute executes a streaming query. Use this method if the + // query returns a large number of rows. The first QueryResult will + // contain the Fields, subsequent QueryResult messages will contain + // the rows. + StreamExecute(*query.StreamExecuteRequest, Query_StreamExecuteServer) error + // Begin a transaction. + Begin(context.Context, *query.BeginRequest) (*query.BeginResponse, error) + // Commit a transaction. + Commit(context.Context, *query.CommitRequest) (*query.CommitResponse, error) + // Rollback a transaction. + Rollback(context.Context, *query.RollbackRequest) (*query.RollbackResponse, error) + // Prepare preares a transaction. + Prepare(context.Context, *query.PrepareRequest) (*query.PrepareResponse, error) + // CommitPrepared commits a prepared transaction. + CommitPrepared(context.Context, *query.CommitPreparedRequest) (*query.CommitPreparedResponse, error) + // RollbackPrepared rolls back a prepared transaction. + RollbackPrepared(context.Context, *query.RollbackPreparedRequest) (*query.RollbackPreparedResponse, error) + // CreateTransaction creates the metadata for a 2pc transaction. + CreateTransaction(context.Context, *query.CreateTransactionRequest) (*query.CreateTransactionResponse, error) + // StartCommit initiates a commit for a 2pc transaction. + StartCommit(context.Context, *query.StartCommitRequest) (*query.StartCommitResponse, error) + // SetRollback marks the 2pc transaction for rollback. + SetRollback(context.Context, *query.SetRollbackRequest) (*query.SetRollbackResponse, error) + // ConcludeTransaction marks the 2pc transaction as resolved. + ConcludeTransaction(context.Context, *query.ConcludeTransactionRequest) (*query.ConcludeTransactionResponse, error) + // ReadTransaction returns the 2pc transaction info. + ReadTransaction(context.Context, *query.ReadTransactionRequest) (*query.ReadTransactionResponse, error) + // BeginExecute executes a begin and the specified SQL query. + BeginExecute(context.Context, *query.BeginExecuteRequest) (*query.BeginExecuteResponse, error) + // BeginExecuteBatch executes a begin and a list of queries. + BeginExecuteBatch(context.Context, *query.BeginExecuteBatchRequest) (*query.BeginExecuteBatchResponse, error) + // MessageStream streams messages from a message table. + MessageStream(*query.MessageStreamRequest, Query_MessageStreamServer) error + // MessageAck acks messages for a table. + MessageAck(context.Context, *query.MessageAckRequest) (*query.MessageAckResponse, error) + ReserveExecute(context.Context, *query.ReserveExecuteRequest) (*query.ReserveExecuteResponse, error) + ReserveBeginExecute(context.Context, *query.ReserveBeginExecuteRequest) (*query.ReserveBeginExecuteResponse, error) + Release(context.Context, *query.ReleaseRequest) (*query.ReleaseResponse, error) + // StreamHealth runs a streaming RPC to the tablet, that returns the + // current health of the tablet on a regular basis. + StreamHealth(*query.StreamHealthRequest, Query_StreamHealthServer) error + // VStream streams vreplication events. + VStream(*binlogdata.VStreamRequest, Query_VStreamServer) error + // VStreamRows streams rows from the specified starting point. + VStreamRows(*binlogdata.VStreamRowsRequest, Query_VStreamRowsServer) error + // VStreamResults streams results along with the gtid of the snapshot. + VStreamResults(*binlogdata.VStreamResultsRequest, Query_VStreamResultsServer) error +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Execute(ctx context.Context, req *query.ExecuteRequest) (*query.ExecuteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Execute not implemented") +} +func (*UnimplementedQueryServer) ExecuteBatch(ctx context.Context, req *query.ExecuteBatchRequest) (*query.ExecuteBatchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExecuteBatch not implemented") +} +func (*UnimplementedQueryServer) StreamExecute(req *query.StreamExecuteRequest, srv Query_StreamExecuteServer) error { + return status.Errorf(codes.Unimplemented, "method StreamExecute not implemented") +} +func (*UnimplementedQueryServer) Begin(ctx context.Context, req *query.BeginRequest) (*query.BeginResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Begin not implemented") +} +func (*UnimplementedQueryServer) Commit(ctx context.Context, req *query.CommitRequest) (*query.CommitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") +} +func (*UnimplementedQueryServer) Rollback(ctx context.Context, req *query.RollbackRequest) (*query.RollbackResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Rollback not implemented") +} +func (*UnimplementedQueryServer) Prepare(ctx context.Context, req *query.PrepareRequest) (*query.PrepareResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Prepare not implemented") +} +func (*UnimplementedQueryServer) CommitPrepared(ctx context.Context, req *query.CommitPreparedRequest) (*query.CommitPreparedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CommitPrepared not implemented") +} +func (*UnimplementedQueryServer) RollbackPrepared(ctx context.Context, req *query.RollbackPreparedRequest) (*query.RollbackPreparedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RollbackPrepared not implemented") +} +func (*UnimplementedQueryServer) CreateTransaction(ctx context.Context, req *query.CreateTransactionRequest) (*query.CreateTransactionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateTransaction not implemented") +} +func (*UnimplementedQueryServer) StartCommit(ctx context.Context, req *query.StartCommitRequest) (*query.StartCommitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartCommit not implemented") +} +func (*UnimplementedQueryServer) SetRollback(ctx context.Context, req *query.SetRollbackRequest) (*query.SetRollbackResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetRollback not implemented") +} +func (*UnimplementedQueryServer) ConcludeTransaction(ctx context.Context, req *query.ConcludeTransactionRequest) (*query.ConcludeTransactionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConcludeTransaction not implemented") +} +func (*UnimplementedQueryServer) ReadTransaction(ctx context.Context, req *query.ReadTransactionRequest) (*query.ReadTransactionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadTransaction not implemented") +} +func (*UnimplementedQueryServer) BeginExecute(ctx context.Context, req *query.BeginExecuteRequest) (*query.BeginExecuteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BeginExecute not implemented") +} +func (*UnimplementedQueryServer) BeginExecuteBatch(ctx context.Context, req *query.BeginExecuteBatchRequest) (*query.BeginExecuteBatchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BeginExecuteBatch not implemented") +} +func (*UnimplementedQueryServer) MessageStream(req *query.MessageStreamRequest, srv Query_MessageStreamServer) error { + return status.Errorf(codes.Unimplemented, "method MessageStream not implemented") +} +func (*UnimplementedQueryServer) MessageAck(ctx context.Context, req *query.MessageAckRequest) (*query.MessageAckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MessageAck not implemented") +} +func (*UnimplementedQueryServer) ReserveExecute(ctx context.Context, req *query.ReserveExecuteRequest) (*query.ReserveExecuteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReserveExecute not implemented") +} +func (*UnimplementedQueryServer) ReserveBeginExecute(ctx context.Context, req *query.ReserveBeginExecuteRequest) (*query.ReserveBeginExecuteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReserveBeginExecute not implemented") +} +func (*UnimplementedQueryServer) Release(ctx context.Context, req *query.ReleaseRequest) (*query.ReleaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Release not implemented") +} +func (*UnimplementedQueryServer) StreamHealth(req *query.StreamHealthRequest, srv Query_StreamHealthServer) error { + return status.Errorf(codes.Unimplemented, "method StreamHealth not implemented") +} +func (*UnimplementedQueryServer) VStream(req *binlogdata.VStreamRequest, srv Query_VStreamServer) error { + return status.Errorf(codes.Unimplemented, "method VStream not implemented") +} +func (*UnimplementedQueryServer) VStreamRows(req *binlogdata.VStreamRowsRequest, srv Query_VStreamRowsServer) error { + return status.Errorf(codes.Unimplemented, "method VStreamRows not implemented") +} +func (*UnimplementedQueryServer) VStreamResults(req *binlogdata.VStreamResultsRequest, srv Query_VStreamResultsServer) error { + return status.Errorf(codes.Unimplemented, "method VStreamResults not implemented") +} + +func RegisterQueryServer(s *grpc.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Execute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.ExecuteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Execute(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/Execute", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Execute(ctx, req.(*query.ExecuteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ExecuteBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.ExecuteBatchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ExecuteBatch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/ExecuteBatch", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ExecuteBatch(ctx, req.(*query.ExecuteBatchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_StreamExecute_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(query.StreamExecuteRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).StreamExecute(m, &queryStreamExecuteServer{stream}) +} + +type Query_StreamExecuteServer interface { + Send(*query.StreamExecuteResponse) error + grpc.ServerStream +} + +type queryStreamExecuteServer struct { + grpc.ServerStream +} + +func (x *queryStreamExecuteServer) Send(m *query.StreamExecuteResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Query_Begin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.BeginRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Begin(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/Begin", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Begin(ctx, req.(*query.BeginRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.CommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Commit(ctx, req.(*query.CommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Rollback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.RollbackRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Rollback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/Rollback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Rollback(ctx, req.(*query.RollbackRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Prepare_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.PrepareRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Prepare(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/Prepare", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Prepare(ctx, req.(*query.PrepareRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_CommitPrepared_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.CommitPreparedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).CommitPrepared(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/CommitPrepared", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).CommitPrepared(ctx, req.(*query.CommitPreparedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RollbackPrepared_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.RollbackPreparedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RollbackPrepared(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/RollbackPrepared", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RollbackPrepared(ctx, req.(*query.RollbackPreparedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_CreateTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.CreateTransactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).CreateTransaction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/CreateTransaction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).CreateTransaction(ctx, req.(*query.CreateTransactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_StartCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.StartCommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).StartCommit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/StartCommit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).StartCommit(ctx, req.(*query.StartCommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_SetRollback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.SetRollbackRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).SetRollback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/SetRollback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).SetRollback(ctx, req.(*query.SetRollbackRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ConcludeTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.ConcludeTransactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ConcludeTransaction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/ConcludeTransaction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ConcludeTransaction(ctx, req.(*query.ConcludeTransactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ReadTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.ReadTransactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ReadTransaction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/ReadTransaction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ReadTransaction(ctx, req.(*query.ReadTransactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_BeginExecute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.BeginExecuteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).BeginExecute(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/BeginExecute", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).BeginExecute(ctx, req.(*query.BeginExecuteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_BeginExecuteBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.BeginExecuteBatchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).BeginExecuteBatch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/BeginExecuteBatch", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).BeginExecuteBatch(ctx, req.(*query.BeginExecuteBatchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_MessageStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(query.MessageStreamRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).MessageStream(m, &queryMessageStreamServer{stream}) +} + +type Query_MessageStreamServer interface { + Send(*query.MessageStreamResponse) error + grpc.ServerStream +} + +type queryMessageStreamServer struct { + grpc.ServerStream +} + +func (x *queryMessageStreamServer) Send(m *query.MessageStreamResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Query_MessageAck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.MessageAckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).MessageAck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/MessageAck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).MessageAck(ctx, req.(*query.MessageAckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ReserveExecute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.ReserveExecuteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ReserveExecute(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/ReserveExecute", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ReserveExecute(ctx, req.(*query.ReserveExecuteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ReserveBeginExecute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.ReserveBeginExecuteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ReserveBeginExecute(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/ReserveBeginExecute", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ReserveBeginExecute(ctx, req.(*query.ReserveBeginExecuteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Release_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(query.ReleaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Release(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queryservice.Query/Release", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Release(ctx, req.(*query.ReleaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_StreamHealth_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(query.StreamHealthRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).StreamHealth(m, &queryStreamHealthServer{stream}) +} + +type Query_StreamHealthServer interface { + Send(*query.StreamHealthResponse) error + grpc.ServerStream +} + +type queryStreamHealthServer struct { + grpc.ServerStream +} + +func (x *queryStreamHealthServer) Send(m *query.StreamHealthResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Query_VStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(binlogdata.VStreamRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).VStream(m, &queryVStreamServer{stream}) +} + +type Query_VStreamServer interface { + Send(*binlogdata.VStreamResponse) error + grpc.ServerStream +} + +type queryVStreamServer struct { + grpc.ServerStream +} + +func (x *queryVStreamServer) Send(m *binlogdata.VStreamResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Query_VStreamRows_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(binlogdata.VStreamRowsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).VStreamRows(m, &queryVStreamRowsServer{stream}) +} + +type Query_VStreamRowsServer interface { + Send(*binlogdata.VStreamRowsResponse) error + grpc.ServerStream +} + +type queryVStreamRowsServer struct { + grpc.ServerStream +} + +func (x *queryVStreamRowsServer) Send(m *binlogdata.VStreamRowsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Query_VStreamResults_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(binlogdata.VStreamResultsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).VStreamResults(m, &queryVStreamResultsServer{stream}) +} + +type Query_VStreamResultsServer interface { + Send(*binlogdata.VStreamResultsResponse) error + grpc.ServerStream +} + +type queryVStreamResultsServer struct { + grpc.ServerStream +} + +func (x *queryVStreamResultsServer) Send(m *binlogdata.VStreamResultsResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "queryservice.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Execute", + Handler: _Query_Execute_Handler, + }, + { + MethodName: "ExecuteBatch", + Handler: _Query_ExecuteBatch_Handler, + }, + { + MethodName: "Begin", + Handler: _Query_Begin_Handler, + }, + { + MethodName: "Commit", + Handler: _Query_Commit_Handler, + }, + { + MethodName: "Rollback", + Handler: _Query_Rollback_Handler, + }, + { + MethodName: "Prepare", + Handler: _Query_Prepare_Handler, + }, + { + MethodName: "CommitPrepared", + Handler: _Query_CommitPrepared_Handler, + }, + { + MethodName: "RollbackPrepared", + Handler: _Query_RollbackPrepared_Handler, + }, + { + MethodName: "CreateTransaction", + Handler: _Query_CreateTransaction_Handler, + }, + { + MethodName: "StartCommit", + Handler: _Query_StartCommit_Handler, + }, + { + MethodName: "SetRollback", + Handler: _Query_SetRollback_Handler, + }, + { + MethodName: "ConcludeTransaction", + Handler: _Query_ConcludeTransaction_Handler, + }, + { + MethodName: "ReadTransaction", + Handler: _Query_ReadTransaction_Handler, + }, + { + MethodName: "BeginExecute", + Handler: _Query_BeginExecute_Handler, + }, + { + MethodName: "BeginExecuteBatch", + Handler: _Query_BeginExecuteBatch_Handler, + }, + { + MethodName: "MessageAck", + Handler: _Query_MessageAck_Handler, + }, + { + MethodName: "ReserveExecute", + Handler: _Query_ReserveExecute_Handler, + }, + { + MethodName: "ReserveBeginExecute", + Handler: _Query_ReserveBeginExecute_Handler, + }, + { + MethodName: "Release", + Handler: _Query_Release_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamExecute", + Handler: _Query_StreamExecute_Handler, + ServerStreams: true, + }, + { + StreamName: "MessageStream", + Handler: _Query_MessageStream_Handler, + ServerStreams: true, + }, + { + StreamName: "StreamHealth", + Handler: _Query_StreamHealth_Handler, + ServerStreams: true, + }, + { + StreamName: "VStream", + Handler: _Query_VStream_Handler, + ServerStreams: true, + }, + { + StreamName: "VStreamRows", + Handler: _Query_VStreamRows_Handler, + ServerStreams: true, + }, + { + StreamName: "VStreamResults", + Handler: _Query_VStreamResults_Handler, + ServerStreams: true, + }, + }, + Metadata: "queryservice.proto", +} diff --git a/internal/stackql-parser-fork/go/vt/proto/replicationdata/replicationdata.pb.go b/internal/stackql-parser-fork/go/vt/proto/replicationdata/replicationdata.pb.go new file mode 100644 index 00000000..dfe2c943 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/replicationdata/replicationdata.pb.go @@ -0,0 +1,185 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: replicationdata.proto + +package replicationdata + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Status is the replication status for MySQL/MariaDB/File-based. Returned by a +// flavor-specific command and parsed into a Position and fields. +type Status struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + IoThreadRunning bool `protobuf:"varint,2,opt,name=io_thread_running,json=ioThreadRunning,proto3" json:"io_thread_running,omitempty"` + SqlThreadRunning bool `protobuf:"varint,3,opt,name=sql_thread_running,json=sqlThreadRunning,proto3" json:"sql_thread_running,omitempty"` + SecondsBehindMaster uint32 `protobuf:"varint,4,opt,name=seconds_behind_master,json=secondsBehindMaster,proto3" json:"seconds_behind_master,omitempty"` + MasterHost string `protobuf:"bytes,5,opt,name=master_host,json=masterHost,proto3" json:"master_host,omitempty"` + MasterPort int32 `protobuf:"varint,6,opt,name=master_port,json=masterPort,proto3" json:"master_port,omitempty"` + MasterConnectRetry int32 `protobuf:"varint,7,opt,name=master_connect_retry,json=masterConnectRetry,proto3" json:"master_connect_retry,omitempty"` + // RelayLogPosition will be empty for flavors that do not support returning the full GTIDSet from the relay log, such as MariaDB. + RelayLogPosition string `protobuf:"bytes,8,opt,name=relay_log_position,json=relayLogPosition,proto3" json:"relay_log_position,omitempty"` + FilePosition string `protobuf:"bytes,9,opt,name=file_position,json=filePosition,proto3" json:"file_position,omitempty"` + FileRelayLogPosition string `protobuf:"bytes,10,opt,name=file_relay_log_position,json=fileRelayLogPosition,proto3" json:"file_relay_log_position,omitempty"` + MasterServerId uint32 `protobuf:"varint,11,opt,name=master_server_id,json=masterServerId,proto3" json:"master_server_id,omitempty"` + MasterUuid string `protobuf:"bytes,12,opt,name=master_uuid,json=masterUuid,proto3" json:"master_uuid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_ee8ee22b8c4b9d06, []int{0} +} + +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *Status) GetIoThreadRunning() bool { + if m != nil { + return m.IoThreadRunning + } + return false +} + +func (m *Status) GetSqlThreadRunning() bool { + if m != nil { + return m.SqlThreadRunning + } + return false +} + +func (m *Status) GetSecondsBehindMaster() uint32 { + if m != nil { + return m.SecondsBehindMaster + } + return 0 +} + +func (m *Status) GetMasterHost() string { + if m != nil { + return m.MasterHost + } + return "" +} + +func (m *Status) GetMasterPort() int32 { + if m != nil { + return m.MasterPort + } + return 0 +} + +func (m *Status) GetMasterConnectRetry() int32 { + if m != nil { + return m.MasterConnectRetry + } + return 0 +} + +func (m *Status) GetRelayLogPosition() string { + if m != nil { + return m.RelayLogPosition + } + return "" +} + +func (m *Status) GetFilePosition() string { + if m != nil { + return m.FilePosition + } + return "" +} + +func (m *Status) GetFileRelayLogPosition() string { + if m != nil { + return m.FileRelayLogPosition + } + return "" +} + +func (m *Status) GetMasterServerId() uint32 { + if m != nil { + return m.MasterServerId + } + return 0 +} + +func (m *Status) GetMasterUuid() string { + if m != nil { + return m.MasterUuid + } + return "" +} + +func init() { + proto.RegisterType((*Status)(nil), "replicationdata.Status") +} + +func init() { proto.RegisterFile("replicationdata.proto", fileDescriptor_ee8ee22b8c4b9d06) } + +var fileDescriptor_ee8ee22b8c4b9d06 = []byte{ + // 353 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x41, 0x4f, 0xe2, 0x40, + 0x14, 0xc7, 0xd3, 0x65, 0x61, 0x61, 0x80, 0x85, 0x9d, 0x85, 0x38, 0xf1, 0x62, 0xa3, 0x97, 0xc6, + 0x10, 0x6a, 0x34, 0x7e, 0x01, 0xbc, 0x68, 0xa2, 0x09, 0x29, 0x7a, 0xf1, 0x32, 0x29, 0x9d, 0xb1, + 0x4c, 0x52, 0xe7, 0x95, 0x99, 0x29, 0x09, 0x9f, 0xdd, 0x8b, 0xe9, 0x2b, 0x20, 0x36, 0xde, 0xda, + 0xff, 0xef, 0x97, 0xd7, 0xd7, 0x7f, 0x1e, 0x19, 0x1b, 0x99, 0x67, 0x2a, 0x89, 0x9d, 0x02, 0x2d, + 0x62, 0x17, 0x4f, 0x73, 0x03, 0x0e, 0xe8, 0xa0, 0x16, 0x9f, 0x7f, 0x34, 0x48, 0x6b, 0xe1, 0x62, + 0x57, 0x58, 0x7a, 0x4a, 0xda, 0x39, 0x58, 0x55, 0x22, 0xe6, 0xf9, 0x5e, 0xd0, 0x89, 0x0e, 0xef, + 0xf4, 0x92, 0xfc, 0x53, 0xc0, 0xdd, 0xca, 0xc8, 0x58, 0x70, 0x53, 0x68, 0xad, 0x74, 0xca, 0x7e, + 0xf9, 0x5e, 0xd0, 0x8e, 0x06, 0x0a, 0x9e, 0x31, 0x8f, 0xaa, 0x98, 0x4e, 0x08, 0xb5, 0xeb, 0xac, + 0x2e, 0x37, 0x50, 0x1e, 0xda, 0x75, 0xf6, 0xdd, 0xbe, 0x26, 0x63, 0x2b, 0x13, 0xd0, 0xc2, 0xf2, + 0xa5, 0x5c, 0x29, 0x2d, 0xf8, 0x7b, 0x6c, 0x9d, 0x34, 0xec, 0xb7, 0xef, 0x05, 0xfd, 0xe8, 0xff, + 0x0e, 0xce, 0x90, 0x3d, 0x21, 0xa2, 0x67, 0xa4, 0x5b, 0x49, 0x7c, 0x05, 0xd6, 0xb1, 0x26, 0x2e, + 0x4b, 0xaa, 0xe8, 0x1e, 0xac, 0x3b, 0x12, 0x72, 0x30, 0x8e, 0xb5, 0x7c, 0x2f, 0x68, 0xee, 0x85, + 0x39, 0x18, 0x47, 0xaf, 0xc8, 0x68, 0x27, 0x24, 0xa0, 0xb5, 0x4c, 0x1c, 0x37, 0xd2, 0x99, 0x2d, + 0xfb, 0x83, 0x26, 0xad, 0xd8, 0x5d, 0x85, 0xa2, 0x92, 0x94, 0x7f, 0x65, 0x64, 0x16, 0x6f, 0x79, + 0x06, 0x29, 0x3f, 0xf4, 0xd4, 0xc6, 0x4f, 0x0f, 0x91, 0x3c, 0x42, 0x3a, 0xdf, 0xf7, 0x75, 0x41, + 0xfa, 0x6f, 0x2a, 0x93, 0x5f, 0x62, 0x07, 0xc5, 0x5e, 0x19, 0x1e, 0xa4, 0x5b, 0x72, 0x82, 0xd2, + 0x0f, 0x73, 0x09, 0xea, 0xa3, 0x12, 0x47, 0xf5, 0xd9, 0x01, 0x19, 0xee, 0x76, 0xb7, 0xd2, 0x6c, + 0xa4, 0xe1, 0x4a, 0xb0, 0x2e, 0x96, 0xf5, 0xb7, 0xca, 0x17, 0x18, 0x3f, 0x88, 0xa3, 0x1a, 0x8a, + 0x42, 0x09, 0xd6, 0x3b, 0xee, 0xe9, 0xa5, 0x50, 0x62, 0x36, 0x7d, 0x9d, 0x6c, 0x94, 0x93, 0xd6, + 0x4e, 0x15, 0x84, 0xd5, 0x53, 0x98, 0x42, 0xb8, 0x71, 0x21, 0x9e, 0x4b, 0x58, 0xbb, 0x96, 0x65, + 0x0b, 0xe3, 0x9b, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x10, 0xaa, 0xaa, 0x5e, 0x02, 0x00, + 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/tableacl/tableacl.pb.go b/internal/stackql-parser-fork/go/vt/proto/tableacl/tableacl.pb.go new file mode 100644 index 00000000..5fbfc778 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/tableacl/tableacl.pb.go @@ -0,0 +1,160 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tableacl.proto + +package tableacl + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// TableGroupSpec defines ACLs for a group of tables. +type TableGroupSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // either tables or a table name prefixes (if it ends in a %) + TableNamesOrPrefixes []string `protobuf:"bytes,2,rep,name=table_names_or_prefixes,json=tableNamesOrPrefixes,proto3" json:"table_names_or_prefixes,omitempty"` + Readers []string `protobuf:"bytes,3,rep,name=readers,proto3" json:"readers,omitempty"` + Writers []string `protobuf:"bytes,4,rep,name=writers,proto3" json:"writers,omitempty"` + Admins []string `protobuf:"bytes,5,rep,name=admins,proto3" json:"admins,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TableGroupSpec) Reset() { *m = TableGroupSpec{} } +func (m *TableGroupSpec) String() string { return proto.CompactTextString(m) } +func (*TableGroupSpec) ProtoMessage() {} +func (*TableGroupSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d0bedb248a1632e, []int{0} +} + +func (m *TableGroupSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TableGroupSpec.Unmarshal(m, b) +} +func (m *TableGroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TableGroupSpec.Marshal(b, m, deterministic) +} +func (m *TableGroupSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TableGroupSpec.Merge(m, src) +} +func (m *TableGroupSpec) XXX_Size() int { + return xxx_messageInfo_TableGroupSpec.Size(m) +} +func (m *TableGroupSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TableGroupSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TableGroupSpec proto.InternalMessageInfo + +func (m *TableGroupSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TableGroupSpec) GetTableNamesOrPrefixes() []string { + if m != nil { + return m.TableNamesOrPrefixes + } + return nil +} + +func (m *TableGroupSpec) GetReaders() []string { + if m != nil { + return m.Readers + } + return nil +} + +func (m *TableGroupSpec) GetWriters() []string { + if m != nil { + return m.Writers + } + return nil +} + +func (m *TableGroupSpec) GetAdmins() []string { + if m != nil { + return m.Admins + } + return nil +} + +type Config struct { + TableGroups []*TableGroupSpec `protobuf:"bytes,1,rep,name=table_groups,json=tableGroups,proto3" json:"table_groups,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Config) Reset() { *m = Config{} } +func (m *Config) String() string { return proto.CompactTextString(m) } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { + return fileDescriptor_7d0bedb248a1632e, []int{1} +} + +func (m *Config) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Config.Unmarshal(m, b) +} +func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Config.Marshal(b, m, deterministic) +} +func (m *Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Config.Merge(m, src) +} +func (m *Config) XXX_Size() int { + return xxx_messageInfo_Config.Size(m) +} +func (m *Config) XXX_DiscardUnknown() { + xxx_messageInfo_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Config proto.InternalMessageInfo + +func (m *Config) GetTableGroups() []*TableGroupSpec { + if m != nil { + return m.TableGroups + } + return nil +} + +func init() { + proto.RegisterType((*TableGroupSpec)(nil), "tableacl.TableGroupSpec") + proto.RegisterType((*Config)(nil), "tableacl.Config") +} + +func init() { proto.RegisterFile("tableacl.proto", fileDescriptor_7d0bedb248a1632e) } + +var fileDescriptor_7d0bedb248a1632e = []byte{ + // 232 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xc1, 0x4b, 0xc3, 0x30, + 0x14, 0xc6, 0x89, 0x9d, 0xd5, 0xbd, 0xc9, 0x0e, 0x41, 0x34, 0xc7, 0x32, 0x10, 0x7b, 0x6a, 0x40, + 0xf1, 0xe4, 0x4d, 0x11, 0x6f, 0x2a, 0xd5, 0x93, 0x97, 0x92, 0x6d, 0x6f, 0x25, 0xb0, 0x35, 0xe1, + 0xbd, 0x38, 0xfd, 0x8f, 0xfc, 0x37, 0x25, 0x69, 0x3b, 0xf0, 0xf6, 0xfd, 0xf8, 0x25, 0xe1, 0xfb, + 0x02, 0xf3, 0x60, 0x96, 0x5b, 0x34, 0xab, 0x6d, 0xe5, 0xc9, 0x05, 0x27, 0x4f, 0x47, 0x5e, 0xfc, + 0x0a, 0x98, 0x7f, 0x44, 0x78, 0x26, 0xf7, 0xe5, 0xdf, 0x3d, 0xae, 0xa4, 0x84, 0x49, 0x67, 0x76, + 0xa8, 0x44, 0x21, 0xca, 0x69, 0x9d, 0xb2, 0xbc, 0x83, 0xcb, 0x74, 0xa5, 0x89, 0xc4, 0x8d, 0xa3, + 0xc6, 0x13, 0x6e, 0xec, 0x0f, 0xb2, 0x3a, 0x2a, 0xb2, 0x72, 0x5a, 0x9f, 0x27, 0xfd, 0x12, 0xed, + 0x2b, 0xbd, 0x0d, 0x4e, 0x2a, 0x38, 0x21, 0x34, 0x6b, 0x24, 0x56, 0x59, 0x3a, 0x36, 0x62, 0x34, + 0xdf, 0x64, 0x43, 0x34, 0x93, 0xde, 0x0c, 0x28, 0x2f, 0x20, 0x37, 0xeb, 0x9d, 0xed, 0x58, 0x1d, + 0x27, 0x31, 0xd0, 0xe2, 0x09, 0xf2, 0x47, 0xd7, 0x6d, 0x6c, 0x2b, 0xef, 0xe1, 0xac, 0x2f, 0xd3, + 0xc6, 0xce, 0xac, 0x44, 0x91, 0x95, 0xb3, 0x1b, 0x55, 0x1d, 0x46, 0xfe, 0x1f, 0x54, 0xcf, 0xc2, + 0x81, 0xf9, 0xe1, 0xfa, 0xf3, 0x6a, 0x6f, 0x03, 0x32, 0x57, 0xd6, 0xe9, 0x3e, 0xe9, 0xd6, 0xe9, + 0x7d, 0xd0, 0xe9, 0x6b, 0xf4, 0xf8, 0xc8, 0x32, 0x4f, 0x7c, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, + 0x09, 0x82, 0xf5, 0x82, 0x3c, 0x01, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/internal/stackql-parser-fork/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go new file mode 100644 index 00000000..9ad5c112 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -0,0 +1,4529 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tabletmanagerdata.proto + +package tabletmanagerdata + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + logutil "github.com/stackql/stackql-parser/go/vt/proto/logutil" + query "github.com/stackql/stackql-parser/go/vt/proto/query" + replicationdata "github.com/stackql/stackql-parser/go/vt/proto/replicationdata" + topodata "github.com/stackql/stackql-parser/go/vt/proto/topodata" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type TableDefinition struct { + // the table name + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // the SQL to run to create the table + Schema string `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + // the columns in the order that will be used to dump and load the data + Columns []string `protobuf:"bytes,3,rep,name=columns,proto3" json:"columns,omitempty"` + // the primary key columns in the primary key order + PrimaryKeyColumns []string `protobuf:"bytes,4,rep,name=primary_key_columns,json=primaryKeyColumns,proto3" json:"primary_key_columns,omitempty"` + // type is either mysqlctl.TableBaseTable or mysqlctl.TableView + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + // how much space the data file takes. + DataLength uint64 `protobuf:"varint,6,opt,name=data_length,json=dataLength,proto3" json:"data_length,omitempty"` + // approximate number of rows + RowCount uint64 `protobuf:"varint,7,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` + // column names along with their types. + // NOTE: this is a superset of columns. + Fields []*query.Field `protobuf:"bytes,8,rep,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TableDefinition) Reset() { *m = TableDefinition{} } +func (m *TableDefinition) String() string { return proto.CompactTextString(m) } +func (*TableDefinition) ProtoMessage() {} +func (*TableDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{0} +} + +func (m *TableDefinition) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TableDefinition.Unmarshal(m, b) +} +func (m *TableDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TableDefinition.Marshal(b, m, deterministic) +} +func (m *TableDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_TableDefinition.Merge(m, src) +} +func (m *TableDefinition) XXX_Size() int { + return xxx_messageInfo_TableDefinition.Size(m) +} +func (m *TableDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_TableDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_TableDefinition proto.InternalMessageInfo + +func (m *TableDefinition) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TableDefinition) GetSchema() string { + if m != nil { + return m.Schema + } + return "" +} + +func (m *TableDefinition) GetColumns() []string { + if m != nil { + return m.Columns + } + return nil +} + +func (m *TableDefinition) GetPrimaryKeyColumns() []string { + if m != nil { + return m.PrimaryKeyColumns + } + return nil +} + +func (m *TableDefinition) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *TableDefinition) GetDataLength() uint64 { + if m != nil { + return m.DataLength + } + return 0 +} + +func (m *TableDefinition) GetRowCount() uint64 { + if m != nil { + return m.RowCount + } + return 0 +} + +func (m *TableDefinition) GetFields() []*query.Field { + if m != nil { + return m.Fields + } + return nil +} + +type SchemaDefinition struct { + DatabaseSchema string `protobuf:"bytes,1,opt,name=database_schema,json=databaseSchema,proto3" json:"database_schema,omitempty"` + TableDefinitions []*TableDefinition `protobuf:"bytes,2,rep,name=table_definitions,json=tableDefinitions,proto3" json:"table_definitions,omitempty"` + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SchemaDefinition) Reset() { *m = SchemaDefinition{} } +func (m *SchemaDefinition) String() string { return proto.CompactTextString(m) } +func (*SchemaDefinition) ProtoMessage() {} +func (*SchemaDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{1} +} + +func (m *SchemaDefinition) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SchemaDefinition.Unmarshal(m, b) +} +func (m *SchemaDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SchemaDefinition.Marshal(b, m, deterministic) +} +func (m *SchemaDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_SchemaDefinition.Merge(m, src) +} +func (m *SchemaDefinition) XXX_Size() int { + return xxx_messageInfo_SchemaDefinition.Size(m) +} +func (m *SchemaDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_SchemaDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_SchemaDefinition proto.InternalMessageInfo + +func (m *SchemaDefinition) GetDatabaseSchema() string { + if m != nil { + return m.DatabaseSchema + } + return "" +} + +func (m *SchemaDefinition) GetTableDefinitions() []*TableDefinition { + if m != nil { + return m.TableDefinitions + } + return nil +} + +func (m *SchemaDefinition) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type SchemaChangeResult struct { + // before_schema holds the schema before each change. + BeforeSchema *SchemaDefinition `protobuf:"bytes,1,opt,name=before_schema,json=beforeSchema,proto3" json:"before_schema,omitempty"` + // after_schema holds the schema after each change. + AfterSchema *SchemaDefinition `protobuf:"bytes,2,opt,name=after_schema,json=afterSchema,proto3" json:"after_schema,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SchemaChangeResult) Reset() { *m = SchemaChangeResult{} } +func (m *SchemaChangeResult) String() string { return proto.CompactTextString(m) } +func (*SchemaChangeResult) ProtoMessage() {} +func (*SchemaChangeResult) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{2} +} + +func (m *SchemaChangeResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SchemaChangeResult.Unmarshal(m, b) +} +func (m *SchemaChangeResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SchemaChangeResult.Marshal(b, m, deterministic) +} +func (m *SchemaChangeResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_SchemaChangeResult.Merge(m, src) +} +func (m *SchemaChangeResult) XXX_Size() int { + return xxx_messageInfo_SchemaChangeResult.Size(m) +} +func (m *SchemaChangeResult) XXX_DiscardUnknown() { + xxx_messageInfo_SchemaChangeResult.DiscardUnknown(m) +} + +var xxx_messageInfo_SchemaChangeResult proto.InternalMessageInfo + +func (m *SchemaChangeResult) GetBeforeSchema() *SchemaDefinition { + if m != nil { + return m.BeforeSchema + } + return nil +} + +func (m *SchemaChangeResult) GetAfterSchema() *SchemaDefinition { + if m != nil { + return m.AfterSchema + } + return nil +} + +// UserPermission describes a single row in the mysql.user table +// Primary key is Host+User +// PasswordChecksum is the crc64 of the password, for security reasons +type UserPermission struct { + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + User string `protobuf:"bytes,2,opt,name=user,proto3" json:"user,omitempty"` + PasswordChecksum uint64 `protobuf:"varint,3,opt,name=password_checksum,json=passwordChecksum,proto3" json:"password_checksum,omitempty"` + Privileges map[string]string `protobuf:"bytes,4,rep,name=privileges,proto3" json:"privileges,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserPermission) Reset() { *m = UserPermission{} } +func (m *UserPermission) String() string { return proto.CompactTextString(m) } +func (*UserPermission) ProtoMessage() {} +func (*UserPermission) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{3} +} + +func (m *UserPermission) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserPermission.Unmarshal(m, b) +} +func (m *UserPermission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserPermission.Marshal(b, m, deterministic) +} +func (m *UserPermission) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserPermission.Merge(m, src) +} +func (m *UserPermission) XXX_Size() int { + return xxx_messageInfo_UserPermission.Size(m) +} +func (m *UserPermission) XXX_DiscardUnknown() { + xxx_messageInfo_UserPermission.DiscardUnknown(m) +} + +var xxx_messageInfo_UserPermission proto.InternalMessageInfo + +func (m *UserPermission) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *UserPermission) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *UserPermission) GetPasswordChecksum() uint64 { + if m != nil { + return m.PasswordChecksum + } + return 0 +} + +func (m *UserPermission) GetPrivileges() map[string]string { + if m != nil { + return m.Privileges + } + return nil +} + +// DbPermission describes a single row in the mysql.db table +// Primary key is Host+Db+User +type DbPermission struct { + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + Db string `protobuf:"bytes,2,opt,name=db,proto3" json:"db,omitempty"` + User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"` + Privileges map[string]string `protobuf:"bytes,4,rep,name=privileges,proto3" json:"privileges,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DbPermission) Reset() { *m = DbPermission{} } +func (m *DbPermission) String() string { return proto.CompactTextString(m) } +func (*DbPermission) ProtoMessage() {} +func (*DbPermission) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{4} +} + +func (m *DbPermission) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DbPermission.Unmarshal(m, b) +} +func (m *DbPermission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DbPermission.Marshal(b, m, deterministic) +} +func (m *DbPermission) XXX_Merge(src proto.Message) { + xxx_messageInfo_DbPermission.Merge(m, src) +} +func (m *DbPermission) XXX_Size() int { + return xxx_messageInfo_DbPermission.Size(m) +} +func (m *DbPermission) XXX_DiscardUnknown() { + xxx_messageInfo_DbPermission.DiscardUnknown(m) +} + +var xxx_messageInfo_DbPermission proto.InternalMessageInfo + +func (m *DbPermission) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *DbPermission) GetDb() string { + if m != nil { + return m.Db + } + return "" +} + +func (m *DbPermission) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *DbPermission) GetPrivileges() map[string]string { + if m != nil { + return m.Privileges + } + return nil +} + +// Permissions have all the rows in mysql.{user,db} tables, +// (all rows are sorted by primary key) +type Permissions struct { + UserPermissions []*UserPermission `protobuf:"bytes,1,rep,name=user_permissions,json=userPermissions,proto3" json:"user_permissions,omitempty"` + DbPermissions []*DbPermission `protobuf:"bytes,2,rep,name=db_permissions,json=dbPermissions,proto3" json:"db_permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Permissions) Reset() { *m = Permissions{} } +func (m *Permissions) String() string { return proto.CompactTextString(m) } +func (*Permissions) ProtoMessage() {} +func (*Permissions) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{5} +} + +func (m *Permissions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Permissions.Unmarshal(m, b) +} +func (m *Permissions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Permissions.Marshal(b, m, deterministic) +} +func (m *Permissions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Permissions.Merge(m, src) +} +func (m *Permissions) XXX_Size() int { + return xxx_messageInfo_Permissions.Size(m) +} +func (m *Permissions) XXX_DiscardUnknown() { + xxx_messageInfo_Permissions.DiscardUnknown(m) +} + +var xxx_messageInfo_Permissions proto.InternalMessageInfo + +func (m *Permissions) GetUserPermissions() []*UserPermission { + if m != nil { + return m.UserPermissions + } + return nil +} + +func (m *Permissions) GetDbPermissions() []*DbPermission { + if m != nil { + return m.DbPermissions + } + return nil +} + +type PingRequest struct { + Payload string `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PingRequest) Reset() { *m = PingRequest{} } +func (m *PingRequest) String() string { return proto.CompactTextString(m) } +func (*PingRequest) ProtoMessage() {} +func (*PingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{6} +} + +func (m *PingRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PingRequest.Unmarshal(m, b) +} +func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic) +} +func (m *PingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingRequest.Merge(m, src) +} +func (m *PingRequest) XXX_Size() int { + return xxx_messageInfo_PingRequest.Size(m) +} +func (m *PingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PingRequest proto.InternalMessageInfo + +func (m *PingRequest) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + +type PingResponse struct { + Payload string `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PingResponse) Reset() { *m = PingResponse{} } +func (m *PingResponse) String() string { return proto.CompactTextString(m) } +func (*PingResponse) ProtoMessage() {} +func (*PingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{7} +} + +func (m *PingResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PingResponse.Unmarshal(m, b) +} +func (m *PingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PingResponse.Marshal(b, m, deterministic) +} +func (m *PingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingResponse.Merge(m, src) +} +func (m *PingResponse) XXX_Size() int { + return xxx_messageInfo_PingResponse.Size(m) +} +func (m *PingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PingResponse proto.InternalMessageInfo + +func (m *PingResponse) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + +type SleepRequest struct { + // duration is in nanoseconds + Duration int64 `protobuf:"varint,1,opt,name=duration,proto3" json:"duration,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SleepRequest) Reset() { *m = SleepRequest{} } +func (m *SleepRequest) String() string { return proto.CompactTextString(m) } +func (*SleepRequest) ProtoMessage() {} +func (*SleepRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{8} +} + +func (m *SleepRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SleepRequest.Unmarshal(m, b) +} +func (m *SleepRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SleepRequest.Marshal(b, m, deterministic) +} +func (m *SleepRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SleepRequest.Merge(m, src) +} +func (m *SleepRequest) XXX_Size() int { + return xxx_messageInfo_SleepRequest.Size(m) +} +func (m *SleepRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SleepRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SleepRequest proto.InternalMessageInfo + +func (m *SleepRequest) GetDuration() int64 { + if m != nil { + return m.Duration + } + return 0 +} + +type SleepResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SleepResponse) Reset() { *m = SleepResponse{} } +func (m *SleepResponse) String() string { return proto.CompactTextString(m) } +func (*SleepResponse) ProtoMessage() {} +func (*SleepResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{9} +} + +func (m *SleepResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SleepResponse.Unmarshal(m, b) +} +func (m *SleepResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SleepResponse.Marshal(b, m, deterministic) +} +func (m *SleepResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SleepResponse.Merge(m, src) +} +func (m *SleepResponse) XXX_Size() int { + return xxx_messageInfo_SleepResponse.Size(m) +} +func (m *SleepResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SleepResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SleepResponse proto.InternalMessageInfo + +type ExecuteHookRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Parameters []string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty"` + ExtraEnv map[string]string `protobuf:"bytes,3,rep,name=extra_env,json=extraEnv,proto3" json:"extra_env,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteHookRequest) Reset() { *m = ExecuteHookRequest{} } +func (m *ExecuteHookRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteHookRequest) ProtoMessage() {} +func (*ExecuteHookRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{10} +} + +func (m *ExecuteHookRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteHookRequest.Unmarshal(m, b) +} +func (m *ExecuteHookRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteHookRequest.Marshal(b, m, deterministic) +} +func (m *ExecuteHookRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteHookRequest.Merge(m, src) +} +func (m *ExecuteHookRequest) XXX_Size() int { + return xxx_messageInfo_ExecuteHookRequest.Size(m) +} +func (m *ExecuteHookRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteHookRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteHookRequest proto.InternalMessageInfo + +func (m *ExecuteHookRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ExecuteHookRequest) GetParameters() []string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *ExecuteHookRequest) GetExtraEnv() map[string]string { + if m != nil { + return m.ExtraEnv + } + return nil +} + +type ExecuteHookResponse struct { + ExitStatus int64 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + Stdout string `protobuf:"bytes,2,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,3,opt,name=stderr,proto3" json:"stderr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteHookResponse) Reset() { *m = ExecuteHookResponse{} } +func (m *ExecuteHookResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteHookResponse) ProtoMessage() {} +func (*ExecuteHookResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{11} +} + +func (m *ExecuteHookResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteHookResponse.Unmarshal(m, b) +} +func (m *ExecuteHookResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteHookResponse.Marshal(b, m, deterministic) +} +func (m *ExecuteHookResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteHookResponse.Merge(m, src) +} +func (m *ExecuteHookResponse) XXX_Size() int { + return xxx_messageInfo_ExecuteHookResponse.Size(m) +} +func (m *ExecuteHookResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteHookResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteHookResponse proto.InternalMessageInfo + +func (m *ExecuteHookResponse) GetExitStatus() int64 { + if m != nil { + return m.ExitStatus + } + return 0 +} + +func (m *ExecuteHookResponse) GetStdout() string { + if m != nil { + return m.Stdout + } + return "" +} + +func (m *ExecuteHookResponse) GetStderr() string { + if m != nil { + return m.Stderr + } + return "" +} + +type GetSchemaRequest struct { + Tables []string `protobuf:"bytes,1,rep,name=tables,proto3" json:"tables,omitempty"` + IncludeViews bool `protobuf:"varint,2,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` + ExcludeTables []string `protobuf:"bytes,3,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSchemaRequest) Reset() { *m = GetSchemaRequest{} } +func (m *GetSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*GetSchemaRequest) ProtoMessage() {} +func (*GetSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{12} +} + +func (m *GetSchemaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSchemaRequest.Unmarshal(m, b) +} +func (m *GetSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSchemaRequest.Marshal(b, m, deterministic) +} +func (m *GetSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSchemaRequest.Merge(m, src) +} +func (m *GetSchemaRequest) XXX_Size() int { + return xxx_messageInfo_GetSchemaRequest.Size(m) +} +func (m *GetSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSchemaRequest proto.InternalMessageInfo + +func (m *GetSchemaRequest) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + +func (m *GetSchemaRequest) GetIncludeViews() bool { + if m != nil { + return m.IncludeViews + } + return false +} + +func (m *GetSchemaRequest) GetExcludeTables() []string { + if m != nil { + return m.ExcludeTables + } + return nil +} + +type GetSchemaResponse struct { + SchemaDefinition *SchemaDefinition `protobuf:"bytes,1,opt,name=schema_definition,json=schemaDefinition,proto3" json:"schema_definition,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSchemaResponse) Reset() { *m = GetSchemaResponse{} } +func (m *GetSchemaResponse) String() string { return proto.CompactTextString(m) } +func (*GetSchemaResponse) ProtoMessage() {} +func (*GetSchemaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{13} +} + +func (m *GetSchemaResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSchemaResponse.Unmarshal(m, b) +} +func (m *GetSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSchemaResponse.Marshal(b, m, deterministic) +} +func (m *GetSchemaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSchemaResponse.Merge(m, src) +} +func (m *GetSchemaResponse) XXX_Size() int { + return xxx_messageInfo_GetSchemaResponse.Size(m) +} +func (m *GetSchemaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetSchemaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSchemaResponse proto.InternalMessageInfo + +func (m *GetSchemaResponse) GetSchemaDefinition() *SchemaDefinition { + if m != nil { + return m.SchemaDefinition + } + return nil +} + +type GetPermissionsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPermissionsRequest) Reset() { *m = GetPermissionsRequest{} } +func (m *GetPermissionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetPermissionsRequest) ProtoMessage() {} +func (*GetPermissionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{14} +} + +func (m *GetPermissionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPermissionsRequest.Unmarshal(m, b) +} +func (m *GetPermissionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPermissionsRequest.Marshal(b, m, deterministic) +} +func (m *GetPermissionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPermissionsRequest.Merge(m, src) +} +func (m *GetPermissionsRequest) XXX_Size() int { + return xxx_messageInfo_GetPermissionsRequest.Size(m) +} +func (m *GetPermissionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPermissionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPermissionsRequest proto.InternalMessageInfo + +type GetPermissionsResponse struct { + Permissions *Permissions `protobuf:"bytes,1,opt,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPermissionsResponse) Reset() { *m = GetPermissionsResponse{} } +func (m *GetPermissionsResponse) String() string { return proto.CompactTextString(m) } +func (*GetPermissionsResponse) ProtoMessage() {} +func (*GetPermissionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{15} +} + +func (m *GetPermissionsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPermissionsResponse.Unmarshal(m, b) +} +func (m *GetPermissionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPermissionsResponse.Marshal(b, m, deterministic) +} +func (m *GetPermissionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPermissionsResponse.Merge(m, src) +} +func (m *GetPermissionsResponse) XXX_Size() int { + return xxx_messageInfo_GetPermissionsResponse.Size(m) +} +func (m *GetPermissionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPermissionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPermissionsResponse proto.InternalMessageInfo + +func (m *GetPermissionsResponse) GetPermissions() *Permissions { + if m != nil { + return m.Permissions + } + return nil +} + +type SetReadOnlyRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetReadOnlyRequest) Reset() { *m = SetReadOnlyRequest{} } +func (m *SetReadOnlyRequest) String() string { return proto.CompactTextString(m) } +func (*SetReadOnlyRequest) ProtoMessage() {} +func (*SetReadOnlyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{16} +} + +func (m *SetReadOnlyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetReadOnlyRequest.Unmarshal(m, b) +} +func (m *SetReadOnlyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetReadOnlyRequest.Marshal(b, m, deterministic) +} +func (m *SetReadOnlyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetReadOnlyRequest.Merge(m, src) +} +func (m *SetReadOnlyRequest) XXX_Size() int { + return xxx_messageInfo_SetReadOnlyRequest.Size(m) +} +func (m *SetReadOnlyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetReadOnlyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetReadOnlyRequest proto.InternalMessageInfo + +type SetReadOnlyResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetReadOnlyResponse) Reset() { *m = SetReadOnlyResponse{} } +func (m *SetReadOnlyResponse) String() string { return proto.CompactTextString(m) } +func (*SetReadOnlyResponse) ProtoMessage() {} +func (*SetReadOnlyResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{17} +} + +func (m *SetReadOnlyResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetReadOnlyResponse.Unmarshal(m, b) +} +func (m *SetReadOnlyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetReadOnlyResponse.Marshal(b, m, deterministic) +} +func (m *SetReadOnlyResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetReadOnlyResponse.Merge(m, src) +} +func (m *SetReadOnlyResponse) XXX_Size() int { + return xxx_messageInfo_SetReadOnlyResponse.Size(m) +} +func (m *SetReadOnlyResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetReadOnlyResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetReadOnlyResponse proto.InternalMessageInfo + +type SetReadWriteRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetReadWriteRequest) Reset() { *m = SetReadWriteRequest{} } +func (m *SetReadWriteRequest) String() string { return proto.CompactTextString(m) } +func (*SetReadWriteRequest) ProtoMessage() {} +func (*SetReadWriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{18} +} + +func (m *SetReadWriteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetReadWriteRequest.Unmarshal(m, b) +} +func (m *SetReadWriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetReadWriteRequest.Marshal(b, m, deterministic) +} +func (m *SetReadWriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetReadWriteRequest.Merge(m, src) +} +func (m *SetReadWriteRequest) XXX_Size() int { + return xxx_messageInfo_SetReadWriteRequest.Size(m) +} +func (m *SetReadWriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetReadWriteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetReadWriteRequest proto.InternalMessageInfo + +type SetReadWriteResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetReadWriteResponse) Reset() { *m = SetReadWriteResponse{} } +func (m *SetReadWriteResponse) String() string { return proto.CompactTextString(m) } +func (*SetReadWriteResponse) ProtoMessage() {} +func (*SetReadWriteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{19} +} + +func (m *SetReadWriteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetReadWriteResponse.Unmarshal(m, b) +} +func (m *SetReadWriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetReadWriteResponse.Marshal(b, m, deterministic) +} +func (m *SetReadWriteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetReadWriteResponse.Merge(m, src) +} +func (m *SetReadWriteResponse) XXX_Size() int { + return xxx_messageInfo_SetReadWriteResponse.Size(m) +} +func (m *SetReadWriteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetReadWriteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetReadWriteResponse proto.InternalMessageInfo + +type ChangeTypeRequest struct { + TabletType topodata.TabletType `protobuf:"varint,1,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChangeTypeRequest) Reset() { *m = ChangeTypeRequest{} } +func (m *ChangeTypeRequest) String() string { return proto.CompactTextString(m) } +func (*ChangeTypeRequest) ProtoMessage() {} +func (*ChangeTypeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{20} +} + +func (m *ChangeTypeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ChangeTypeRequest.Unmarshal(m, b) +} +func (m *ChangeTypeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ChangeTypeRequest.Marshal(b, m, deterministic) +} +func (m *ChangeTypeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChangeTypeRequest.Merge(m, src) +} +func (m *ChangeTypeRequest) XXX_Size() int { + return xxx_messageInfo_ChangeTypeRequest.Size(m) +} +func (m *ChangeTypeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ChangeTypeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ChangeTypeRequest proto.InternalMessageInfo + +func (m *ChangeTypeRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +type ChangeTypeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChangeTypeResponse) Reset() { *m = ChangeTypeResponse{} } +func (m *ChangeTypeResponse) String() string { return proto.CompactTextString(m) } +func (*ChangeTypeResponse) ProtoMessage() {} +func (*ChangeTypeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{21} +} + +func (m *ChangeTypeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ChangeTypeResponse.Unmarshal(m, b) +} +func (m *ChangeTypeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ChangeTypeResponse.Marshal(b, m, deterministic) +} +func (m *ChangeTypeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChangeTypeResponse.Merge(m, src) +} +func (m *ChangeTypeResponse) XXX_Size() int { + return xxx_messageInfo_ChangeTypeResponse.Size(m) +} +func (m *ChangeTypeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ChangeTypeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ChangeTypeResponse proto.InternalMessageInfo + +type RefreshStateRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RefreshStateRequest) Reset() { *m = RefreshStateRequest{} } +func (m *RefreshStateRequest) String() string { return proto.CompactTextString(m) } +func (*RefreshStateRequest) ProtoMessage() {} +func (*RefreshStateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{22} +} + +func (m *RefreshStateRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RefreshStateRequest.Unmarshal(m, b) +} +func (m *RefreshStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RefreshStateRequest.Marshal(b, m, deterministic) +} +func (m *RefreshStateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RefreshStateRequest.Merge(m, src) +} +func (m *RefreshStateRequest) XXX_Size() int { + return xxx_messageInfo_RefreshStateRequest.Size(m) +} +func (m *RefreshStateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RefreshStateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RefreshStateRequest proto.InternalMessageInfo + +type RefreshStateResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RefreshStateResponse) Reset() { *m = RefreshStateResponse{} } +func (m *RefreshStateResponse) String() string { return proto.CompactTextString(m) } +func (*RefreshStateResponse) ProtoMessage() {} +func (*RefreshStateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{23} +} + +func (m *RefreshStateResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RefreshStateResponse.Unmarshal(m, b) +} +func (m *RefreshStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RefreshStateResponse.Marshal(b, m, deterministic) +} +func (m *RefreshStateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RefreshStateResponse.Merge(m, src) +} +func (m *RefreshStateResponse) XXX_Size() int { + return xxx_messageInfo_RefreshStateResponse.Size(m) +} +func (m *RefreshStateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RefreshStateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RefreshStateResponse proto.InternalMessageInfo + +type RunHealthCheckRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunHealthCheckRequest) Reset() { *m = RunHealthCheckRequest{} } +func (m *RunHealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*RunHealthCheckRequest) ProtoMessage() {} +func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{24} +} + +func (m *RunHealthCheckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunHealthCheckRequest.Unmarshal(m, b) +} +func (m *RunHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunHealthCheckRequest.Marshal(b, m, deterministic) +} +func (m *RunHealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunHealthCheckRequest.Merge(m, src) +} +func (m *RunHealthCheckRequest) XXX_Size() int { + return xxx_messageInfo_RunHealthCheckRequest.Size(m) +} +func (m *RunHealthCheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RunHealthCheckRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RunHealthCheckRequest proto.InternalMessageInfo + +type RunHealthCheckResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunHealthCheckResponse) Reset() { *m = RunHealthCheckResponse{} } +func (m *RunHealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*RunHealthCheckResponse) ProtoMessage() {} +func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{25} +} + +func (m *RunHealthCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RunHealthCheckResponse.Unmarshal(m, b) +} +func (m *RunHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RunHealthCheckResponse.Marshal(b, m, deterministic) +} +func (m *RunHealthCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunHealthCheckResponse.Merge(m, src) +} +func (m *RunHealthCheckResponse) XXX_Size() int { + return xxx_messageInfo_RunHealthCheckResponse.Size(m) +} +func (m *RunHealthCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RunHealthCheckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RunHealthCheckResponse proto.InternalMessageInfo + +type IgnoreHealthErrorRequest struct { + Pattern string `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IgnoreHealthErrorRequest) Reset() { *m = IgnoreHealthErrorRequest{} } +func (m *IgnoreHealthErrorRequest) String() string { return proto.CompactTextString(m) } +func (*IgnoreHealthErrorRequest) ProtoMessage() {} +func (*IgnoreHealthErrorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{26} +} + +func (m *IgnoreHealthErrorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IgnoreHealthErrorRequest.Unmarshal(m, b) +} +func (m *IgnoreHealthErrorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IgnoreHealthErrorRequest.Marshal(b, m, deterministic) +} +func (m *IgnoreHealthErrorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IgnoreHealthErrorRequest.Merge(m, src) +} +func (m *IgnoreHealthErrorRequest) XXX_Size() int { + return xxx_messageInfo_IgnoreHealthErrorRequest.Size(m) +} +func (m *IgnoreHealthErrorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IgnoreHealthErrorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_IgnoreHealthErrorRequest proto.InternalMessageInfo + +func (m *IgnoreHealthErrorRequest) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +type IgnoreHealthErrorResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IgnoreHealthErrorResponse) Reset() { *m = IgnoreHealthErrorResponse{} } +func (m *IgnoreHealthErrorResponse) String() string { return proto.CompactTextString(m) } +func (*IgnoreHealthErrorResponse) ProtoMessage() {} +func (*IgnoreHealthErrorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{27} +} + +func (m *IgnoreHealthErrorResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IgnoreHealthErrorResponse.Unmarshal(m, b) +} +func (m *IgnoreHealthErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IgnoreHealthErrorResponse.Marshal(b, m, deterministic) +} +func (m *IgnoreHealthErrorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_IgnoreHealthErrorResponse.Merge(m, src) +} +func (m *IgnoreHealthErrorResponse) XXX_Size() int { + return xxx_messageInfo_IgnoreHealthErrorResponse.Size(m) +} +func (m *IgnoreHealthErrorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_IgnoreHealthErrorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_IgnoreHealthErrorResponse proto.InternalMessageInfo + +type ReloadSchemaRequest struct { + // wait_position allows scheduling a schema reload to occur after a + // given DDL has replicated to this server, by specifying a replication + // position to wait for. Leave empty to trigger the reload immediately. + WaitPosition string `protobuf:"bytes,1,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReloadSchemaRequest) Reset() { *m = ReloadSchemaRequest{} } +func (m *ReloadSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*ReloadSchemaRequest) ProtoMessage() {} +func (*ReloadSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{28} +} + +func (m *ReloadSchemaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReloadSchemaRequest.Unmarshal(m, b) +} +func (m *ReloadSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReloadSchemaRequest.Marshal(b, m, deterministic) +} +func (m *ReloadSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReloadSchemaRequest.Merge(m, src) +} +func (m *ReloadSchemaRequest) XXX_Size() int { + return xxx_messageInfo_ReloadSchemaRequest.Size(m) +} +func (m *ReloadSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReloadSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReloadSchemaRequest proto.InternalMessageInfo + +func (m *ReloadSchemaRequest) GetWaitPosition() string { + if m != nil { + return m.WaitPosition + } + return "" +} + +type ReloadSchemaResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReloadSchemaResponse) Reset() { *m = ReloadSchemaResponse{} } +func (m *ReloadSchemaResponse) String() string { return proto.CompactTextString(m) } +func (*ReloadSchemaResponse) ProtoMessage() {} +func (*ReloadSchemaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{29} +} + +func (m *ReloadSchemaResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReloadSchemaResponse.Unmarshal(m, b) +} +func (m *ReloadSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReloadSchemaResponse.Marshal(b, m, deterministic) +} +func (m *ReloadSchemaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReloadSchemaResponse.Merge(m, src) +} +func (m *ReloadSchemaResponse) XXX_Size() int { + return xxx_messageInfo_ReloadSchemaResponse.Size(m) +} +func (m *ReloadSchemaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReloadSchemaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReloadSchemaResponse proto.InternalMessageInfo + +type PreflightSchemaRequest struct { + Changes []string `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PreflightSchemaRequest) Reset() { *m = PreflightSchemaRequest{} } +func (m *PreflightSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*PreflightSchemaRequest) ProtoMessage() {} +func (*PreflightSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{30} +} + +func (m *PreflightSchemaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PreflightSchemaRequest.Unmarshal(m, b) +} +func (m *PreflightSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PreflightSchemaRequest.Marshal(b, m, deterministic) +} +func (m *PreflightSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreflightSchemaRequest.Merge(m, src) +} +func (m *PreflightSchemaRequest) XXX_Size() int { + return xxx_messageInfo_PreflightSchemaRequest.Size(m) +} +func (m *PreflightSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PreflightSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PreflightSchemaRequest proto.InternalMessageInfo + +func (m *PreflightSchemaRequest) GetChanges() []string { + if m != nil { + return m.Changes + } + return nil +} + +type PreflightSchemaResponse struct { + // change_results has for each change the schema before and after it. + // The number of elements is identical to the length of "changes" in the request. + ChangeResults []*SchemaChangeResult `protobuf:"bytes,1,rep,name=change_results,json=changeResults,proto3" json:"change_results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PreflightSchemaResponse) Reset() { *m = PreflightSchemaResponse{} } +func (m *PreflightSchemaResponse) String() string { return proto.CompactTextString(m) } +func (*PreflightSchemaResponse) ProtoMessage() {} +func (*PreflightSchemaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{31} +} + +func (m *PreflightSchemaResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PreflightSchemaResponse.Unmarshal(m, b) +} +func (m *PreflightSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PreflightSchemaResponse.Marshal(b, m, deterministic) +} +func (m *PreflightSchemaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreflightSchemaResponse.Merge(m, src) +} +func (m *PreflightSchemaResponse) XXX_Size() int { + return xxx_messageInfo_PreflightSchemaResponse.Size(m) +} +func (m *PreflightSchemaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PreflightSchemaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PreflightSchemaResponse proto.InternalMessageInfo + +func (m *PreflightSchemaResponse) GetChangeResults() []*SchemaChangeResult { + if m != nil { + return m.ChangeResults + } + return nil +} + +type ApplySchemaRequest struct { + Sql string `protobuf:"bytes,1,opt,name=sql,proto3" json:"sql,omitempty"` + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + AllowReplication bool `protobuf:"varint,3,opt,name=allow_replication,json=allowReplication,proto3" json:"allow_replication,omitempty"` + BeforeSchema *SchemaDefinition `protobuf:"bytes,4,opt,name=before_schema,json=beforeSchema,proto3" json:"before_schema,omitempty"` + AfterSchema *SchemaDefinition `protobuf:"bytes,5,opt,name=after_schema,json=afterSchema,proto3" json:"after_schema,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplySchemaRequest) Reset() { *m = ApplySchemaRequest{} } +func (m *ApplySchemaRequest) String() string { return proto.CompactTextString(m) } +func (*ApplySchemaRequest) ProtoMessage() {} +func (*ApplySchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{32} +} + +func (m *ApplySchemaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplySchemaRequest.Unmarshal(m, b) +} +func (m *ApplySchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplySchemaRequest.Marshal(b, m, deterministic) +} +func (m *ApplySchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplySchemaRequest.Merge(m, src) +} +func (m *ApplySchemaRequest) XXX_Size() int { + return xxx_messageInfo_ApplySchemaRequest.Size(m) +} +func (m *ApplySchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ApplySchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplySchemaRequest proto.InternalMessageInfo + +func (m *ApplySchemaRequest) GetSql() string { + if m != nil { + return m.Sql + } + return "" +} + +func (m *ApplySchemaRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +func (m *ApplySchemaRequest) GetAllowReplication() bool { + if m != nil { + return m.AllowReplication + } + return false +} + +func (m *ApplySchemaRequest) GetBeforeSchema() *SchemaDefinition { + if m != nil { + return m.BeforeSchema + } + return nil +} + +func (m *ApplySchemaRequest) GetAfterSchema() *SchemaDefinition { + if m != nil { + return m.AfterSchema + } + return nil +} + +type ApplySchemaResponse struct { + BeforeSchema *SchemaDefinition `protobuf:"bytes,1,opt,name=before_schema,json=beforeSchema,proto3" json:"before_schema,omitempty"` + AfterSchema *SchemaDefinition `protobuf:"bytes,2,opt,name=after_schema,json=afterSchema,proto3" json:"after_schema,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplySchemaResponse) Reset() { *m = ApplySchemaResponse{} } +func (m *ApplySchemaResponse) String() string { return proto.CompactTextString(m) } +func (*ApplySchemaResponse) ProtoMessage() {} +func (*ApplySchemaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{33} +} + +func (m *ApplySchemaResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplySchemaResponse.Unmarshal(m, b) +} +func (m *ApplySchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplySchemaResponse.Marshal(b, m, deterministic) +} +func (m *ApplySchemaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplySchemaResponse.Merge(m, src) +} +func (m *ApplySchemaResponse) XXX_Size() int { + return xxx_messageInfo_ApplySchemaResponse.Size(m) +} +func (m *ApplySchemaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ApplySchemaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplySchemaResponse proto.InternalMessageInfo + +func (m *ApplySchemaResponse) GetBeforeSchema() *SchemaDefinition { + if m != nil { + return m.BeforeSchema + } + return nil +} + +func (m *ApplySchemaResponse) GetAfterSchema() *SchemaDefinition { + if m != nil { + return m.AfterSchema + } + return nil +} + +type LockTablesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LockTablesRequest) Reset() { *m = LockTablesRequest{} } +func (m *LockTablesRequest) String() string { return proto.CompactTextString(m) } +func (*LockTablesRequest) ProtoMessage() {} +func (*LockTablesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{34} +} + +func (m *LockTablesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LockTablesRequest.Unmarshal(m, b) +} +func (m *LockTablesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LockTablesRequest.Marshal(b, m, deterministic) +} +func (m *LockTablesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LockTablesRequest.Merge(m, src) +} +func (m *LockTablesRequest) XXX_Size() int { + return xxx_messageInfo_LockTablesRequest.Size(m) +} +func (m *LockTablesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LockTablesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LockTablesRequest proto.InternalMessageInfo + +type LockTablesResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LockTablesResponse) Reset() { *m = LockTablesResponse{} } +func (m *LockTablesResponse) String() string { return proto.CompactTextString(m) } +func (*LockTablesResponse) ProtoMessage() {} +func (*LockTablesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{35} +} + +func (m *LockTablesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LockTablesResponse.Unmarshal(m, b) +} +func (m *LockTablesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LockTablesResponse.Marshal(b, m, deterministic) +} +func (m *LockTablesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LockTablesResponse.Merge(m, src) +} +func (m *LockTablesResponse) XXX_Size() int { + return xxx_messageInfo_LockTablesResponse.Size(m) +} +func (m *LockTablesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LockTablesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LockTablesResponse proto.InternalMessageInfo + +type UnlockTablesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UnlockTablesRequest) Reset() { *m = UnlockTablesRequest{} } +func (m *UnlockTablesRequest) String() string { return proto.CompactTextString(m) } +func (*UnlockTablesRequest) ProtoMessage() {} +func (*UnlockTablesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{36} +} + +func (m *UnlockTablesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UnlockTablesRequest.Unmarshal(m, b) +} +func (m *UnlockTablesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UnlockTablesRequest.Marshal(b, m, deterministic) +} +func (m *UnlockTablesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UnlockTablesRequest.Merge(m, src) +} +func (m *UnlockTablesRequest) XXX_Size() int { + return xxx_messageInfo_UnlockTablesRequest.Size(m) +} +func (m *UnlockTablesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UnlockTablesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UnlockTablesRequest proto.InternalMessageInfo + +type UnlockTablesResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UnlockTablesResponse) Reset() { *m = UnlockTablesResponse{} } +func (m *UnlockTablesResponse) String() string { return proto.CompactTextString(m) } +func (*UnlockTablesResponse) ProtoMessage() {} +func (*UnlockTablesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{37} +} + +func (m *UnlockTablesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UnlockTablesResponse.Unmarshal(m, b) +} +func (m *UnlockTablesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UnlockTablesResponse.Marshal(b, m, deterministic) +} +func (m *UnlockTablesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UnlockTablesResponse.Merge(m, src) +} +func (m *UnlockTablesResponse) XXX_Size() int { + return xxx_messageInfo_UnlockTablesResponse.Size(m) +} +func (m *UnlockTablesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UnlockTablesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UnlockTablesResponse proto.InternalMessageInfo + +type ExecuteFetchAsDbaRequest struct { + Query []byte `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + DbName string `protobuf:"bytes,2,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` + MaxRows uint64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + DisableBinlogs bool `protobuf:"varint,4,opt,name=disable_binlogs,json=disableBinlogs,proto3" json:"disable_binlogs,omitempty"` + ReloadSchema bool `protobuf:"varint,5,opt,name=reload_schema,json=reloadSchema,proto3" json:"reload_schema,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteFetchAsDbaRequest) Reset() { *m = ExecuteFetchAsDbaRequest{} } +func (m *ExecuteFetchAsDbaRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteFetchAsDbaRequest) ProtoMessage() {} +func (*ExecuteFetchAsDbaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{38} +} + +func (m *ExecuteFetchAsDbaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteFetchAsDbaRequest.Unmarshal(m, b) +} +func (m *ExecuteFetchAsDbaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteFetchAsDbaRequest.Marshal(b, m, deterministic) +} +func (m *ExecuteFetchAsDbaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteFetchAsDbaRequest.Merge(m, src) +} +func (m *ExecuteFetchAsDbaRequest) XXX_Size() int { + return xxx_messageInfo_ExecuteFetchAsDbaRequest.Size(m) +} +func (m *ExecuteFetchAsDbaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteFetchAsDbaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteFetchAsDbaRequest proto.InternalMessageInfo + +func (m *ExecuteFetchAsDbaRequest) GetQuery() []byte { + if m != nil { + return m.Query + } + return nil +} + +func (m *ExecuteFetchAsDbaRequest) GetDbName() string { + if m != nil { + return m.DbName + } + return "" +} + +func (m *ExecuteFetchAsDbaRequest) GetMaxRows() uint64 { + if m != nil { + return m.MaxRows + } + return 0 +} + +func (m *ExecuteFetchAsDbaRequest) GetDisableBinlogs() bool { + if m != nil { + return m.DisableBinlogs + } + return false +} + +func (m *ExecuteFetchAsDbaRequest) GetReloadSchema() bool { + if m != nil { + return m.ReloadSchema + } + return false +} + +type ExecuteFetchAsDbaResponse struct { + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteFetchAsDbaResponse) Reset() { *m = ExecuteFetchAsDbaResponse{} } +func (m *ExecuteFetchAsDbaResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteFetchAsDbaResponse) ProtoMessage() {} +func (*ExecuteFetchAsDbaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{39} +} + +func (m *ExecuteFetchAsDbaResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteFetchAsDbaResponse.Unmarshal(m, b) +} +func (m *ExecuteFetchAsDbaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteFetchAsDbaResponse.Marshal(b, m, deterministic) +} +func (m *ExecuteFetchAsDbaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteFetchAsDbaResponse.Merge(m, src) +} +func (m *ExecuteFetchAsDbaResponse) XXX_Size() int { + return xxx_messageInfo_ExecuteFetchAsDbaResponse.Size(m) +} +func (m *ExecuteFetchAsDbaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteFetchAsDbaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteFetchAsDbaResponse proto.InternalMessageInfo + +func (m *ExecuteFetchAsDbaResponse) GetResult() *query.QueryResult { + if m != nil { + return m.Result + } + return nil +} + +type ExecuteFetchAsAllPrivsRequest struct { + Query []byte `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + DbName string `protobuf:"bytes,2,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` + MaxRows uint64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + ReloadSchema bool `protobuf:"varint,4,opt,name=reload_schema,json=reloadSchema,proto3" json:"reload_schema,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteFetchAsAllPrivsRequest) Reset() { *m = ExecuteFetchAsAllPrivsRequest{} } +func (m *ExecuteFetchAsAllPrivsRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteFetchAsAllPrivsRequest) ProtoMessage() {} +func (*ExecuteFetchAsAllPrivsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{40} +} + +func (m *ExecuteFetchAsAllPrivsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.Unmarshal(m, b) +} +func (m *ExecuteFetchAsAllPrivsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.Marshal(b, m, deterministic) +} +func (m *ExecuteFetchAsAllPrivsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.Merge(m, src) +} +func (m *ExecuteFetchAsAllPrivsRequest) XXX_Size() int { + return xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.Size(m) +} +func (m *ExecuteFetchAsAllPrivsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteFetchAsAllPrivsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteFetchAsAllPrivsRequest proto.InternalMessageInfo + +func (m *ExecuteFetchAsAllPrivsRequest) GetQuery() []byte { + if m != nil { + return m.Query + } + return nil +} + +func (m *ExecuteFetchAsAllPrivsRequest) GetDbName() string { + if m != nil { + return m.DbName + } + return "" +} + +func (m *ExecuteFetchAsAllPrivsRequest) GetMaxRows() uint64 { + if m != nil { + return m.MaxRows + } + return 0 +} + +func (m *ExecuteFetchAsAllPrivsRequest) GetReloadSchema() bool { + if m != nil { + return m.ReloadSchema + } + return false +} + +type ExecuteFetchAsAllPrivsResponse struct { + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteFetchAsAllPrivsResponse) Reset() { *m = ExecuteFetchAsAllPrivsResponse{} } +func (m *ExecuteFetchAsAllPrivsResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteFetchAsAllPrivsResponse) ProtoMessage() {} +func (*ExecuteFetchAsAllPrivsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{41} +} + +func (m *ExecuteFetchAsAllPrivsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.Unmarshal(m, b) +} +func (m *ExecuteFetchAsAllPrivsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.Marshal(b, m, deterministic) +} +func (m *ExecuteFetchAsAllPrivsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.Merge(m, src) +} +func (m *ExecuteFetchAsAllPrivsResponse) XXX_Size() int { + return xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.Size(m) +} +func (m *ExecuteFetchAsAllPrivsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteFetchAsAllPrivsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteFetchAsAllPrivsResponse proto.InternalMessageInfo + +func (m *ExecuteFetchAsAllPrivsResponse) GetResult() *query.QueryResult { + if m != nil { + return m.Result + } + return nil +} + +type ExecuteFetchAsAppRequest struct { + Query []byte `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + MaxRows uint64 `protobuf:"varint,2,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteFetchAsAppRequest) Reset() { *m = ExecuteFetchAsAppRequest{} } +func (m *ExecuteFetchAsAppRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteFetchAsAppRequest) ProtoMessage() {} +func (*ExecuteFetchAsAppRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{42} +} + +func (m *ExecuteFetchAsAppRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteFetchAsAppRequest.Unmarshal(m, b) +} +func (m *ExecuteFetchAsAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteFetchAsAppRequest.Marshal(b, m, deterministic) +} +func (m *ExecuteFetchAsAppRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteFetchAsAppRequest.Merge(m, src) +} +func (m *ExecuteFetchAsAppRequest) XXX_Size() int { + return xxx_messageInfo_ExecuteFetchAsAppRequest.Size(m) +} +func (m *ExecuteFetchAsAppRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteFetchAsAppRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteFetchAsAppRequest proto.InternalMessageInfo + +func (m *ExecuteFetchAsAppRequest) GetQuery() []byte { + if m != nil { + return m.Query + } + return nil +} + +func (m *ExecuteFetchAsAppRequest) GetMaxRows() uint64 { + if m != nil { + return m.MaxRows + } + return 0 +} + +type ExecuteFetchAsAppResponse struct { + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteFetchAsAppResponse) Reset() { *m = ExecuteFetchAsAppResponse{} } +func (m *ExecuteFetchAsAppResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteFetchAsAppResponse) ProtoMessage() {} +func (*ExecuteFetchAsAppResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{43} +} + +func (m *ExecuteFetchAsAppResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteFetchAsAppResponse.Unmarshal(m, b) +} +func (m *ExecuteFetchAsAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteFetchAsAppResponse.Marshal(b, m, deterministic) +} +func (m *ExecuteFetchAsAppResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteFetchAsAppResponse.Merge(m, src) +} +func (m *ExecuteFetchAsAppResponse) XXX_Size() int { + return xxx_messageInfo_ExecuteFetchAsAppResponse.Size(m) +} +func (m *ExecuteFetchAsAppResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteFetchAsAppResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteFetchAsAppResponse proto.InternalMessageInfo + +func (m *ExecuteFetchAsAppResponse) GetResult() *query.QueryResult { + if m != nil { + return m.Result + } + return nil +} + +type ReplicationStatusRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReplicationStatusRequest) Reset() { *m = ReplicationStatusRequest{} } +func (m *ReplicationStatusRequest) String() string { return proto.CompactTextString(m) } +func (*ReplicationStatusRequest) ProtoMessage() {} +func (*ReplicationStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{44} +} + +func (m *ReplicationStatusRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReplicationStatusRequest.Unmarshal(m, b) +} +func (m *ReplicationStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReplicationStatusRequest.Marshal(b, m, deterministic) +} +func (m *ReplicationStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationStatusRequest.Merge(m, src) +} +func (m *ReplicationStatusRequest) XXX_Size() int { + return xxx_messageInfo_ReplicationStatusRequest.Size(m) +} +func (m *ReplicationStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationStatusRequest proto.InternalMessageInfo + +type ReplicationStatusResponse struct { + Status *replicationdata.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReplicationStatusResponse) Reset() { *m = ReplicationStatusResponse{} } +func (m *ReplicationStatusResponse) String() string { return proto.CompactTextString(m) } +func (*ReplicationStatusResponse) ProtoMessage() {} +func (*ReplicationStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{45} +} + +func (m *ReplicationStatusResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReplicationStatusResponse.Unmarshal(m, b) +} +func (m *ReplicationStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReplicationStatusResponse.Marshal(b, m, deterministic) +} +func (m *ReplicationStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationStatusResponse.Merge(m, src) +} +func (m *ReplicationStatusResponse) XXX_Size() int { + return xxx_messageInfo_ReplicationStatusResponse.Size(m) +} +func (m *ReplicationStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationStatusResponse proto.InternalMessageInfo + +func (m *ReplicationStatusResponse) GetStatus() *replicationdata.Status { + if m != nil { + return m.Status + } + return nil +} + +type MasterPositionRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MasterPositionRequest) Reset() { *m = MasterPositionRequest{} } +func (m *MasterPositionRequest) String() string { return proto.CompactTextString(m) } +func (*MasterPositionRequest) ProtoMessage() {} +func (*MasterPositionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{46} +} + +func (m *MasterPositionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MasterPositionRequest.Unmarshal(m, b) +} +func (m *MasterPositionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MasterPositionRequest.Marshal(b, m, deterministic) +} +func (m *MasterPositionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MasterPositionRequest.Merge(m, src) +} +func (m *MasterPositionRequest) XXX_Size() int { + return xxx_messageInfo_MasterPositionRequest.Size(m) +} +func (m *MasterPositionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MasterPositionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MasterPositionRequest proto.InternalMessageInfo + +type MasterPositionResponse struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MasterPositionResponse) Reset() { *m = MasterPositionResponse{} } +func (m *MasterPositionResponse) String() string { return proto.CompactTextString(m) } +func (*MasterPositionResponse) ProtoMessage() {} +func (*MasterPositionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{47} +} + +func (m *MasterPositionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MasterPositionResponse.Unmarshal(m, b) +} +func (m *MasterPositionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MasterPositionResponse.Marshal(b, m, deterministic) +} +func (m *MasterPositionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MasterPositionResponse.Merge(m, src) +} +func (m *MasterPositionResponse) XXX_Size() int { + return xxx_messageInfo_MasterPositionResponse.Size(m) +} +func (m *MasterPositionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MasterPositionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MasterPositionResponse proto.InternalMessageInfo + +func (m *MasterPositionResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +type WaitForPositionRequest struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WaitForPositionRequest) Reset() { *m = WaitForPositionRequest{} } +func (m *WaitForPositionRequest) String() string { return proto.CompactTextString(m) } +func (*WaitForPositionRequest) ProtoMessage() {} +func (*WaitForPositionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{48} +} + +func (m *WaitForPositionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WaitForPositionRequest.Unmarshal(m, b) +} +func (m *WaitForPositionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WaitForPositionRequest.Marshal(b, m, deterministic) +} +func (m *WaitForPositionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WaitForPositionRequest.Merge(m, src) +} +func (m *WaitForPositionRequest) XXX_Size() int { + return xxx_messageInfo_WaitForPositionRequest.Size(m) +} +func (m *WaitForPositionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WaitForPositionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WaitForPositionRequest proto.InternalMessageInfo + +func (m *WaitForPositionRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +type WaitForPositionResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WaitForPositionResponse) Reset() { *m = WaitForPositionResponse{} } +func (m *WaitForPositionResponse) String() string { return proto.CompactTextString(m) } +func (*WaitForPositionResponse) ProtoMessage() {} +func (*WaitForPositionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{49} +} + +func (m *WaitForPositionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WaitForPositionResponse.Unmarshal(m, b) +} +func (m *WaitForPositionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WaitForPositionResponse.Marshal(b, m, deterministic) +} +func (m *WaitForPositionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WaitForPositionResponse.Merge(m, src) +} +func (m *WaitForPositionResponse) XXX_Size() int { + return xxx_messageInfo_WaitForPositionResponse.Size(m) +} +func (m *WaitForPositionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WaitForPositionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WaitForPositionResponse proto.InternalMessageInfo + +type StopReplicationRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopReplicationRequest) Reset() { *m = StopReplicationRequest{} } +func (m *StopReplicationRequest) String() string { return proto.CompactTextString(m) } +func (*StopReplicationRequest) ProtoMessage() {} +func (*StopReplicationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{50} +} + +func (m *StopReplicationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopReplicationRequest.Unmarshal(m, b) +} +func (m *StopReplicationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopReplicationRequest.Marshal(b, m, deterministic) +} +func (m *StopReplicationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopReplicationRequest.Merge(m, src) +} +func (m *StopReplicationRequest) XXX_Size() int { + return xxx_messageInfo_StopReplicationRequest.Size(m) +} +func (m *StopReplicationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopReplicationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopReplicationRequest proto.InternalMessageInfo + +type StopReplicationResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopReplicationResponse) Reset() { *m = StopReplicationResponse{} } +func (m *StopReplicationResponse) String() string { return proto.CompactTextString(m) } +func (*StopReplicationResponse) ProtoMessage() {} +func (*StopReplicationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{51} +} + +func (m *StopReplicationResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopReplicationResponse.Unmarshal(m, b) +} +func (m *StopReplicationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopReplicationResponse.Marshal(b, m, deterministic) +} +func (m *StopReplicationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopReplicationResponse.Merge(m, src) +} +func (m *StopReplicationResponse) XXX_Size() int { + return xxx_messageInfo_StopReplicationResponse.Size(m) +} +func (m *StopReplicationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopReplicationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopReplicationResponse proto.InternalMessageInfo + +type StopReplicationMinimumRequest struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + WaitTimeout int64 `protobuf:"varint,2,opt,name=wait_timeout,json=waitTimeout,proto3" json:"wait_timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopReplicationMinimumRequest) Reset() { *m = StopReplicationMinimumRequest{} } +func (m *StopReplicationMinimumRequest) String() string { return proto.CompactTextString(m) } +func (*StopReplicationMinimumRequest) ProtoMessage() {} +func (*StopReplicationMinimumRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{52} +} + +func (m *StopReplicationMinimumRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopReplicationMinimumRequest.Unmarshal(m, b) +} +func (m *StopReplicationMinimumRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopReplicationMinimumRequest.Marshal(b, m, deterministic) +} +func (m *StopReplicationMinimumRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopReplicationMinimumRequest.Merge(m, src) +} +func (m *StopReplicationMinimumRequest) XXX_Size() int { + return xxx_messageInfo_StopReplicationMinimumRequest.Size(m) +} +func (m *StopReplicationMinimumRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopReplicationMinimumRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopReplicationMinimumRequest proto.InternalMessageInfo + +func (m *StopReplicationMinimumRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *StopReplicationMinimumRequest) GetWaitTimeout() int64 { + if m != nil { + return m.WaitTimeout + } + return 0 +} + +type StopReplicationMinimumResponse struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopReplicationMinimumResponse) Reset() { *m = StopReplicationMinimumResponse{} } +func (m *StopReplicationMinimumResponse) String() string { return proto.CompactTextString(m) } +func (*StopReplicationMinimumResponse) ProtoMessage() {} +func (*StopReplicationMinimumResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{53} +} + +func (m *StopReplicationMinimumResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopReplicationMinimumResponse.Unmarshal(m, b) +} +func (m *StopReplicationMinimumResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopReplicationMinimumResponse.Marshal(b, m, deterministic) +} +func (m *StopReplicationMinimumResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopReplicationMinimumResponse.Merge(m, src) +} +func (m *StopReplicationMinimumResponse) XXX_Size() int { + return xxx_messageInfo_StopReplicationMinimumResponse.Size(m) +} +func (m *StopReplicationMinimumResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopReplicationMinimumResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopReplicationMinimumResponse proto.InternalMessageInfo + +func (m *StopReplicationMinimumResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +type StartReplicationRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartReplicationRequest) Reset() { *m = StartReplicationRequest{} } +func (m *StartReplicationRequest) String() string { return proto.CompactTextString(m) } +func (*StartReplicationRequest) ProtoMessage() {} +func (*StartReplicationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{54} +} + +func (m *StartReplicationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartReplicationRequest.Unmarshal(m, b) +} +func (m *StartReplicationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartReplicationRequest.Marshal(b, m, deterministic) +} +func (m *StartReplicationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartReplicationRequest.Merge(m, src) +} +func (m *StartReplicationRequest) XXX_Size() int { + return xxx_messageInfo_StartReplicationRequest.Size(m) +} +func (m *StartReplicationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartReplicationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartReplicationRequest proto.InternalMessageInfo + +type StartReplicationResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartReplicationResponse) Reset() { *m = StartReplicationResponse{} } +func (m *StartReplicationResponse) String() string { return proto.CompactTextString(m) } +func (*StartReplicationResponse) ProtoMessage() {} +func (*StartReplicationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{55} +} + +func (m *StartReplicationResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartReplicationResponse.Unmarshal(m, b) +} +func (m *StartReplicationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartReplicationResponse.Marshal(b, m, deterministic) +} +func (m *StartReplicationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartReplicationResponse.Merge(m, src) +} +func (m *StartReplicationResponse) XXX_Size() int { + return xxx_messageInfo_StartReplicationResponse.Size(m) +} +func (m *StartReplicationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartReplicationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartReplicationResponse proto.InternalMessageInfo + +type StartReplicationUntilAfterRequest struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + WaitTimeout int64 `protobuf:"varint,2,opt,name=wait_timeout,json=waitTimeout,proto3" json:"wait_timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartReplicationUntilAfterRequest) Reset() { *m = StartReplicationUntilAfterRequest{} } +func (m *StartReplicationUntilAfterRequest) String() string { return proto.CompactTextString(m) } +func (*StartReplicationUntilAfterRequest) ProtoMessage() {} +func (*StartReplicationUntilAfterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{56} +} + +func (m *StartReplicationUntilAfterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartReplicationUntilAfterRequest.Unmarshal(m, b) +} +func (m *StartReplicationUntilAfterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartReplicationUntilAfterRequest.Marshal(b, m, deterministic) +} +func (m *StartReplicationUntilAfterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartReplicationUntilAfterRequest.Merge(m, src) +} +func (m *StartReplicationUntilAfterRequest) XXX_Size() int { + return xxx_messageInfo_StartReplicationUntilAfterRequest.Size(m) +} +func (m *StartReplicationUntilAfterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartReplicationUntilAfterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartReplicationUntilAfterRequest proto.InternalMessageInfo + +func (m *StartReplicationUntilAfterRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *StartReplicationUntilAfterRequest) GetWaitTimeout() int64 { + if m != nil { + return m.WaitTimeout + } + return 0 +} + +type StartReplicationUntilAfterResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartReplicationUntilAfterResponse) Reset() { *m = StartReplicationUntilAfterResponse{} } +func (m *StartReplicationUntilAfterResponse) String() string { return proto.CompactTextString(m) } +func (*StartReplicationUntilAfterResponse) ProtoMessage() {} +func (*StartReplicationUntilAfterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{57} +} + +func (m *StartReplicationUntilAfterResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartReplicationUntilAfterResponse.Unmarshal(m, b) +} +func (m *StartReplicationUntilAfterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartReplicationUntilAfterResponse.Marshal(b, m, deterministic) +} +func (m *StartReplicationUntilAfterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartReplicationUntilAfterResponse.Merge(m, src) +} +func (m *StartReplicationUntilAfterResponse) XXX_Size() int { + return xxx_messageInfo_StartReplicationUntilAfterResponse.Size(m) +} +func (m *StartReplicationUntilAfterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartReplicationUntilAfterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartReplicationUntilAfterResponse proto.InternalMessageInfo + +type GetReplicasRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetReplicasRequest) Reset() { *m = GetReplicasRequest{} } +func (m *GetReplicasRequest) String() string { return proto.CompactTextString(m) } +func (*GetReplicasRequest) ProtoMessage() {} +func (*GetReplicasRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{58} +} + +func (m *GetReplicasRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetReplicasRequest.Unmarshal(m, b) +} +func (m *GetReplicasRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetReplicasRequest.Marshal(b, m, deterministic) +} +func (m *GetReplicasRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetReplicasRequest.Merge(m, src) +} +func (m *GetReplicasRequest) XXX_Size() int { + return xxx_messageInfo_GetReplicasRequest.Size(m) +} +func (m *GetReplicasRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetReplicasRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetReplicasRequest proto.InternalMessageInfo + +type GetReplicasResponse struct { + Addrs []string `protobuf:"bytes,1,rep,name=addrs,proto3" json:"addrs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetReplicasResponse) Reset() { *m = GetReplicasResponse{} } +func (m *GetReplicasResponse) String() string { return proto.CompactTextString(m) } +func (*GetReplicasResponse) ProtoMessage() {} +func (*GetReplicasResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{59} +} + +func (m *GetReplicasResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetReplicasResponse.Unmarshal(m, b) +} +func (m *GetReplicasResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetReplicasResponse.Marshal(b, m, deterministic) +} +func (m *GetReplicasResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetReplicasResponse.Merge(m, src) +} +func (m *GetReplicasResponse) XXX_Size() int { + return xxx_messageInfo_GetReplicasResponse.Size(m) +} +func (m *GetReplicasResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetReplicasResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetReplicasResponse proto.InternalMessageInfo + +func (m *GetReplicasResponse) GetAddrs() []string { + if m != nil { + return m.Addrs + } + return nil +} + +type ResetReplicationRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResetReplicationRequest) Reset() { *m = ResetReplicationRequest{} } +func (m *ResetReplicationRequest) String() string { return proto.CompactTextString(m) } +func (*ResetReplicationRequest) ProtoMessage() {} +func (*ResetReplicationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{60} +} + +func (m *ResetReplicationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResetReplicationRequest.Unmarshal(m, b) +} +func (m *ResetReplicationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResetReplicationRequest.Marshal(b, m, deterministic) +} +func (m *ResetReplicationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetReplicationRequest.Merge(m, src) +} +func (m *ResetReplicationRequest) XXX_Size() int { + return xxx_messageInfo_ResetReplicationRequest.Size(m) +} +func (m *ResetReplicationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResetReplicationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetReplicationRequest proto.InternalMessageInfo + +type ResetReplicationResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResetReplicationResponse) Reset() { *m = ResetReplicationResponse{} } +func (m *ResetReplicationResponse) String() string { return proto.CompactTextString(m) } +func (*ResetReplicationResponse) ProtoMessage() {} +func (*ResetReplicationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{61} +} + +func (m *ResetReplicationResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResetReplicationResponse.Unmarshal(m, b) +} +func (m *ResetReplicationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResetReplicationResponse.Marshal(b, m, deterministic) +} +func (m *ResetReplicationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetReplicationResponse.Merge(m, src) +} +func (m *ResetReplicationResponse) XXX_Size() int { + return xxx_messageInfo_ResetReplicationResponse.Size(m) +} +func (m *ResetReplicationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResetReplicationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetReplicationResponse proto.InternalMessageInfo + +type VReplicationExecRequest struct { + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VReplicationExecRequest) Reset() { *m = VReplicationExecRequest{} } +func (m *VReplicationExecRequest) String() string { return proto.CompactTextString(m) } +func (*VReplicationExecRequest) ProtoMessage() {} +func (*VReplicationExecRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{62} +} + +func (m *VReplicationExecRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VReplicationExecRequest.Unmarshal(m, b) +} +func (m *VReplicationExecRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VReplicationExecRequest.Marshal(b, m, deterministic) +} +func (m *VReplicationExecRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VReplicationExecRequest.Merge(m, src) +} +func (m *VReplicationExecRequest) XXX_Size() int { + return xxx_messageInfo_VReplicationExecRequest.Size(m) +} +func (m *VReplicationExecRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VReplicationExecRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VReplicationExecRequest proto.InternalMessageInfo + +func (m *VReplicationExecRequest) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +type VReplicationExecResponse struct { + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VReplicationExecResponse) Reset() { *m = VReplicationExecResponse{} } +func (m *VReplicationExecResponse) String() string { return proto.CompactTextString(m) } +func (*VReplicationExecResponse) ProtoMessage() {} +func (*VReplicationExecResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{63} +} + +func (m *VReplicationExecResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VReplicationExecResponse.Unmarshal(m, b) +} +func (m *VReplicationExecResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VReplicationExecResponse.Marshal(b, m, deterministic) +} +func (m *VReplicationExecResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VReplicationExecResponse.Merge(m, src) +} +func (m *VReplicationExecResponse) XXX_Size() int { + return xxx_messageInfo_VReplicationExecResponse.Size(m) +} +func (m *VReplicationExecResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VReplicationExecResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VReplicationExecResponse proto.InternalMessageInfo + +func (m *VReplicationExecResponse) GetResult() *query.QueryResult { + if m != nil { + return m.Result + } + return nil +} + +type VReplicationWaitForPosRequest struct { + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Position string `protobuf:"bytes,2,opt,name=position,proto3" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VReplicationWaitForPosRequest) Reset() { *m = VReplicationWaitForPosRequest{} } +func (m *VReplicationWaitForPosRequest) String() string { return proto.CompactTextString(m) } +func (*VReplicationWaitForPosRequest) ProtoMessage() {} +func (*VReplicationWaitForPosRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{64} +} + +func (m *VReplicationWaitForPosRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VReplicationWaitForPosRequest.Unmarshal(m, b) +} +func (m *VReplicationWaitForPosRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VReplicationWaitForPosRequest.Marshal(b, m, deterministic) +} +func (m *VReplicationWaitForPosRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VReplicationWaitForPosRequest.Merge(m, src) +} +func (m *VReplicationWaitForPosRequest) XXX_Size() int { + return xxx_messageInfo_VReplicationWaitForPosRequest.Size(m) +} +func (m *VReplicationWaitForPosRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VReplicationWaitForPosRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VReplicationWaitForPosRequest proto.InternalMessageInfo + +func (m *VReplicationWaitForPosRequest) GetId() int64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *VReplicationWaitForPosRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +type VReplicationWaitForPosResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VReplicationWaitForPosResponse) Reset() { *m = VReplicationWaitForPosResponse{} } +func (m *VReplicationWaitForPosResponse) String() string { return proto.CompactTextString(m) } +func (*VReplicationWaitForPosResponse) ProtoMessage() {} +func (*VReplicationWaitForPosResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{65} +} + +func (m *VReplicationWaitForPosResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VReplicationWaitForPosResponse.Unmarshal(m, b) +} +func (m *VReplicationWaitForPosResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VReplicationWaitForPosResponse.Marshal(b, m, deterministic) +} +func (m *VReplicationWaitForPosResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VReplicationWaitForPosResponse.Merge(m, src) +} +func (m *VReplicationWaitForPosResponse) XXX_Size() int { + return xxx_messageInfo_VReplicationWaitForPosResponse.Size(m) +} +func (m *VReplicationWaitForPosResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VReplicationWaitForPosResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VReplicationWaitForPosResponse proto.InternalMessageInfo + +type InitMasterRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitMasterRequest) Reset() { *m = InitMasterRequest{} } +func (m *InitMasterRequest) String() string { return proto.CompactTextString(m) } +func (*InitMasterRequest) ProtoMessage() {} +func (*InitMasterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{66} +} + +func (m *InitMasterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitMasterRequest.Unmarshal(m, b) +} +func (m *InitMasterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitMasterRequest.Marshal(b, m, deterministic) +} +func (m *InitMasterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitMasterRequest.Merge(m, src) +} +func (m *InitMasterRequest) XXX_Size() int { + return xxx_messageInfo_InitMasterRequest.Size(m) +} +func (m *InitMasterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitMasterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitMasterRequest proto.InternalMessageInfo + +type InitMasterResponse struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitMasterResponse) Reset() { *m = InitMasterResponse{} } +func (m *InitMasterResponse) String() string { return proto.CompactTextString(m) } +func (*InitMasterResponse) ProtoMessage() {} +func (*InitMasterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{67} +} + +func (m *InitMasterResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitMasterResponse.Unmarshal(m, b) +} +func (m *InitMasterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitMasterResponse.Marshal(b, m, deterministic) +} +func (m *InitMasterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitMasterResponse.Merge(m, src) +} +func (m *InitMasterResponse) XXX_Size() int { + return xxx_messageInfo_InitMasterResponse.Size(m) +} +func (m *InitMasterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitMasterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InitMasterResponse proto.InternalMessageInfo + +func (m *InitMasterResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +type PopulateReparentJournalRequest struct { + TimeCreatedNs int64 `protobuf:"varint,1,opt,name=time_created_ns,json=timeCreatedNs,proto3" json:"time_created_ns,omitempty"` + ActionName string `protobuf:"bytes,2,opt,name=action_name,json=actionName,proto3" json:"action_name,omitempty"` + MasterAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=master_alias,json=masterAlias,proto3" json:"master_alias,omitempty"` + ReplicationPosition string `protobuf:"bytes,4,opt,name=replication_position,json=replicationPosition,proto3" json:"replication_position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PopulateReparentJournalRequest) Reset() { *m = PopulateReparentJournalRequest{} } +func (m *PopulateReparentJournalRequest) String() string { return proto.CompactTextString(m) } +func (*PopulateReparentJournalRequest) ProtoMessage() {} +func (*PopulateReparentJournalRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{68} +} + +func (m *PopulateReparentJournalRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PopulateReparentJournalRequest.Unmarshal(m, b) +} +func (m *PopulateReparentJournalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PopulateReparentJournalRequest.Marshal(b, m, deterministic) +} +func (m *PopulateReparentJournalRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PopulateReparentJournalRequest.Merge(m, src) +} +func (m *PopulateReparentJournalRequest) XXX_Size() int { + return xxx_messageInfo_PopulateReparentJournalRequest.Size(m) +} +func (m *PopulateReparentJournalRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PopulateReparentJournalRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PopulateReparentJournalRequest proto.InternalMessageInfo + +func (m *PopulateReparentJournalRequest) GetTimeCreatedNs() int64 { + if m != nil { + return m.TimeCreatedNs + } + return 0 +} + +func (m *PopulateReparentJournalRequest) GetActionName() string { + if m != nil { + return m.ActionName + } + return "" +} + +func (m *PopulateReparentJournalRequest) GetMasterAlias() *topodata.TabletAlias { + if m != nil { + return m.MasterAlias + } + return nil +} + +func (m *PopulateReparentJournalRequest) GetReplicationPosition() string { + if m != nil { + return m.ReplicationPosition + } + return "" +} + +type PopulateReparentJournalResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PopulateReparentJournalResponse) Reset() { *m = PopulateReparentJournalResponse{} } +func (m *PopulateReparentJournalResponse) String() string { return proto.CompactTextString(m) } +func (*PopulateReparentJournalResponse) ProtoMessage() {} +func (*PopulateReparentJournalResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{69} +} + +func (m *PopulateReparentJournalResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PopulateReparentJournalResponse.Unmarshal(m, b) +} +func (m *PopulateReparentJournalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PopulateReparentJournalResponse.Marshal(b, m, deterministic) +} +func (m *PopulateReparentJournalResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PopulateReparentJournalResponse.Merge(m, src) +} +func (m *PopulateReparentJournalResponse) XXX_Size() int { + return xxx_messageInfo_PopulateReparentJournalResponse.Size(m) +} +func (m *PopulateReparentJournalResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PopulateReparentJournalResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PopulateReparentJournalResponse proto.InternalMessageInfo + +type InitReplicaRequest struct { + Parent *topodata.TabletAlias `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + ReplicationPosition string `protobuf:"bytes,2,opt,name=replication_position,json=replicationPosition,proto3" json:"replication_position,omitempty"` + TimeCreatedNs int64 `protobuf:"varint,3,opt,name=time_created_ns,json=timeCreatedNs,proto3" json:"time_created_ns,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitReplicaRequest) Reset() { *m = InitReplicaRequest{} } +func (m *InitReplicaRequest) String() string { return proto.CompactTextString(m) } +func (*InitReplicaRequest) ProtoMessage() {} +func (*InitReplicaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{70} +} + +func (m *InitReplicaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitReplicaRequest.Unmarshal(m, b) +} +func (m *InitReplicaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitReplicaRequest.Marshal(b, m, deterministic) +} +func (m *InitReplicaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitReplicaRequest.Merge(m, src) +} +func (m *InitReplicaRequest) XXX_Size() int { + return xxx_messageInfo_InitReplicaRequest.Size(m) +} +func (m *InitReplicaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitReplicaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitReplicaRequest proto.InternalMessageInfo + +func (m *InitReplicaRequest) GetParent() *topodata.TabletAlias { + if m != nil { + return m.Parent + } + return nil +} + +func (m *InitReplicaRequest) GetReplicationPosition() string { + if m != nil { + return m.ReplicationPosition + } + return "" +} + +func (m *InitReplicaRequest) GetTimeCreatedNs() int64 { + if m != nil { + return m.TimeCreatedNs + } + return 0 +} + +type InitReplicaResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitReplicaResponse) Reset() { *m = InitReplicaResponse{} } +func (m *InitReplicaResponse) String() string { return proto.CompactTextString(m) } +func (*InitReplicaResponse) ProtoMessage() {} +func (*InitReplicaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{71} +} + +func (m *InitReplicaResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitReplicaResponse.Unmarshal(m, b) +} +func (m *InitReplicaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitReplicaResponse.Marshal(b, m, deterministic) +} +func (m *InitReplicaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitReplicaResponse.Merge(m, src) +} +func (m *InitReplicaResponse) XXX_Size() int { + return xxx_messageInfo_InitReplicaResponse.Size(m) +} +func (m *InitReplicaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitReplicaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InitReplicaResponse proto.InternalMessageInfo + +type DemoteMasterRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DemoteMasterRequest) Reset() { *m = DemoteMasterRequest{} } +func (m *DemoteMasterRequest) String() string { return proto.CompactTextString(m) } +func (*DemoteMasterRequest) ProtoMessage() {} +func (*DemoteMasterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{72} +} + +func (m *DemoteMasterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DemoteMasterRequest.Unmarshal(m, b) +} +func (m *DemoteMasterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DemoteMasterRequest.Marshal(b, m, deterministic) +} +func (m *DemoteMasterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DemoteMasterRequest.Merge(m, src) +} +func (m *DemoteMasterRequest) XXX_Size() int { + return xxx_messageInfo_DemoteMasterRequest.Size(m) +} +func (m *DemoteMasterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DemoteMasterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DemoteMasterRequest proto.InternalMessageInfo + +type DemoteMasterResponse struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DemoteMasterResponse) Reset() { *m = DemoteMasterResponse{} } +func (m *DemoteMasterResponse) String() string { return proto.CompactTextString(m) } +func (*DemoteMasterResponse) ProtoMessage() {} +func (*DemoteMasterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{73} +} + +func (m *DemoteMasterResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DemoteMasterResponse.Unmarshal(m, b) +} +func (m *DemoteMasterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DemoteMasterResponse.Marshal(b, m, deterministic) +} +func (m *DemoteMasterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DemoteMasterResponse.Merge(m, src) +} +func (m *DemoteMasterResponse) XXX_Size() int { + return xxx_messageInfo_DemoteMasterResponse.Size(m) +} +func (m *DemoteMasterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DemoteMasterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DemoteMasterResponse proto.InternalMessageInfo + +func (m *DemoteMasterResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +type UndoDemoteMasterRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UndoDemoteMasterRequest) Reset() { *m = UndoDemoteMasterRequest{} } +func (m *UndoDemoteMasterRequest) String() string { return proto.CompactTextString(m) } +func (*UndoDemoteMasterRequest) ProtoMessage() {} +func (*UndoDemoteMasterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{74} +} + +func (m *UndoDemoteMasterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UndoDemoteMasterRequest.Unmarshal(m, b) +} +func (m *UndoDemoteMasterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UndoDemoteMasterRequest.Marshal(b, m, deterministic) +} +func (m *UndoDemoteMasterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UndoDemoteMasterRequest.Merge(m, src) +} +func (m *UndoDemoteMasterRequest) XXX_Size() int { + return xxx_messageInfo_UndoDemoteMasterRequest.Size(m) +} +func (m *UndoDemoteMasterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UndoDemoteMasterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UndoDemoteMasterRequest proto.InternalMessageInfo + +type UndoDemoteMasterResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UndoDemoteMasterResponse) Reset() { *m = UndoDemoteMasterResponse{} } +func (m *UndoDemoteMasterResponse) String() string { return proto.CompactTextString(m) } +func (*UndoDemoteMasterResponse) ProtoMessage() {} +func (*UndoDemoteMasterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{75} +} + +func (m *UndoDemoteMasterResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UndoDemoteMasterResponse.Unmarshal(m, b) +} +func (m *UndoDemoteMasterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UndoDemoteMasterResponse.Marshal(b, m, deterministic) +} +func (m *UndoDemoteMasterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UndoDemoteMasterResponse.Merge(m, src) +} +func (m *UndoDemoteMasterResponse) XXX_Size() int { + return xxx_messageInfo_UndoDemoteMasterResponse.Size(m) +} +func (m *UndoDemoteMasterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UndoDemoteMasterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UndoDemoteMasterResponse proto.InternalMessageInfo + +type ReplicaWasPromotedRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReplicaWasPromotedRequest) Reset() { *m = ReplicaWasPromotedRequest{} } +func (m *ReplicaWasPromotedRequest) String() string { return proto.CompactTextString(m) } +func (*ReplicaWasPromotedRequest) ProtoMessage() {} +func (*ReplicaWasPromotedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{76} +} + +func (m *ReplicaWasPromotedRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReplicaWasPromotedRequest.Unmarshal(m, b) +} +func (m *ReplicaWasPromotedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReplicaWasPromotedRequest.Marshal(b, m, deterministic) +} +func (m *ReplicaWasPromotedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaWasPromotedRequest.Merge(m, src) +} +func (m *ReplicaWasPromotedRequest) XXX_Size() int { + return xxx_messageInfo_ReplicaWasPromotedRequest.Size(m) +} +func (m *ReplicaWasPromotedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaWasPromotedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaWasPromotedRequest proto.InternalMessageInfo + +type ReplicaWasPromotedResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReplicaWasPromotedResponse) Reset() { *m = ReplicaWasPromotedResponse{} } +func (m *ReplicaWasPromotedResponse) String() string { return proto.CompactTextString(m) } +func (*ReplicaWasPromotedResponse) ProtoMessage() {} +func (*ReplicaWasPromotedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{77} +} + +func (m *ReplicaWasPromotedResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReplicaWasPromotedResponse.Unmarshal(m, b) +} +func (m *ReplicaWasPromotedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReplicaWasPromotedResponse.Marshal(b, m, deterministic) +} +func (m *ReplicaWasPromotedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaWasPromotedResponse.Merge(m, src) +} +func (m *ReplicaWasPromotedResponse) XXX_Size() int { + return xxx_messageInfo_ReplicaWasPromotedResponse.Size(m) +} +func (m *ReplicaWasPromotedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaWasPromotedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaWasPromotedResponse proto.InternalMessageInfo + +type SetMasterRequest struct { + Parent *topodata.TabletAlias `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + TimeCreatedNs int64 `protobuf:"varint,2,opt,name=time_created_ns,json=timeCreatedNs,proto3" json:"time_created_ns,omitempty"` + ForceStartReplication bool `protobuf:"varint,3,opt,name=force_start_replication,json=forceStartReplication,proto3" json:"force_start_replication,omitempty"` + WaitPosition string `protobuf:"bytes,4,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetMasterRequest) Reset() { *m = SetMasterRequest{} } +func (m *SetMasterRequest) String() string { return proto.CompactTextString(m) } +func (*SetMasterRequest) ProtoMessage() {} +func (*SetMasterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{78} +} + +func (m *SetMasterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetMasterRequest.Unmarshal(m, b) +} +func (m *SetMasterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetMasterRequest.Marshal(b, m, deterministic) +} +func (m *SetMasterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetMasterRequest.Merge(m, src) +} +func (m *SetMasterRequest) XXX_Size() int { + return xxx_messageInfo_SetMasterRequest.Size(m) +} +func (m *SetMasterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetMasterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetMasterRequest proto.InternalMessageInfo + +func (m *SetMasterRequest) GetParent() *topodata.TabletAlias { + if m != nil { + return m.Parent + } + return nil +} + +func (m *SetMasterRequest) GetTimeCreatedNs() int64 { + if m != nil { + return m.TimeCreatedNs + } + return 0 +} + +func (m *SetMasterRequest) GetForceStartReplication() bool { + if m != nil { + return m.ForceStartReplication + } + return false +} + +func (m *SetMasterRequest) GetWaitPosition() string { + if m != nil { + return m.WaitPosition + } + return "" +} + +type SetMasterResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetMasterResponse) Reset() { *m = SetMasterResponse{} } +func (m *SetMasterResponse) String() string { return proto.CompactTextString(m) } +func (*SetMasterResponse) ProtoMessage() {} +func (*SetMasterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{79} +} + +func (m *SetMasterResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetMasterResponse.Unmarshal(m, b) +} +func (m *SetMasterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetMasterResponse.Marshal(b, m, deterministic) +} +func (m *SetMasterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetMasterResponse.Merge(m, src) +} +func (m *SetMasterResponse) XXX_Size() int { + return xxx_messageInfo_SetMasterResponse.Size(m) +} +func (m *SetMasterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetMasterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetMasterResponse proto.InternalMessageInfo + +type ReplicaWasRestartedRequest struct { + // the parent alias the tablet should have + Parent *topodata.TabletAlias `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReplicaWasRestartedRequest) Reset() { *m = ReplicaWasRestartedRequest{} } +func (m *ReplicaWasRestartedRequest) String() string { return proto.CompactTextString(m) } +func (*ReplicaWasRestartedRequest) ProtoMessage() {} +func (*ReplicaWasRestartedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{80} +} + +func (m *ReplicaWasRestartedRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReplicaWasRestartedRequest.Unmarshal(m, b) +} +func (m *ReplicaWasRestartedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReplicaWasRestartedRequest.Marshal(b, m, deterministic) +} +func (m *ReplicaWasRestartedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaWasRestartedRequest.Merge(m, src) +} +func (m *ReplicaWasRestartedRequest) XXX_Size() int { + return xxx_messageInfo_ReplicaWasRestartedRequest.Size(m) +} +func (m *ReplicaWasRestartedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaWasRestartedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaWasRestartedRequest proto.InternalMessageInfo + +func (m *ReplicaWasRestartedRequest) GetParent() *topodata.TabletAlias { + if m != nil { + return m.Parent + } + return nil +} + +type ReplicaWasRestartedResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReplicaWasRestartedResponse) Reset() { *m = ReplicaWasRestartedResponse{} } +func (m *ReplicaWasRestartedResponse) String() string { return proto.CompactTextString(m) } +func (*ReplicaWasRestartedResponse) ProtoMessage() {} +func (*ReplicaWasRestartedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{81} +} + +func (m *ReplicaWasRestartedResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReplicaWasRestartedResponse.Unmarshal(m, b) +} +func (m *ReplicaWasRestartedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReplicaWasRestartedResponse.Marshal(b, m, deterministic) +} +func (m *ReplicaWasRestartedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaWasRestartedResponse.Merge(m, src) +} +func (m *ReplicaWasRestartedResponse) XXX_Size() int { + return xxx_messageInfo_ReplicaWasRestartedResponse.Size(m) +} +func (m *ReplicaWasRestartedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaWasRestartedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaWasRestartedResponse proto.InternalMessageInfo + +type StopReplicationAndGetStatusRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopReplicationAndGetStatusRequest) Reset() { *m = StopReplicationAndGetStatusRequest{} } +func (m *StopReplicationAndGetStatusRequest) String() string { return proto.CompactTextString(m) } +func (*StopReplicationAndGetStatusRequest) ProtoMessage() {} +func (*StopReplicationAndGetStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{82} +} + +func (m *StopReplicationAndGetStatusRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopReplicationAndGetStatusRequest.Unmarshal(m, b) +} +func (m *StopReplicationAndGetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopReplicationAndGetStatusRequest.Marshal(b, m, deterministic) +} +func (m *StopReplicationAndGetStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopReplicationAndGetStatusRequest.Merge(m, src) +} +func (m *StopReplicationAndGetStatusRequest) XXX_Size() int { + return xxx_messageInfo_StopReplicationAndGetStatusRequest.Size(m) +} +func (m *StopReplicationAndGetStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopReplicationAndGetStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopReplicationAndGetStatusRequest proto.InternalMessageInfo + +type StopReplicationAndGetStatusResponse struct { + Status *replicationdata.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopReplicationAndGetStatusResponse) Reset() { *m = StopReplicationAndGetStatusResponse{} } +func (m *StopReplicationAndGetStatusResponse) String() string { return proto.CompactTextString(m) } +func (*StopReplicationAndGetStatusResponse) ProtoMessage() {} +func (*StopReplicationAndGetStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{83} +} + +func (m *StopReplicationAndGetStatusResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopReplicationAndGetStatusResponse.Unmarshal(m, b) +} +func (m *StopReplicationAndGetStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopReplicationAndGetStatusResponse.Marshal(b, m, deterministic) +} +func (m *StopReplicationAndGetStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopReplicationAndGetStatusResponse.Merge(m, src) +} +func (m *StopReplicationAndGetStatusResponse) XXX_Size() int { + return xxx_messageInfo_StopReplicationAndGetStatusResponse.Size(m) +} +func (m *StopReplicationAndGetStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopReplicationAndGetStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopReplicationAndGetStatusResponse proto.InternalMessageInfo + +func (m *StopReplicationAndGetStatusResponse) GetStatus() *replicationdata.Status { + if m != nil { + return m.Status + } + return nil +} + +type PromoteReplicaRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PromoteReplicaRequest) Reset() { *m = PromoteReplicaRequest{} } +func (m *PromoteReplicaRequest) String() string { return proto.CompactTextString(m) } +func (*PromoteReplicaRequest) ProtoMessage() {} +func (*PromoteReplicaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{84} +} + +func (m *PromoteReplicaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PromoteReplicaRequest.Unmarshal(m, b) +} +func (m *PromoteReplicaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PromoteReplicaRequest.Marshal(b, m, deterministic) +} +func (m *PromoteReplicaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PromoteReplicaRequest.Merge(m, src) +} +func (m *PromoteReplicaRequest) XXX_Size() int { + return xxx_messageInfo_PromoteReplicaRequest.Size(m) +} +func (m *PromoteReplicaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PromoteReplicaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PromoteReplicaRequest proto.InternalMessageInfo + +type PromoteReplicaResponse struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PromoteReplicaResponse) Reset() { *m = PromoteReplicaResponse{} } +func (m *PromoteReplicaResponse) String() string { return proto.CompactTextString(m) } +func (*PromoteReplicaResponse) ProtoMessage() {} +func (*PromoteReplicaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{85} +} + +func (m *PromoteReplicaResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PromoteReplicaResponse.Unmarshal(m, b) +} +func (m *PromoteReplicaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PromoteReplicaResponse.Marshal(b, m, deterministic) +} +func (m *PromoteReplicaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PromoteReplicaResponse.Merge(m, src) +} +func (m *PromoteReplicaResponse) XXX_Size() int { + return xxx_messageInfo_PromoteReplicaResponse.Size(m) +} +func (m *PromoteReplicaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PromoteReplicaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PromoteReplicaResponse proto.InternalMessageInfo + +func (m *PromoteReplicaResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +type BackupRequest struct { + Concurrency int64 `protobuf:"varint,1,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + AllowMaster bool `protobuf:"varint,2,opt,name=allowMaster,proto3" json:"allowMaster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BackupRequest) Reset() { *m = BackupRequest{} } +func (m *BackupRequest) String() string { return proto.CompactTextString(m) } +func (*BackupRequest) ProtoMessage() {} +func (*BackupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{86} +} + +func (m *BackupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BackupRequest.Unmarshal(m, b) +} +func (m *BackupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BackupRequest.Marshal(b, m, deterministic) +} +func (m *BackupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupRequest.Merge(m, src) +} +func (m *BackupRequest) XXX_Size() int { + return xxx_messageInfo_BackupRequest.Size(m) +} +func (m *BackupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BackupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupRequest proto.InternalMessageInfo + +func (m *BackupRequest) GetConcurrency() int64 { + if m != nil { + return m.Concurrency + } + return 0 +} + +func (m *BackupRequest) GetAllowMaster() bool { + if m != nil { + return m.AllowMaster + } + return false +} + +type BackupResponse struct { + Event *logutil.Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BackupResponse) Reset() { *m = BackupResponse{} } +func (m *BackupResponse) String() string { return proto.CompactTextString(m) } +func (*BackupResponse) ProtoMessage() {} +func (*BackupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{87} +} + +func (m *BackupResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BackupResponse.Unmarshal(m, b) +} +func (m *BackupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BackupResponse.Marshal(b, m, deterministic) +} +func (m *BackupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupResponse.Merge(m, src) +} +func (m *BackupResponse) XXX_Size() int { + return xxx_messageInfo_BackupResponse.Size(m) +} +func (m *BackupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BackupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupResponse proto.InternalMessageInfo + +func (m *BackupResponse) GetEvent() *logutil.Event { + if m != nil { + return m.Event + } + return nil +} + +type RestoreFromBackupRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreFromBackupRequest) Reset() { *m = RestoreFromBackupRequest{} } +func (m *RestoreFromBackupRequest) String() string { return proto.CompactTextString(m) } +func (*RestoreFromBackupRequest) ProtoMessage() {} +func (*RestoreFromBackupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{88} +} + +func (m *RestoreFromBackupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreFromBackupRequest.Unmarshal(m, b) +} +func (m *RestoreFromBackupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreFromBackupRequest.Marshal(b, m, deterministic) +} +func (m *RestoreFromBackupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreFromBackupRequest.Merge(m, src) +} +func (m *RestoreFromBackupRequest) XXX_Size() int { + return xxx_messageInfo_RestoreFromBackupRequest.Size(m) +} +func (m *RestoreFromBackupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreFromBackupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreFromBackupRequest proto.InternalMessageInfo + +type RestoreFromBackupResponse struct { + Event *logutil.Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreFromBackupResponse) Reset() { *m = RestoreFromBackupResponse{} } +func (m *RestoreFromBackupResponse) String() string { return proto.CompactTextString(m) } +func (*RestoreFromBackupResponse) ProtoMessage() {} +func (*RestoreFromBackupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{89} +} + +func (m *RestoreFromBackupResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreFromBackupResponse.Unmarshal(m, b) +} +func (m *RestoreFromBackupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreFromBackupResponse.Marshal(b, m, deterministic) +} +func (m *RestoreFromBackupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreFromBackupResponse.Merge(m, src) +} +func (m *RestoreFromBackupResponse) XXX_Size() int { + return xxx_messageInfo_RestoreFromBackupResponse.Size(m) +} +func (m *RestoreFromBackupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreFromBackupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreFromBackupResponse proto.InternalMessageInfo + +func (m *RestoreFromBackupResponse) GetEvent() *logutil.Event { + if m != nil { + return m.Event + } + return nil +} + +// Deprecated +type SlaveStatusRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SlaveStatusRequest) Reset() { *m = SlaveStatusRequest{} } +func (m *SlaveStatusRequest) String() string { return proto.CompactTextString(m) } +func (*SlaveStatusRequest) ProtoMessage() {} +func (*SlaveStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{90} +} + +func (m *SlaveStatusRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SlaveStatusRequest.Unmarshal(m, b) +} +func (m *SlaveStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SlaveStatusRequest.Marshal(b, m, deterministic) +} +func (m *SlaveStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SlaveStatusRequest.Merge(m, src) +} +func (m *SlaveStatusRequest) XXX_Size() int { + return xxx_messageInfo_SlaveStatusRequest.Size(m) +} +func (m *SlaveStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SlaveStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SlaveStatusRequest proto.InternalMessageInfo + +// Deprecated +type SlaveStatusResponse struct { + Status *replicationdata.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SlaveStatusResponse) Reset() { *m = SlaveStatusResponse{} } +func (m *SlaveStatusResponse) String() string { return proto.CompactTextString(m) } +func (*SlaveStatusResponse) ProtoMessage() {} +func (*SlaveStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{91} +} + +func (m *SlaveStatusResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SlaveStatusResponse.Unmarshal(m, b) +} +func (m *SlaveStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SlaveStatusResponse.Marshal(b, m, deterministic) +} +func (m *SlaveStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SlaveStatusResponse.Merge(m, src) +} +func (m *SlaveStatusResponse) XXX_Size() int { + return xxx_messageInfo_SlaveStatusResponse.Size(m) +} +func (m *SlaveStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SlaveStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SlaveStatusResponse proto.InternalMessageInfo + +func (m *SlaveStatusResponse) GetStatus() *replicationdata.Status { + if m != nil { + return m.Status + } + return nil +} + +// Deprecated +type StopSlaveRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopSlaveRequest) Reset() { *m = StopSlaveRequest{} } +func (m *StopSlaveRequest) String() string { return proto.CompactTextString(m) } +func (*StopSlaveRequest) ProtoMessage() {} +func (*StopSlaveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{92} +} + +func (m *StopSlaveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopSlaveRequest.Unmarshal(m, b) +} +func (m *StopSlaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopSlaveRequest.Marshal(b, m, deterministic) +} +func (m *StopSlaveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopSlaveRequest.Merge(m, src) +} +func (m *StopSlaveRequest) XXX_Size() int { + return xxx_messageInfo_StopSlaveRequest.Size(m) +} +func (m *StopSlaveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopSlaveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopSlaveRequest proto.InternalMessageInfo + +// Deprecated +type StopSlaveResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopSlaveResponse) Reset() { *m = StopSlaveResponse{} } +func (m *StopSlaveResponse) String() string { return proto.CompactTextString(m) } +func (*StopSlaveResponse) ProtoMessage() {} +func (*StopSlaveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{93} +} + +func (m *StopSlaveResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopSlaveResponse.Unmarshal(m, b) +} +func (m *StopSlaveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopSlaveResponse.Marshal(b, m, deterministic) +} +func (m *StopSlaveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopSlaveResponse.Merge(m, src) +} +func (m *StopSlaveResponse) XXX_Size() int { + return xxx_messageInfo_StopSlaveResponse.Size(m) +} +func (m *StopSlaveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopSlaveResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopSlaveResponse proto.InternalMessageInfo + +// Deprecated +type StopSlaveMinimumRequest struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + WaitTimeout int64 `protobuf:"varint,2,opt,name=wait_timeout,json=waitTimeout,proto3" json:"wait_timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopSlaveMinimumRequest) Reset() { *m = StopSlaveMinimumRequest{} } +func (m *StopSlaveMinimumRequest) String() string { return proto.CompactTextString(m) } +func (*StopSlaveMinimumRequest) ProtoMessage() {} +func (*StopSlaveMinimumRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{94} +} + +func (m *StopSlaveMinimumRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopSlaveMinimumRequest.Unmarshal(m, b) +} +func (m *StopSlaveMinimumRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopSlaveMinimumRequest.Marshal(b, m, deterministic) +} +func (m *StopSlaveMinimumRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopSlaveMinimumRequest.Merge(m, src) +} +func (m *StopSlaveMinimumRequest) XXX_Size() int { + return xxx_messageInfo_StopSlaveMinimumRequest.Size(m) +} +func (m *StopSlaveMinimumRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopSlaveMinimumRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopSlaveMinimumRequest proto.InternalMessageInfo + +func (m *StopSlaveMinimumRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *StopSlaveMinimumRequest) GetWaitTimeout() int64 { + if m != nil { + return m.WaitTimeout + } + return 0 +} + +// Deprecated +type StopSlaveMinimumResponse struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopSlaveMinimumResponse) Reset() { *m = StopSlaveMinimumResponse{} } +func (m *StopSlaveMinimumResponse) String() string { return proto.CompactTextString(m) } +func (*StopSlaveMinimumResponse) ProtoMessage() {} +func (*StopSlaveMinimumResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{95} +} + +func (m *StopSlaveMinimumResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopSlaveMinimumResponse.Unmarshal(m, b) +} +func (m *StopSlaveMinimumResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopSlaveMinimumResponse.Marshal(b, m, deterministic) +} +func (m *StopSlaveMinimumResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopSlaveMinimumResponse.Merge(m, src) +} +func (m *StopSlaveMinimumResponse) XXX_Size() int { + return xxx_messageInfo_StopSlaveMinimumResponse.Size(m) +} +func (m *StopSlaveMinimumResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopSlaveMinimumResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopSlaveMinimumResponse proto.InternalMessageInfo + +func (m *StopSlaveMinimumResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +// Deprecated +type StartSlaveRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartSlaveRequest) Reset() { *m = StartSlaveRequest{} } +func (m *StartSlaveRequest) String() string { return proto.CompactTextString(m) } +func (*StartSlaveRequest) ProtoMessage() {} +func (*StartSlaveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{96} +} + +func (m *StartSlaveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartSlaveRequest.Unmarshal(m, b) +} +func (m *StartSlaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartSlaveRequest.Marshal(b, m, deterministic) +} +func (m *StartSlaveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartSlaveRequest.Merge(m, src) +} +func (m *StartSlaveRequest) XXX_Size() int { + return xxx_messageInfo_StartSlaveRequest.Size(m) +} +func (m *StartSlaveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartSlaveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartSlaveRequest proto.InternalMessageInfo + +// Deprecated +type StartSlaveResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartSlaveResponse) Reset() { *m = StartSlaveResponse{} } +func (m *StartSlaveResponse) String() string { return proto.CompactTextString(m) } +func (*StartSlaveResponse) ProtoMessage() {} +func (*StartSlaveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{97} +} + +func (m *StartSlaveResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartSlaveResponse.Unmarshal(m, b) +} +func (m *StartSlaveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartSlaveResponse.Marshal(b, m, deterministic) +} +func (m *StartSlaveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartSlaveResponse.Merge(m, src) +} +func (m *StartSlaveResponse) XXX_Size() int { + return xxx_messageInfo_StartSlaveResponse.Size(m) +} +func (m *StartSlaveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartSlaveResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartSlaveResponse proto.InternalMessageInfo + +// Deprecated +type StartSlaveUntilAfterRequest struct { + Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + WaitTimeout int64 `protobuf:"varint,2,opt,name=wait_timeout,json=waitTimeout,proto3" json:"wait_timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartSlaveUntilAfterRequest) Reset() { *m = StartSlaveUntilAfterRequest{} } +func (m *StartSlaveUntilAfterRequest) String() string { return proto.CompactTextString(m) } +func (*StartSlaveUntilAfterRequest) ProtoMessage() {} +func (*StartSlaveUntilAfterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{98} +} + +func (m *StartSlaveUntilAfterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartSlaveUntilAfterRequest.Unmarshal(m, b) +} +func (m *StartSlaveUntilAfterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartSlaveUntilAfterRequest.Marshal(b, m, deterministic) +} +func (m *StartSlaveUntilAfterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartSlaveUntilAfterRequest.Merge(m, src) +} +func (m *StartSlaveUntilAfterRequest) XXX_Size() int { + return xxx_messageInfo_StartSlaveUntilAfterRequest.Size(m) +} +func (m *StartSlaveUntilAfterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartSlaveUntilAfterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartSlaveUntilAfterRequest proto.InternalMessageInfo + +func (m *StartSlaveUntilAfterRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *StartSlaveUntilAfterRequest) GetWaitTimeout() int64 { + if m != nil { + return m.WaitTimeout + } + return 0 +} + +// Deprecated +type StartSlaveUntilAfterResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartSlaveUntilAfterResponse) Reset() { *m = StartSlaveUntilAfterResponse{} } +func (m *StartSlaveUntilAfterResponse) String() string { return proto.CompactTextString(m) } +func (*StartSlaveUntilAfterResponse) ProtoMessage() {} +func (*StartSlaveUntilAfterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{99} +} + +func (m *StartSlaveUntilAfterResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartSlaveUntilAfterResponse.Unmarshal(m, b) +} +func (m *StartSlaveUntilAfterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartSlaveUntilAfterResponse.Marshal(b, m, deterministic) +} +func (m *StartSlaveUntilAfterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartSlaveUntilAfterResponse.Merge(m, src) +} +func (m *StartSlaveUntilAfterResponse) XXX_Size() int { + return xxx_messageInfo_StartSlaveUntilAfterResponse.Size(m) +} +func (m *StartSlaveUntilAfterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartSlaveUntilAfterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartSlaveUntilAfterResponse proto.InternalMessageInfo + +// Deprecated +type GetSlavesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSlavesRequest) Reset() { *m = GetSlavesRequest{} } +func (m *GetSlavesRequest) String() string { return proto.CompactTextString(m) } +func (*GetSlavesRequest) ProtoMessage() {} +func (*GetSlavesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{100} +} + +func (m *GetSlavesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSlavesRequest.Unmarshal(m, b) +} +func (m *GetSlavesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSlavesRequest.Marshal(b, m, deterministic) +} +func (m *GetSlavesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSlavesRequest.Merge(m, src) +} +func (m *GetSlavesRequest) XXX_Size() int { + return xxx_messageInfo_GetSlavesRequest.Size(m) +} +func (m *GetSlavesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSlavesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSlavesRequest proto.InternalMessageInfo + +// Deprecated +type GetSlavesResponse struct { + Addrs []string `protobuf:"bytes,1,rep,name=addrs,proto3" json:"addrs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSlavesResponse) Reset() { *m = GetSlavesResponse{} } +func (m *GetSlavesResponse) String() string { return proto.CompactTextString(m) } +func (*GetSlavesResponse) ProtoMessage() {} +func (*GetSlavesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{101} +} + +func (m *GetSlavesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSlavesResponse.Unmarshal(m, b) +} +func (m *GetSlavesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSlavesResponse.Marshal(b, m, deterministic) +} +func (m *GetSlavesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSlavesResponse.Merge(m, src) +} +func (m *GetSlavesResponse) XXX_Size() int { + return xxx_messageInfo_GetSlavesResponse.Size(m) +} +func (m *GetSlavesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetSlavesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSlavesResponse proto.InternalMessageInfo + +func (m *GetSlavesResponse) GetAddrs() []string { + if m != nil { + return m.Addrs + } + return nil +} + +// Deprecated +type InitSlaveRequest struct { + Parent *topodata.TabletAlias `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + ReplicationPosition string `protobuf:"bytes,2,opt,name=replication_position,json=replicationPosition,proto3" json:"replication_position,omitempty"` + TimeCreatedNs int64 `protobuf:"varint,3,opt,name=time_created_ns,json=timeCreatedNs,proto3" json:"time_created_ns,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitSlaveRequest) Reset() { *m = InitSlaveRequest{} } +func (m *InitSlaveRequest) String() string { return proto.CompactTextString(m) } +func (*InitSlaveRequest) ProtoMessage() {} +func (*InitSlaveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{102} +} + +func (m *InitSlaveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitSlaveRequest.Unmarshal(m, b) +} +func (m *InitSlaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitSlaveRequest.Marshal(b, m, deterministic) +} +func (m *InitSlaveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitSlaveRequest.Merge(m, src) +} +func (m *InitSlaveRequest) XXX_Size() int { + return xxx_messageInfo_InitSlaveRequest.Size(m) +} +func (m *InitSlaveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitSlaveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitSlaveRequest proto.InternalMessageInfo + +func (m *InitSlaveRequest) GetParent() *topodata.TabletAlias { + if m != nil { + return m.Parent + } + return nil +} + +func (m *InitSlaveRequest) GetReplicationPosition() string { + if m != nil { + return m.ReplicationPosition + } + return "" +} + +func (m *InitSlaveRequest) GetTimeCreatedNs() int64 { + if m != nil { + return m.TimeCreatedNs + } + return 0 +} + +// Deprecated +type InitSlaveResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitSlaveResponse) Reset() { *m = InitSlaveResponse{} } +func (m *InitSlaveResponse) String() string { return proto.CompactTextString(m) } +func (*InitSlaveResponse) ProtoMessage() {} +func (*InitSlaveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{103} +} + +func (m *InitSlaveResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitSlaveResponse.Unmarshal(m, b) +} +func (m *InitSlaveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitSlaveResponse.Marshal(b, m, deterministic) +} +func (m *InitSlaveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitSlaveResponse.Merge(m, src) +} +func (m *InitSlaveResponse) XXX_Size() int { + return xxx_messageInfo_InitSlaveResponse.Size(m) +} +func (m *InitSlaveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitSlaveResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InitSlaveResponse proto.InternalMessageInfo + +// Deprecated +type SlaveWasPromotedRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SlaveWasPromotedRequest) Reset() { *m = SlaveWasPromotedRequest{} } +func (m *SlaveWasPromotedRequest) String() string { return proto.CompactTextString(m) } +func (*SlaveWasPromotedRequest) ProtoMessage() {} +func (*SlaveWasPromotedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{104} +} + +func (m *SlaveWasPromotedRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SlaveWasPromotedRequest.Unmarshal(m, b) +} +func (m *SlaveWasPromotedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SlaveWasPromotedRequest.Marshal(b, m, deterministic) +} +func (m *SlaveWasPromotedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SlaveWasPromotedRequest.Merge(m, src) +} +func (m *SlaveWasPromotedRequest) XXX_Size() int { + return xxx_messageInfo_SlaveWasPromotedRequest.Size(m) +} +func (m *SlaveWasPromotedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SlaveWasPromotedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SlaveWasPromotedRequest proto.InternalMessageInfo + +// Deprecated +type SlaveWasPromotedResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SlaveWasPromotedResponse) Reset() { *m = SlaveWasPromotedResponse{} } +func (m *SlaveWasPromotedResponse) String() string { return proto.CompactTextString(m) } +func (*SlaveWasPromotedResponse) ProtoMessage() {} +func (*SlaveWasPromotedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{105} +} + +func (m *SlaveWasPromotedResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SlaveWasPromotedResponse.Unmarshal(m, b) +} +func (m *SlaveWasPromotedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SlaveWasPromotedResponse.Marshal(b, m, deterministic) +} +func (m *SlaveWasPromotedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SlaveWasPromotedResponse.Merge(m, src) +} +func (m *SlaveWasPromotedResponse) XXX_Size() int { + return xxx_messageInfo_SlaveWasPromotedResponse.Size(m) +} +func (m *SlaveWasPromotedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SlaveWasPromotedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SlaveWasPromotedResponse proto.InternalMessageInfo + +// Deprecated +type SlaveWasRestartedRequest struct { + // the parent alias the tablet should have + Parent *topodata.TabletAlias `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SlaveWasRestartedRequest) Reset() { *m = SlaveWasRestartedRequest{} } +func (m *SlaveWasRestartedRequest) String() string { return proto.CompactTextString(m) } +func (*SlaveWasRestartedRequest) ProtoMessage() {} +func (*SlaveWasRestartedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{106} +} + +func (m *SlaveWasRestartedRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SlaveWasRestartedRequest.Unmarshal(m, b) +} +func (m *SlaveWasRestartedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SlaveWasRestartedRequest.Marshal(b, m, deterministic) +} +func (m *SlaveWasRestartedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SlaveWasRestartedRequest.Merge(m, src) +} +func (m *SlaveWasRestartedRequest) XXX_Size() int { + return xxx_messageInfo_SlaveWasRestartedRequest.Size(m) +} +func (m *SlaveWasRestartedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SlaveWasRestartedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SlaveWasRestartedRequest proto.InternalMessageInfo + +func (m *SlaveWasRestartedRequest) GetParent() *topodata.TabletAlias { + if m != nil { + return m.Parent + } + return nil +} + +// Deprecated +type SlaveWasRestartedResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SlaveWasRestartedResponse) Reset() { *m = SlaveWasRestartedResponse{} } +func (m *SlaveWasRestartedResponse) String() string { return proto.CompactTextString(m) } +func (*SlaveWasRestartedResponse) ProtoMessage() {} +func (*SlaveWasRestartedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9ac4f89e61ffa4, []int{107} +} + +func (m *SlaveWasRestartedResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SlaveWasRestartedResponse.Unmarshal(m, b) +} +func (m *SlaveWasRestartedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SlaveWasRestartedResponse.Marshal(b, m, deterministic) +} +func (m *SlaveWasRestartedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SlaveWasRestartedResponse.Merge(m, src) +} +func (m *SlaveWasRestartedResponse) XXX_Size() int { + return xxx_messageInfo_SlaveWasRestartedResponse.Size(m) +} +func (m *SlaveWasRestartedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SlaveWasRestartedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SlaveWasRestartedResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*TableDefinition)(nil), "tabletmanagerdata.TableDefinition") + proto.RegisterType((*SchemaDefinition)(nil), "tabletmanagerdata.SchemaDefinition") + proto.RegisterType((*SchemaChangeResult)(nil), "tabletmanagerdata.SchemaChangeResult") + proto.RegisterType((*UserPermission)(nil), "tabletmanagerdata.UserPermission") + proto.RegisterMapType((map[string]string)(nil), "tabletmanagerdata.UserPermission.PrivilegesEntry") + proto.RegisterType((*DbPermission)(nil), "tabletmanagerdata.DbPermission") + proto.RegisterMapType((map[string]string)(nil), "tabletmanagerdata.DbPermission.PrivilegesEntry") + proto.RegisterType((*Permissions)(nil), "tabletmanagerdata.Permissions") + proto.RegisterType((*PingRequest)(nil), "tabletmanagerdata.PingRequest") + proto.RegisterType((*PingResponse)(nil), "tabletmanagerdata.PingResponse") + proto.RegisterType((*SleepRequest)(nil), "tabletmanagerdata.SleepRequest") + proto.RegisterType((*SleepResponse)(nil), "tabletmanagerdata.SleepResponse") + proto.RegisterType((*ExecuteHookRequest)(nil), "tabletmanagerdata.ExecuteHookRequest") + proto.RegisterMapType((map[string]string)(nil), "tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry") + proto.RegisterType((*ExecuteHookResponse)(nil), "tabletmanagerdata.ExecuteHookResponse") + proto.RegisterType((*GetSchemaRequest)(nil), "tabletmanagerdata.GetSchemaRequest") + proto.RegisterType((*GetSchemaResponse)(nil), "tabletmanagerdata.GetSchemaResponse") + proto.RegisterType((*GetPermissionsRequest)(nil), "tabletmanagerdata.GetPermissionsRequest") + proto.RegisterType((*GetPermissionsResponse)(nil), "tabletmanagerdata.GetPermissionsResponse") + proto.RegisterType((*SetReadOnlyRequest)(nil), "tabletmanagerdata.SetReadOnlyRequest") + proto.RegisterType((*SetReadOnlyResponse)(nil), "tabletmanagerdata.SetReadOnlyResponse") + proto.RegisterType((*SetReadWriteRequest)(nil), "tabletmanagerdata.SetReadWriteRequest") + proto.RegisterType((*SetReadWriteResponse)(nil), "tabletmanagerdata.SetReadWriteResponse") + proto.RegisterType((*ChangeTypeRequest)(nil), "tabletmanagerdata.ChangeTypeRequest") + proto.RegisterType((*ChangeTypeResponse)(nil), "tabletmanagerdata.ChangeTypeResponse") + proto.RegisterType((*RefreshStateRequest)(nil), "tabletmanagerdata.RefreshStateRequest") + proto.RegisterType((*RefreshStateResponse)(nil), "tabletmanagerdata.RefreshStateResponse") + proto.RegisterType((*RunHealthCheckRequest)(nil), "tabletmanagerdata.RunHealthCheckRequest") + proto.RegisterType((*RunHealthCheckResponse)(nil), "tabletmanagerdata.RunHealthCheckResponse") + proto.RegisterType((*IgnoreHealthErrorRequest)(nil), "tabletmanagerdata.IgnoreHealthErrorRequest") + proto.RegisterType((*IgnoreHealthErrorResponse)(nil), "tabletmanagerdata.IgnoreHealthErrorResponse") + proto.RegisterType((*ReloadSchemaRequest)(nil), "tabletmanagerdata.ReloadSchemaRequest") + proto.RegisterType((*ReloadSchemaResponse)(nil), "tabletmanagerdata.ReloadSchemaResponse") + proto.RegisterType((*PreflightSchemaRequest)(nil), "tabletmanagerdata.PreflightSchemaRequest") + proto.RegisterType((*PreflightSchemaResponse)(nil), "tabletmanagerdata.PreflightSchemaResponse") + proto.RegisterType((*ApplySchemaRequest)(nil), "tabletmanagerdata.ApplySchemaRequest") + proto.RegisterType((*ApplySchemaResponse)(nil), "tabletmanagerdata.ApplySchemaResponse") + proto.RegisterType((*LockTablesRequest)(nil), "tabletmanagerdata.LockTablesRequest") + proto.RegisterType((*LockTablesResponse)(nil), "tabletmanagerdata.LockTablesResponse") + proto.RegisterType((*UnlockTablesRequest)(nil), "tabletmanagerdata.UnlockTablesRequest") + proto.RegisterType((*UnlockTablesResponse)(nil), "tabletmanagerdata.UnlockTablesResponse") + proto.RegisterType((*ExecuteFetchAsDbaRequest)(nil), "tabletmanagerdata.ExecuteFetchAsDbaRequest") + proto.RegisterType((*ExecuteFetchAsDbaResponse)(nil), "tabletmanagerdata.ExecuteFetchAsDbaResponse") + proto.RegisterType((*ExecuteFetchAsAllPrivsRequest)(nil), "tabletmanagerdata.ExecuteFetchAsAllPrivsRequest") + proto.RegisterType((*ExecuteFetchAsAllPrivsResponse)(nil), "tabletmanagerdata.ExecuteFetchAsAllPrivsResponse") + proto.RegisterType((*ExecuteFetchAsAppRequest)(nil), "tabletmanagerdata.ExecuteFetchAsAppRequest") + proto.RegisterType((*ExecuteFetchAsAppResponse)(nil), "tabletmanagerdata.ExecuteFetchAsAppResponse") + proto.RegisterType((*ReplicationStatusRequest)(nil), "tabletmanagerdata.ReplicationStatusRequest") + proto.RegisterType((*ReplicationStatusResponse)(nil), "tabletmanagerdata.ReplicationStatusResponse") + proto.RegisterType((*MasterPositionRequest)(nil), "tabletmanagerdata.MasterPositionRequest") + proto.RegisterType((*MasterPositionResponse)(nil), "tabletmanagerdata.MasterPositionResponse") + proto.RegisterType((*WaitForPositionRequest)(nil), "tabletmanagerdata.WaitForPositionRequest") + proto.RegisterType((*WaitForPositionResponse)(nil), "tabletmanagerdata.WaitForPositionResponse") + proto.RegisterType((*StopReplicationRequest)(nil), "tabletmanagerdata.StopReplicationRequest") + proto.RegisterType((*StopReplicationResponse)(nil), "tabletmanagerdata.StopReplicationResponse") + proto.RegisterType((*StopReplicationMinimumRequest)(nil), "tabletmanagerdata.StopReplicationMinimumRequest") + proto.RegisterType((*StopReplicationMinimumResponse)(nil), "tabletmanagerdata.StopReplicationMinimumResponse") + proto.RegisterType((*StartReplicationRequest)(nil), "tabletmanagerdata.StartReplicationRequest") + proto.RegisterType((*StartReplicationResponse)(nil), "tabletmanagerdata.StartReplicationResponse") + proto.RegisterType((*StartReplicationUntilAfterRequest)(nil), "tabletmanagerdata.StartReplicationUntilAfterRequest") + proto.RegisterType((*StartReplicationUntilAfterResponse)(nil), "tabletmanagerdata.StartReplicationUntilAfterResponse") + proto.RegisterType((*GetReplicasRequest)(nil), "tabletmanagerdata.GetReplicasRequest") + proto.RegisterType((*GetReplicasResponse)(nil), "tabletmanagerdata.GetReplicasResponse") + proto.RegisterType((*ResetReplicationRequest)(nil), "tabletmanagerdata.ResetReplicationRequest") + proto.RegisterType((*ResetReplicationResponse)(nil), "tabletmanagerdata.ResetReplicationResponse") + proto.RegisterType((*VReplicationExecRequest)(nil), "tabletmanagerdata.VReplicationExecRequest") + proto.RegisterType((*VReplicationExecResponse)(nil), "tabletmanagerdata.VReplicationExecResponse") + proto.RegisterType((*VReplicationWaitForPosRequest)(nil), "tabletmanagerdata.VReplicationWaitForPosRequest") + proto.RegisterType((*VReplicationWaitForPosResponse)(nil), "tabletmanagerdata.VReplicationWaitForPosResponse") + proto.RegisterType((*InitMasterRequest)(nil), "tabletmanagerdata.InitMasterRequest") + proto.RegisterType((*InitMasterResponse)(nil), "tabletmanagerdata.InitMasterResponse") + proto.RegisterType((*PopulateReparentJournalRequest)(nil), "tabletmanagerdata.PopulateReparentJournalRequest") + proto.RegisterType((*PopulateReparentJournalResponse)(nil), "tabletmanagerdata.PopulateReparentJournalResponse") + proto.RegisterType((*InitReplicaRequest)(nil), "tabletmanagerdata.InitReplicaRequest") + proto.RegisterType((*InitReplicaResponse)(nil), "tabletmanagerdata.InitReplicaResponse") + proto.RegisterType((*DemoteMasterRequest)(nil), "tabletmanagerdata.DemoteMasterRequest") + proto.RegisterType((*DemoteMasterResponse)(nil), "tabletmanagerdata.DemoteMasterResponse") + proto.RegisterType((*UndoDemoteMasterRequest)(nil), "tabletmanagerdata.UndoDemoteMasterRequest") + proto.RegisterType((*UndoDemoteMasterResponse)(nil), "tabletmanagerdata.UndoDemoteMasterResponse") + proto.RegisterType((*ReplicaWasPromotedRequest)(nil), "tabletmanagerdata.ReplicaWasPromotedRequest") + proto.RegisterType((*ReplicaWasPromotedResponse)(nil), "tabletmanagerdata.ReplicaWasPromotedResponse") + proto.RegisterType((*SetMasterRequest)(nil), "tabletmanagerdata.SetMasterRequest") + proto.RegisterType((*SetMasterResponse)(nil), "tabletmanagerdata.SetMasterResponse") + proto.RegisterType((*ReplicaWasRestartedRequest)(nil), "tabletmanagerdata.ReplicaWasRestartedRequest") + proto.RegisterType((*ReplicaWasRestartedResponse)(nil), "tabletmanagerdata.ReplicaWasRestartedResponse") + proto.RegisterType((*StopReplicationAndGetStatusRequest)(nil), "tabletmanagerdata.StopReplicationAndGetStatusRequest") + proto.RegisterType((*StopReplicationAndGetStatusResponse)(nil), "tabletmanagerdata.StopReplicationAndGetStatusResponse") + proto.RegisterType((*PromoteReplicaRequest)(nil), "tabletmanagerdata.PromoteReplicaRequest") + proto.RegisterType((*PromoteReplicaResponse)(nil), "tabletmanagerdata.PromoteReplicaResponse") + proto.RegisterType((*BackupRequest)(nil), "tabletmanagerdata.BackupRequest") + proto.RegisterType((*BackupResponse)(nil), "tabletmanagerdata.BackupResponse") + proto.RegisterType((*RestoreFromBackupRequest)(nil), "tabletmanagerdata.RestoreFromBackupRequest") + proto.RegisterType((*RestoreFromBackupResponse)(nil), "tabletmanagerdata.RestoreFromBackupResponse") + proto.RegisterType((*SlaveStatusRequest)(nil), "tabletmanagerdata.SlaveStatusRequest") + proto.RegisterType((*SlaveStatusResponse)(nil), "tabletmanagerdata.SlaveStatusResponse") + proto.RegisterType((*StopSlaveRequest)(nil), "tabletmanagerdata.StopSlaveRequest") + proto.RegisterType((*StopSlaveResponse)(nil), "tabletmanagerdata.StopSlaveResponse") + proto.RegisterType((*StopSlaveMinimumRequest)(nil), "tabletmanagerdata.StopSlaveMinimumRequest") + proto.RegisterType((*StopSlaveMinimumResponse)(nil), "tabletmanagerdata.StopSlaveMinimumResponse") + proto.RegisterType((*StartSlaveRequest)(nil), "tabletmanagerdata.StartSlaveRequest") + proto.RegisterType((*StartSlaveResponse)(nil), "tabletmanagerdata.StartSlaveResponse") + proto.RegisterType((*StartSlaveUntilAfterRequest)(nil), "tabletmanagerdata.StartSlaveUntilAfterRequest") + proto.RegisterType((*StartSlaveUntilAfterResponse)(nil), "tabletmanagerdata.StartSlaveUntilAfterResponse") + proto.RegisterType((*GetSlavesRequest)(nil), "tabletmanagerdata.GetSlavesRequest") + proto.RegisterType((*GetSlavesResponse)(nil), "tabletmanagerdata.GetSlavesResponse") + proto.RegisterType((*InitSlaveRequest)(nil), "tabletmanagerdata.InitSlaveRequest") + proto.RegisterType((*InitSlaveResponse)(nil), "tabletmanagerdata.InitSlaveResponse") + proto.RegisterType((*SlaveWasPromotedRequest)(nil), "tabletmanagerdata.SlaveWasPromotedRequest") + proto.RegisterType((*SlaveWasPromotedResponse)(nil), "tabletmanagerdata.SlaveWasPromotedResponse") + proto.RegisterType((*SlaveWasRestartedRequest)(nil), "tabletmanagerdata.SlaveWasRestartedRequest") + proto.RegisterType((*SlaveWasRestartedResponse)(nil), "tabletmanagerdata.SlaveWasRestartedResponse") +} + +func init() { proto.RegisterFile("tabletmanagerdata.proto", fileDescriptor_ff9ac4f89e61ffa4) } + +var fileDescriptor_ff9ac4f89e61ffa4 = []byte{ + // 2154 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, + 0x15, 0x06, 0xa9, 0x1f, 0x4b, 0x87, 0x3f, 0xa2, 0x56, 0x94, 0x48, 0x51, 0xb1, 0x2c, 0xaf, 0x9d, + 0xc6, 0x4d, 0x50, 0x2a, 0x51, 0x52, 0x23, 0x48, 0x5b, 0xa0, 0xb2, 0x2d, 0xd9, 0x8e, 0x95, 0x58, + 0x59, 0xf9, 0xa7, 0x08, 0x8a, 0x2e, 0x86, 0xdc, 0x11, 0xb5, 0xd0, 0x72, 0x67, 0x3d, 0x33, 0x4b, + 0x89, 0x2f, 0xd1, 0x27, 0x28, 0x7a, 0x53, 0xa0, 0xbd, 0xef, 0x43, 0xf4, 0x11, 0xd2, 0x47, 0xe9, + 0x45, 0x2f, 0x5a, 0xcc, 0xcc, 0x59, 0x72, 0x97, 0x5c, 0xc9, 0xb2, 0xa3, 0x00, 0xb9, 0x11, 0x76, + 0xbe, 0xf3, 0x7f, 0xe6, 0xcc, 0x99, 0x33, 0x14, 0x34, 0x24, 0xe9, 0x04, 0x54, 0xf6, 0x49, 0x48, + 0x7a, 0x94, 0x7b, 0x44, 0x92, 0x76, 0xc4, 0x99, 0x64, 0xd6, 0xf2, 0x14, 0xa1, 0x55, 0x7a, 0x13, + 0x53, 0x3e, 0x34, 0xf4, 0x56, 0x55, 0xb2, 0x88, 0x8d, 0xf9, 0x5b, 0xab, 0x9c, 0x46, 0x81, 0xdf, + 0x25, 0xd2, 0x67, 0x61, 0x0a, 0xae, 0x04, 0xac, 0x17, 0x4b, 0x3f, 0x30, 0x4b, 0xfb, 0x7f, 0x05, + 0x58, 0x7a, 0xa1, 0x14, 0x3f, 0xa2, 0xc7, 0x7e, 0xe8, 0x2b, 0x66, 0xcb, 0x82, 0xd9, 0x90, 0xf4, + 0x69, 0xb3, 0xb0, 0x55, 0xb8, 0xb7, 0xe8, 0xe8, 0x6f, 0x6b, 0x0d, 0xe6, 0x45, 0xf7, 0x84, 0xf6, + 0x49, 0xb3, 0xa8, 0x51, 0x5c, 0x59, 0x4d, 0xb8, 0xd1, 0x65, 0x41, 0xdc, 0x0f, 0x45, 0x73, 0x66, + 0x6b, 0xe6, 0xde, 0xa2, 0x93, 0x2c, 0xad, 0x36, 0xac, 0x44, 0xdc, 0xef, 0x13, 0x3e, 0x74, 0x4f, + 0xe9, 0xd0, 0x4d, 0xb8, 0x66, 0x35, 0xd7, 0x32, 0x92, 0x9e, 0xd1, 0xe1, 0x43, 0xe4, 0xb7, 0x60, + 0x56, 0x0e, 0x23, 0xda, 0x9c, 0x33, 0x56, 0xd5, 0xb7, 0x75, 0x0b, 0x4a, 0xca, 0x75, 0x37, 0xa0, + 0x61, 0x4f, 0x9e, 0x34, 0xe7, 0xb7, 0x0a, 0xf7, 0x66, 0x1d, 0x50, 0xd0, 0x81, 0x46, 0xac, 0x0d, + 0x58, 0xe4, 0xec, 0xcc, 0xed, 0xb2, 0x38, 0x94, 0xcd, 0x1b, 0x9a, 0xbc, 0xc0, 0xd9, 0xd9, 0x43, + 0xb5, 0xb6, 0xee, 0xc2, 0xfc, 0xb1, 0x4f, 0x03, 0x4f, 0x34, 0x17, 0xb6, 0x66, 0xee, 0x95, 0x76, + 0xca, 0x6d, 0x93, 0xaf, 0x7d, 0x05, 0x3a, 0x48, 0xb3, 0xff, 0x5e, 0x80, 0xda, 0x91, 0x0e, 0x26, + 0x95, 0x82, 0x8f, 0x60, 0x49, 0x59, 0xe9, 0x10, 0x41, 0x5d, 0x8c, 0xdb, 0x64, 0xa3, 0x9a, 0xc0, + 0x46, 0xc4, 0x7a, 0x0e, 0x66, 0x5f, 0x5c, 0x6f, 0x24, 0x2c, 0x9a, 0x45, 0x6d, 0xce, 0x6e, 0x4f, + 0x6f, 0xe5, 0x44, 0xaa, 0x9d, 0x9a, 0xcc, 0x02, 0x42, 0x25, 0x74, 0x40, 0xb9, 0xf0, 0x59, 0xd8, + 0x9c, 0xd1, 0x16, 0x93, 0xa5, 0x72, 0xd4, 0x32, 0x56, 0x1f, 0x9e, 0x90, 0xb0, 0x47, 0x1d, 0x2a, + 0xe2, 0x40, 0x5a, 0x4f, 0xa0, 0xd2, 0xa1, 0xc7, 0x8c, 0x67, 0x1c, 0x2d, 0xed, 0xdc, 0xc9, 0xb1, + 0x3e, 0x19, 0xa6, 0x53, 0x36, 0x92, 0x18, 0xcb, 0x3e, 0x94, 0xc9, 0xb1, 0xa4, 0xdc, 0x4d, 0xed, + 0xf4, 0x15, 0x15, 0x95, 0xb4, 0xa0, 0x81, 0xed, 0xff, 0x14, 0xa0, 0xfa, 0x52, 0x50, 0x7e, 0x48, + 0x79, 0xdf, 0x17, 0x02, 0x4b, 0xea, 0x84, 0x09, 0x99, 0x94, 0x94, 0xfa, 0x56, 0x58, 0x2c, 0x28, + 0xc7, 0x82, 0xd2, 0xdf, 0xd6, 0x27, 0xb0, 0x1c, 0x11, 0x21, 0xce, 0x18, 0xf7, 0xdc, 0xee, 0x09, + 0xed, 0x9e, 0x8a, 0xb8, 0xaf, 0xf3, 0x30, 0xeb, 0xd4, 0x12, 0xc2, 0x43, 0xc4, 0xad, 0xef, 0x00, + 0x22, 0xee, 0x0f, 0xfc, 0x80, 0xf6, 0xa8, 0x29, 0xac, 0xd2, 0xce, 0x67, 0x39, 0xde, 0x66, 0x7d, + 0x69, 0x1f, 0x8e, 0x64, 0xf6, 0x42, 0xc9, 0x87, 0x4e, 0x4a, 0x49, 0xeb, 0x77, 0xb0, 0x34, 0x41, + 0xb6, 0x6a, 0x30, 0x73, 0x4a, 0x87, 0xe8, 0xb9, 0xfa, 0xb4, 0xea, 0x30, 0x37, 0x20, 0x41, 0x4c, + 0xd1, 0x73, 0xb3, 0xf8, 0xaa, 0xf8, 0x65, 0xc1, 0xfe, 0xa1, 0x00, 0xe5, 0x47, 0x9d, 0xb7, 0xc4, + 0x5d, 0x85, 0xa2, 0xd7, 0x41, 0xd9, 0xa2, 0xd7, 0x19, 0xe5, 0x61, 0x26, 0x95, 0x87, 0xe7, 0x39, + 0xa1, 0x6d, 0xe7, 0x84, 0x96, 0x36, 0xf6, 0x53, 0x06, 0xf6, 0xb7, 0x02, 0x94, 0xc6, 0x96, 0x84, + 0x75, 0x00, 0x35, 0xe5, 0xa7, 0x1b, 0x8d, 0xb1, 0x66, 0x41, 0x7b, 0x79, 0xfb, 0xad, 0x1b, 0xe0, + 0x2c, 0xc5, 0x99, 0xb5, 0xb0, 0xf6, 0xa1, 0xea, 0x75, 0x32, 0xba, 0xcc, 0x09, 0xba, 0xf5, 0x96, + 0x88, 0x9d, 0x8a, 0x97, 0x5a, 0x09, 0xfb, 0x23, 0x28, 0x1d, 0xfa, 0x61, 0xcf, 0xa1, 0x6f, 0x62, + 0x2a, 0xa4, 0x3a, 0x4a, 0x11, 0x19, 0x06, 0x8c, 0x78, 0x18, 0x64, 0xb2, 0xb4, 0xef, 0x41, 0xd9, + 0x30, 0x8a, 0x88, 0x85, 0x82, 0x5e, 0xc2, 0xf9, 0x31, 0x94, 0x8f, 0x02, 0x4a, 0xa3, 0x44, 0x67, + 0x0b, 0x16, 0xbc, 0x98, 0xeb, 0xa6, 0xaa, 0x59, 0x67, 0x9c, 0xd1, 0xda, 0x5e, 0x82, 0x0a, 0xf2, + 0x1a, 0xb5, 0xf6, 0xbf, 0x0b, 0x60, 0xed, 0x9d, 0xd3, 0x6e, 0x2c, 0xe9, 0x13, 0xc6, 0x4e, 0x13, + 0x1d, 0x79, 0xfd, 0x75, 0x13, 0x20, 0x22, 0x9c, 0xf4, 0xa9, 0xa4, 0xdc, 0x84, 0xbf, 0xe8, 0xa4, + 0x10, 0xeb, 0x10, 0x16, 0xe9, 0xb9, 0xe4, 0xc4, 0xa5, 0xe1, 0x40, 0x77, 0xda, 0xd2, 0xce, 0xe7, + 0x39, 0xd9, 0x99, 0xb6, 0xd6, 0xde, 0x53, 0x62, 0x7b, 0xe1, 0xc0, 0xd4, 0xc4, 0x02, 0xc5, 0x65, + 0xeb, 0x37, 0x50, 0xc9, 0x90, 0xde, 0xa9, 0x1e, 0x8e, 0x61, 0x25, 0x63, 0x0a, 0xf3, 0x78, 0x0b, + 0x4a, 0xf4, 0xdc, 0x97, 0xae, 0x90, 0x44, 0xc6, 0x02, 0x13, 0x04, 0x0a, 0x3a, 0xd2, 0x88, 0xbe, + 0x46, 0xa4, 0xc7, 0x62, 0x39, 0xba, 0x46, 0xf4, 0x0a, 0x71, 0xca, 0x93, 0x53, 0x80, 0x2b, 0x7b, + 0x00, 0xb5, 0xc7, 0x54, 0x9a, 0xbe, 0x92, 0xa4, 0x6f, 0x0d, 0xe6, 0x75, 0xe0, 0xa6, 0xe2, 0x16, + 0x1d, 0x5c, 0x59, 0x77, 0xa0, 0xe2, 0x87, 0xdd, 0x20, 0xf6, 0xa8, 0x3b, 0xf0, 0xe9, 0x99, 0xd0, + 0x26, 0x16, 0x9c, 0x32, 0x82, 0xaf, 0x14, 0x66, 0x7d, 0x08, 0x55, 0x7a, 0x6e, 0x98, 0x50, 0x89, + 0xb9, 0xb6, 0x2a, 0x88, 0xea, 0x06, 0x2d, 0x6c, 0x0a, 0xcb, 0x29, 0xbb, 0x18, 0xdd, 0x21, 0x2c, + 0x9b, 0xce, 0x98, 0x6a, 0xf6, 0xef, 0xd2, 0x6d, 0x6b, 0x62, 0x02, 0xb1, 0x1b, 0xb0, 0xfa, 0x98, + 0xca, 0x54, 0x09, 0x63, 0x8c, 0xf6, 0xf7, 0xb0, 0x36, 0x49, 0x40, 0x27, 0x7e, 0x0f, 0xa5, 0xec, + 0xa1, 0x53, 0xe6, 0x37, 0x73, 0xcc, 0xa7, 0x85, 0xd3, 0x22, 0x76, 0x1d, 0xac, 0x23, 0x2a, 0x1d, + 0x4a, 0xbc, 0xe7, 0x61, 0x30, 0x4c, 0x2c, 0xae, 0xc2, 0x4a, 0x06, 0xc5, 0x12, 0x1e, 0xc3, 0xaf, + 0xb9, 0x2f, 0x69, 0xc2, 0xbd, 0x06, 0xf5, 0x2c, 0x8c, 0xec, 0x5f, 0xc3, 0xb2, 0xb9, 0x9c, 0x5e, + 0x0c, 0xa3, 0x84, 0xd9, 0xfa, 0x35, 0x94, 0x8c, 0x7b, 0xae, 0xbe, 0xe0, 0x95, 0xcb, 0xd5, 0x9d, + 0x7a, 0x7b, 0x34, 0xaf, 0xe8, 0x9c, 0x4b, 0x2d, 0x01, 0x72, 0xf4, 0xad, 0xfc, 0x4c, 0xeb, 0x1a, + 0x3b, 0xe4, 0xd0, 0x63, 0x4e, 0xc5, 0x89, 0x2a, 0xa9, 0xb4, 0x43, 0x59, 0x18, 0xd9, 0x1b, 0xb0, + 0xea, 0xc4, 0xe1, 0x13, 0x4a, 0x02, 0x79, 0xa2, 0x2f, 0x8e, 0x44, 0xa0, 0x09, 0x6b, 0x93, 0x04, + 0x14, 0xf9, 0x02, 0x9a, 0x4f, 0x7b, 0x21, 0xe3, 0xd4, 0x10, 0xf7, 0x38, 0x67, 0x3c, 0xd3, 0x52, + 0xa4, 0xa4, 0x3c, 0x1c, 0x37, 0x0a, 0xbd, 0xb4, 0x37, 0x60, 0x3d, 0x47, 0x0a, 0x55, 0x7e, 0xa5, + 0x9c, 0x56, 0xfd, 0x24, 0x5b, 0xc9, 0x77, 0xa0, 0x72, 0x46, 0x7c, 0xe9, 0x46, 0x4c, 0x8c, 0x8b, + 0x69, 0xd1, 0x29, 0x2b, 0xf0, 0x10, 0x31, 0x13, 0x59, 0x5a, 0x16, 0x75, 0xee, 0xc0, 0xda, 0x21, + 0xa7, 0xc7, 0x81, 0xdf, 0x3b, 0x99, 0x38, 0x20, 0x6a, 0x26, 0xd3, 0x89, 0x4b, 0x4e, 0x48, 0xb2, + 0xb4, 0x7b, 0xd0, 0x98, 0x92, 0xc1, 0xba, 0x3a, 0x80, 0xaa, 0xe1, 0x72, 0xb9, 0x9e, 0x2b, 0x92, + 0x7e, 0xfe, 0xe1, 0x85, 0x95, 0x9d, 0x9e, 0x42, 0x9c, 0x4a, 0x37, 0xb5, 0x12, 0xf6, 0x7f, 0x0b, + 0x60, 0xed, 0x46, 0x51, 0x30, 0xcc, 0x7a, 0x56, 0x83, 0x19, 0xf1, 0x26, 0x48, 0x5a, 0x8c, 0x78, + 0x13, 0xa8, 0x16, 0x73, 0xcc, 0x78, 0x97, 0xe2, 0x61, 0x35, 0x0b, 0x35, 0x06, 0x90, 0x20, 0x60, + 0x67, 0x6e, 0x6a, 0x86, 0xd5, 0x9d, 0x61, 0xc1, 0xa9, 0x69, 0x82, 0x33, 0xc6, 0xa7, 0x07, 0xa0, + 0xd9, 0xeb, 0x1a, 0x80, 0xe6, 0xde, 0x73, 0x00, 0xfa, 0x47, 0x01, 0x56, 0x32, 0xd1, 0x63, 0x8e, + 0x7f, 0x7e, 0xa3, 0xda, 0x0a, 0x2c, 0x1f, 0xb0, 0xee, 0xa9, 0xe9, 0x7a, 0xc9, 0xd1, 0xa8, 0x83, + 0x95, 0x06, 0xc7, 0x07, 0xef, 0x65, 0x18, 0x4c, 0x31, 0xaf, 0x41, 0x3d, 0x0b, 0x23, 0xfb, 0x3f, + 0x0b, 0xd0, 0xc4, 0x2b, 0x62, 0x9f, 0xca, 0xee, 0xc9, 0xae, 0x78, 0xd4, 0x19, 0xd5, 0x41, 0x1d, + 0xe6, 0xf4, 0x28, 0xae, 0x13, 0x50, 0x76, 0xcc, 0xc2, 0x6a, 0xc0, 0x0d, 0xaf, 0xe3, 0xea, 0xab, + 0x11, 0x6f, 0x07, 0xaf, 0xf3, 0xad, 0xba, 0x1c, 0xd7, 0x61, 0xa1, 0x4f, 0xce, 0x5d, 0xce, 0xce, + 0x04, 0x0e, 0x83, 0x37, 0xfa, 0xe4, 0xdc, 0x61, 0x67, 0x42, 0x0f, 0xea, 0xbe, 0xd0, 0x13, 0x78, + 0xc7, 0x0f, 0x03, 0xd6, 0x13, 0x7a, 0xfb, 0x17, 0x9c, 0x2a, 0xc2, 0x0f, 0x0c, 0xaa, 0xce, 0x1a, + 0xd7, 0xc7, 0x28, 0xbd, 0xb9, 0x0b, 0x4e, 0x99, 0xa7, 0xce, 0x96, 0xfd, 0x18, 0xd6, 0x73, 0x7c, + 0xc6, 0xdd, 0xfb, 0x18, 0xe6, 0xcd, 0xd1, 0xc0, 0x6d, 0xb3, 0xf0, 0x39, 0xf1, 0x9d, 0xfa, 0x8b, + 0xc7, 0x00, 0x39, 0xec, 0x3f, 0x17, 0xe0, 0x66, 0x56, 0xd3, 0x6e, 0x10, 0xa8, 0x01, 0x4c, 0x5c, + 0x7f, 0x0a, 0xa6, 0x22, 0x9b, 0xcd, 0x89, 0xec, 0x00, 0x36, 0x2f, 0xf2, 0xe7, 0x3d, 0xc2, 0x7b, + 0x36, 0xb9, 0xb7, 0xbb, 0x51, 0x74, 0x79, 0x60, 0x69, 0xff, 0x8b, 0x19, 0xff, 0xa7, 0x93, 0xae, + 0x95, 0xbd, 0x87, 0x57, 0x2d, 0x68, 0xa6, 0xfa, 0x82, 0x99, 0x38, 0x92, 0x32, 0x3d, 0x80, 0xf5, + 0x1c, 0x1a, 0x1a, 0xd9, 0x56, 0xd3, 0xc7, 0x68, 0x62, 0x29, 0xed, 0x34, 0xda, 0x93, 0x6f, 0x67, + 0x14, 0x40, 0x36, 0x75, 0xab, 0x7c, 0x43, 0x84, 0xa4, 0x3c, 0xe9, 0xd2, 0x89, 0x99, 0x2f, 0x60, + 0x6d, 0x92, 0x80, 0x36, 0x5a, 0xb0, 0x30, 0xd1, 0xe6, 0x47, 0x6b, 0x25, 0xf5, 0x9a, 0xf8, 0x72, + 0x9f, 0x4d, 0xea, 0xbb, 0x54, 0x6a, 0x1d, 0x1a, 0x53, 0x52, 0x78, 0xf8, 0x9a, 0xb0, 0x76, 0x24, + 0x59, 0x94, 0x8a, 0x38, 0x71, 0x70, 0x1d, 0x1a, 0x53, 0x14, 0x14, 0xfa, 0x13, 0xdc, 0x9c, 0x20, + 0x7d, 0xe3, 0x87, 0x7e, 0x3f, 0xee, 0x5f, 0xc1, 0x19, 0xeb, 0x36, 0xe8, 0x5b, 0xcb, 0x95, 0x7e, + 0x9f, 0x26, 0xe3, 0xdd, 0x8c, 0x53, 0x52, 0xd8, 0x0b, 0x03, 0xd9, 0xbf, 0x85, 0xcd, 0x8b, 0xf4, + 0x5f, 0x21, 0x47, 0xda, 0x71, 0xc2, 0x65, 0x4e, 0x4c, 0x2d, 0x68, 0x4e, 0x93, 0x30, 0xa8, 0x0e, + 0xdc, 0x9e, 0xa4, 0xbd, 0x0c, 0xa5, 0x1f, 0xec, 0xaa, 0x26, 0x78, 0x4d, 0x81, 0xdd, 0x05, 0xfb, + 0x32, 0x1b, 0xe8, 0x49, 0x1d, 0xac, 0xc7, 0x34, 0xe1, 0x19, 0xd5, 0xe5, 0x27, 0xb0, 0x92, 0x41, + 0x31, 0x13, 0x75, 0x98, 0x23, 0x9e, 0xc7, 0x93, 0x0b, 0xdc, 0x2c, 0x54, 0x0e, 0x1c, 0x2a, 0xe8, + 0x05, 0x39, 0x98, 0x26, 0xa1, 0xe5, 0x6d, 0x68, 0xbc, 0x4a, 0xe1, 0xea, 0xb0, 0xe5, 0x1e, 0xd6, + 0x45, 0x3c, 0xac, 0xf6, 0x3e, 0x34, 0xa7, 0x05, 0xde, 0xab, 0x4d, 0xdc, 0x4c, 0xeb, 0x19, 0x57, + 0x6b, 0x62, 0xbe, 0x0a, 0x45, 0xdf, 0xc3, 0x67, 0x42, 0xd1, 0xf7, 0x32, 0x1b, 0x51, 0x9c, 0x28, + 0x80, 0x2d, 0xd8, 0xbc, 0x48, 0x19, 0xc6, 0xb9, 0x02, 0xcb, 0x4f, 0x43, 0x5f, 0x9a, 0x03, 0x98, + 0x24, 0xe6, 0x53, 0xb0, 0xd2, 0xe0, 0x15, 0x2a, 0xed, 0x87, 0x02, 0x6c, 0x1e, 0xb2, 0x28, 0x0e, + 0xf4, 0x1c, 0x19, 0x11, 0x4e, 0x43, 0xf9, 0x35, 0x8b, 0x79, 0x48, 0x82, 0xc4, 0xef, 0x5f, 0xc0, + 0x92, 0xaa, 0x07, 0xb7, 0xcb, 0x29, 0x91, 0xd4, 0x73, 0xc3, 0xe4, 0xad, 0x53, 0x51, 0xf0, 0x43, + 0x83, 0x7e, 0x2b, 0xd4, 0x7b, 0x88, 0x74, 0x95, 0xd2, 0x74, 0x4b, 0x07, 0x03, 0xe9, 0xb6, 0xfe, + 0x25, 0x94, 0xfb, 0xda, 0x33, 0x97, 0x04, 0x3e, 0x31, 0xad, 0xbd, 0xb4, 0xb3, 0x3a, 0x39, 0x1b, + 0xef, 0x2a, 0xa2, 0x53, 0x32, 0xac, 0x7a, 0x61, 0x7d, 0x06, 0xf5, 0x54, 0x93, 0x1a, 0x8f, 0x90, + 0xb3, 0xda, 0xc6, 0x4a, 0x8a, 0x36, 0x9a, 0x24, 0x6f, 0xc3, 0xad, 0x0b, 0xe3, 0xc2, 0x14, 0xfe, + 0xb5, 0x60, 0xd2, 0x85, 0x89, 0x4e, 0xe2, 0xfd, 0x15, 0xcc, 0x1b, 0x7e, 0xdc, 0xf4, 0x0b, 0x1c, + 0x44, 0xa6, 0x0b, 0x7d, 0x2b, 0x5e, 0xe8, 0x5b, 0x5e, 0x46, 0x67, 0x72, 0x32, 0xaa, 0xa6, 0x90, + 0x8c, 0x7f, 0xe3, 0xe1, 0xe4, 0x11, 0xed, 0x33, 0x49, 0xb3, 0x9b, 0xbf, 0x03, 0xf5, 0x2c, 0x7c, + 0xb5, 0x46, 0xf3, 0x32, 0xf4, 0x58, 0x9e, 0xba, 0x16, 0x34, 0xa7, 0x49, 0xe8, 0xc1, 0xc6, 0xe8, + 0x82, 0x79, 0x4d, 0xc4, 0x21, 0x67, 0x8a, 0xc5, 0x4b, 0x04, 0x3f, 0x80, 0x56, 0x1e, 0x11, 0x45, + 0xff, 0x55, 0x80, 0xda, 0x11, 0xcd, 0xd6, 0xed, 0xbb, 0xa6, 0x3c, 0x27, 0x7f, 0xc5, 0xbc, 0x8a, + 0xbc, 0x0f, 0x0d, 0x3d, 0x62, 0xab, 0x27, 0x3a, 0x97, 0x39, 0xf3, 0xf5, 0xaa, 0x26, 0x4f, 0xf6, + 0xb3, 0xe9, 0xa7, 0xca, 0x6c, 0xce, 0x53, 0x65, 0x05, 0x96, 0x53, 0x71, 0x60, 0x74, 0xcf, 0xd2, + 0xb1, 0x3b, 0x54, 0xdb, 0x1d, 0x65, 0xe6, 0x1d, 0xc3, 0xb4, 0x6f, 0xc2, 0x46, 0xae, 0x32, 0xb4, + 0xa5, 0x3b, 0x71, 0xe6, 0x8a, 0xd9, 0x0d, 0x3d, 0xf5, 0x90, 0xcf, 0xcc, 0x02, 0xaf, 0xe0, 0xce, + 0xa5, 0x5c, 0x3f, 0x62, 0x2a, 0xc0, 0xbd, 0xcd, 0x1e, 0x1f, 0x75, 0xbf, 0x4f, 0x12, 0xae, 0x50, + 0x88, 0x47, 0x50, 0x79, 0x40, 0xba, 0xa7, 0xf1, 0x68, 0xb2, 0xda, 0x82, 0x52, 0x97, 0x85, 0xdd, + 0x98, 0x73, 0x1a, 0x76, 0x87, 0xd8, 0x71, 0xd2, 0x90, 0xe2, 0xd0, 0xcf, 0x23, 0xb3, 0x05, 0xf8, + 0xa6, 0x4a, 0x43, 0xf6, 0x7d, 0xa8, 0x26, 0x4a, 0xd1, 0x85, 0xbb, 0x30, 0x47, 0x07, 0xe3, 0x0d, + 0xa8, 0xb6, 0x93, 0x7f, 0x10, 0xec, 0x29, 0xd4, 0x31, 0x44, 0xbc, 0x5f, 0x24, 0xe3, 0x74, 0x9f, + 0xb3, 0x7e, 0xc6, 0x2f, 0x7b, 0x57, 0x95, 0xfe, 0x14, 0xed, 0x9d, 0xd4, 0xd7, 0xc1, 0x3a, 0x0a, + 0xc8, 0x80, 0x66, 0x37, 0x6a, 0x1f, 0x56, 0x32, 0xe8, 0xfb, 0x6e, 0x8c, 0x05, 0x35, 0xb5, 0xe1, + 0x5a, 0x57, 0xa2, 0x5b, 0xd5, 0xea, 0x18, 0xc3, 0xfa, 0xf9, 0x83, 0x99, 0x8e, 0x34, 0x78, 0xbd, + 0xc3, 0xcf, 0x7d, 0x35, 0xa3, 0x4c, 0x6a, 0xbe, 0x42, 0x11, 0x68, 0x37, 0x09, 0x97, 0x19, 0xdf, + 0x55, 0xb6, 0x52, 0x20, 0x3a, 0xff, 0x47, 0xd8, 0x18, 0xa3, 0xd7, 0x3e, 0xe4, 0x6c, 0xc2, 0x07, + 0xf9, 0xda, 0xd1, 0xba, 0x65, 0x7e, 0xa9, 0x53, 0xd4, 0xd1, 0xfe, 0xfd, 0xd2, 0xfc, 0x8a, 0x86, + 0xd8, 0xa5, 0xa3, 0xcd, 0x5f, 0x0a, 0x50, 0x53, 0x8d, 0x3d, 0x1d, 0xe7, 0xcf, 0xe8, 0xda, 0xc1, + 0xd1, 0x22, 0x9b, 0x70, 0x35, 0x92, 0x2a, 0x20, 0xa7, 0xe1, 0xab, 0x91, 0x74, 0x8a, 0x84, 0x62, + 0x4f, 0xc7, 0xb4, 0x1f, 0xdb, 0x0e, 0x37, 0x60, 0x3d, 0x47, 0x95, 0xb1, 0xf3, 0xe0, 0xd3, 0xef, + 0xdb, 0x03, 0x5f, 0x52, 0x21, 0xda, 0x3e, 0xdb, 0x36, 0x5f, 0xdb, 0x3d, 0xb6, 0x3d, 0x90, 0xdb, + 0xfa, 0x5f, 0x7f, 0xdb, 0x53, 0xbf, 0x15, 0x74, 0xe6, 0x35, 0xe1, 0xf3, 0xff, 0x07, 0x00, 0x00, + 0xff, 0xff, 0x00, 0xaf, 0x7c, 0xe6, 0x84, 0x1c, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go b/internal/stackql-parser-fork/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go new file mode 100644 index 00000000..06629fb0 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go @@ -0,0 +1,2117 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tabletmanagerservice.proto + +package tabletmanagerservice + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + tabletmanagerdata "github.com/stackql/stackql-parser/go/vt/proto/tabletmanagerdata" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("tabletmanagerservice.proto", fileDescriptor_9ee75fe63cfd9360) } + +var fileDescriptor_9ee75fe63cfd9360 = []byte{ + // 1102 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x5b, 0x6f, 0x23, 0x35, + 0x14, 0xc7, 0xa9, 0x04, 0x2b, 0x61, 0xee, 0x66, 0xc5, 0x4a, 0x45, 0xe2, 0xb6, 0x2d, 0x2c, 0xe9, + 0x92, 0xec, 0x85, 0xe5, 0x3d, 0x7b, 0x69, 0xb7, 0x68, 0x2b, 0x42, 0xd2, 0x52, 0x04, 0x12, 0x92, + 0x9b, 0x9c, 0x26, 0x43, 0x27, 0x63, 0x63, 0x3b, 0x11, 0x7d, 0x42, 0xe2, 0x15, 0x89, 0xaf, 0xc0, + 0x57, 0x45, 0x33, 0x19, 0x7b, 0x8e, 0x67, 0xce, 0x38, 0xd3, 0xb7, 0x28, 0xff, 0x9f, 0xcf, 0xdf, + 0xf6, 0x1c, 0x1f, 0x9f, 0x19, 0xb6, 0x6b, 0xc5, 0x45, 0x0a, 0x76, 0x29, 0x32, 0x31, 0x07, 0x6d, + 0x40, 0xaf, 0x93, 0x29, 0xf4, 0x95, 0x96, 0x56, 0xf2, 0xdb, 0x94, 0xb6, 0x7b, 0x27, 0xf8, 0x77, + 0x26, 0xac, 0xd8, 0xe0, 0x8f, 0xfe, 0xeb, 0xb1, 0x77, 0x4e, 0x0b, 0xed, 0x64, 0xa3, 0xf1, 0x63, + 0xf6, 0xfa, 0x28, 0xc9, 0xe6, 0xfc, 0x93, 0x7e, 0x73, 0x4c, 0x2e, 0x8c, 0xe1, 0x8f, 0x15, 0x18, + 0xbb, 0xfb, 0x69, 0xab, 0x6e, 0x94, 0xcc, 0x0c, 0x7c, 0xf1, 0x1a, 0x7f, 0xc5, 0xde, 0x98, 0xa4, + 0x00, 0x8a, 0x53, 0x6c, 0xa1, 0xb8, 0x60, 0x9f, 0xb5, 0x03, 0x3e, 0xda, 0x6f, 0xec, 0xad, 0x17, + 0x7f, 0xc2, 0x74, 0x65, 0xe1, 0xa5, 0x94, 0x57, 0x7c, 0x9f, 0x18, 0x82, 0x74, 0x17, 0xf9, 0xcb, + 0x6d, 0x98, 0x8f, 0xff, 0x33, 0x7b, 0xf3, 0x08, 0xec, 0x64, 0xba, 0x80, 0xa5, 0xe0, 0x77, 0x89, + 0x61, 0x5e, 0x75, 0xb1, 0xf7, 0xe2, 0x90, 0x8f, 0x3c, 0x67, 0xef, 0x1e, 0x81, 0x1d, 0x81, 0x5e, + 0x26, 0xc6, 0x24, 0x32, 0x33, 0xfc, 0x1e, 0x3d, 0x12, 0x21, 0xce, 0xe3, 0xeb, 0x0e, 0x24, 0xde, + 0xa2, 0x09, 0xd8, 0x31, 0x88, 0xd9, 0x0f, 0x59, 0x7a, 0x4d, 0x6e, 0x11, 0xd2, 0x63, 0x5b, 0x14, + 0x60, 0x3e, 0xbe, 0x60, 0x6f, 0x97, 0xc2, 0xb9, 0x4e, 0x2c, 0xf0, 0xc8, 0xc8, 0x02, 0x70, 0x0e, + 0x5f, 0x6d, 0xe5, 0xbc, 0xc5, 0xaf, 0x8c, 0x3d, 0x5b, 0x88, 0x6c, 0x0e, 0xa7, 0xd7, 0x0a, 0x38, + 0xb5, 0xc3, 0x95, 0xec, 0xc2, 0xef, 0x6f, 0xa1, 0xf0, 0xfc, 0xc7, 0x70, 0xa9, 0xc1, 0x2c, 0x26, + 0x56, 0xb4, 0xcc, 0x1f, 0x03, 0xb1, 0xf9, 0x87, 0x1c, 0x7e, 0xd6, 0xe3, 0x55, 0xf6, 0x12, 0x44, + 0x6a, 0x17, 0xcf, 0x16, 0x30, 0xbd, 0x22, 0x9f, 0x75, 0x88, 0xc4, 0x9e, 0x75, 0x9d, 0xf4, 0x46, + 0x8a, 0x7d, 0x70, 0x3c, 0xcf, 0xa4, 0x86, 0x8d, 0xfc, 0x42, 0x6b, 0xa9, 0xf9, 0x01, 0x11, 0xa1, + 0x41, 0x39, 0xbb, 0xfb, 0xdd, 0xe0, 0x70, 0xf7, 0x52, 0x29, 0x66, 0xe5, 0x19, 0xa1, 0x77, 0xaf, + 0x02, 0xe2, 0xbb, 0x87, 0x39, 0x6f, 0xf1, 0x3b, 0x7b, 0x6f, 0xa4, 0xe1, 0x32, 0x4d, 0xe6, 0x0b, + 0x77, 0x12, 0xa9, 0x4d, 0xa9, 0x31, 0xce, 0xa8, 0xd7, 0x05, 0xc5, 0x87, 0x65, 0xa8, 0x54, 0x7a, + 0x5d, 0xfa, 0x50, 0x49, 0x84, 0xf4, 0xd8, 0x61, 0x09, 0x30, 0x9c, 0xc9, 0xaf, 0xe4, 0xf4, 0xaa, + 0xa8, 0xae, 0x86, 0xcc, 0xe4, 0x4a, 0x8e, 0x65, 0x32, 0xa6, 0xf0, 0xb3, 0x38, 0xcb, 0xd2, 0x2a, + 0x3c, 0x35, 0x2d, 0x0c, 0xc4, 0x9e, 0x45, 0xc8, 0xe1, 0x04, 0x2b, 0x0b, 0xe5, 0x21, 0xd8, 0xe9, + 0x62, 0x68, 0x9e, 0x5f, 0x08, 0x32, 0xc1, 0x1a, 0x54, 0x2c, 0xc1, 0x08, 0xd8, 0x3b, 0xfe, 0xc5, + 0x3e, 0x0a, 0xe5, 0x61, 0x9a, 0x8e, 0x74, 0xb2, 0x36, 0xfc, 0xc1, 0xd6, 0x48, 0x0e, 0x75, 0xde, + 0x0f, 0x6f, 0x30, 0xa2, 0x7d, 0xc9, 0x43, 0xa5, 0x3a, 0x2c, 0x79, 0xa8, 0x54, 0xf7, 0x25, 0x17, + 0x30, 0x76, 0x1c, 0x83, 0x4a, 0x93, 0xa9, 0xb0, 0x89, 0xcc, 0xf2, 0x62, 0xb2, 0x32, 0xa4, 0x63, + 0x83, 0x8a, 0x39, 0x12, 0x30, 0x2e, 0x50, 0x27, 0xc2, 0x58, 0xd0, 0x23, 0x69, 0x92, 0x9c, 0x20, + 0x0b, 0x54, 0x88, 0xc4, 0x0a, 0x54, 0x9d, 0xc4, 0x67, 0xf9, 0x5c, 0x24, 0xf6, 0x50, 0x56, 0x4e, + 0xd4, 0xf8, 0x1a, 0x13, 0x3b, 0xcb, 0x0d, 0x14, 0x7b, 0x4d, 0xac, 0x54, 0x68, 0xdd, 0xa4, 0x57, + 0x8d, 0x89, 0x79, 0x35, 0x50, 0x9c, 0xa5, 0x35, 0xf1, 0x24, 0xc9, 0x92, 0xe5, 0x6a, 0x49, 0x66, + 0x29, 0x8d, 0xc6, 0xb2, 0xb4, 0x6d, 0x84, 0x9f, 0xc0, 0x92, 0xbd, 0x3f, 0xb1, 0x42, 0x5b, 0xbc, + 0x5a, 0x7a, 0x09, 0x21, 0xe4, 0x4c, 0x0f, 0x3a, 0xb1, 0xde, 0xee, 0x9f, 0x1d, 0xb6, 0x5b, 0x97, + 0xcf, 0x32, 0x9b, 0xa4, 0xc3, 0x4b, 0x0b, 0x9a, 0x7f, 0xdb, 0x21, 0x5a, 0x85, 0xbb, 0x39, 0x3c, + 0xb9, 0xe1, 0x28, 0x5c, 0xb5, 0x8f, 0xc0, 0x51, 0x86, 0xac, 0xda, 0x48, 0x8f, 0x55, 0xed, 0x00, + 0xc3, 0x9b, 0xfb, 0x13, 0x9a, 0x43, 0x7e, 0x76, 0xc9, 0xcd, 0xad, 0x43, 0xb1, 0xcd, 0x6d, 0xb2, + 0x38, 0x99, 0xb0, 0x5a, 0x65, 0x38, 0x99, 0x4c, 0x34, 0x1a, 0x4b, 0xa6, 0xb6, 0x11, 0x78, 0xbd, + 0x63, 0x30, 0xb0, 0x35, 0x99, 0xea, 0x50, 0x6c, 0xbd, 0x4d, 0x16, 0x5f, 0x8a, 0xc7, 0x59, 0x62, + 0x37, 0x45, 0x83, 0xbc, 0x14, 0x2b, 0x39, 0x76, 0x29, 0x62, 0xca, 0x07, 0xff, 0x7b, 0x87, 0xdd, + 0x19, 0x49, 0xb5, 0x4a, 0x8b, 0x96, 0x4c, 0x09, 0x0d, 0x99, 0xfd, 0x5e, 0xae, 0x74, 0x26, 0x52, + 0x4e, 0x6d, 0x4e, 0x0b, 0xeb, 0x7c, 0x1f, 0xdd, 0x64, 0x08, 0x4e, 0xd0, 0x7c, 0x72, 0xe5, 0xf2, + 0x79, 0xdb, 0xe4, 0x4b, 0x3d, 0x96, 0xa0, 0x01, 0x86, 0x6f, 0xfe, 0xe7, 0xb0, 0x94, 0x16, 0xca, + 0x3d, 0xa4, 0x46, 0x62, 0x20, 0x76, 0xf3, 0x87, 0x1c, 0xce, 0x89, 0xb3, 0x6c, 0x26, 0x03, 0x9b, + 0x1e, 0xd9, 0x38, 0x84, 0x50, 0x2c, 0x27, 0x9a, 0xac, 0xb7, 0x33, 0x8c, 0x97, 0xcb, 0x3c, 0x17, + 0x66, 0xa4, 0x65, 0x0e, 0xcd, 0x78, 0xe4, 0x5e, 0x43, 0x98, 0xb3, 0xfc, 0xa6, 0x23, 0x8d, 0xdf, + 0xf6, 0x26, 0xe0, 0xf2, 0xf0, 0x2e, 0xfd, 0x7e, 0x12, 0xae, 0x6a, 0x2f, 0x0e, 0xf9, 0xc8, 0x6b, + 0xf6, 0x61, 0xe5, 0x3c, 0x06, 0x93, 0x57, 0x35, 0x98, 0xf1, 0xf8, 0x0c, 0x3d, 0xe7, 0xdc, 0xfa, + 0x5d, 0x71, 0xef, 0xfb, 0xef, 0x0e, 0xfb, 0xb8, 0x76, 0x77, 0x0c, 0xb3, 0x59, 0xfe, 0x3e, 0xba, + 0xe9, 0x2a, 0x9e, 0x6c, 0xbf, 0x6b, 0x30, 0xef, 0x26, 0xf2, 0xdd, 0x4d, 0x87, 0xe1, 0x4e, 0xa3, + 0xdc, 0x78, 0x77, 0x18, 0xee, 0x91, 0x0d, 0x3a, 0x46, 0x62, 0x9d, 0x46, 0x9d, 0xf4, 0x46, 0x3f, + 0xb2, 0x5b, 0x4f, 0xc5, 0xf4, 0x6a, 0xa5, 0x38, 0xf5, 0x1d, 0x61, 0x23, 0xb9, 0xc0, 0x9f, 0x47, + 0x08, 0x17, 0xf0, 0xc1, 0x0e, 0xd7, 0x79, 0x5f, 0x66, 0xac, 0xd4, 0x70, 0xa8, 0xe5, 0xb2, 0x8c, + 0xde, 0x52, 0xeb, 0x42, 0x2a, 0xde, 0x97, 0x35, 0x60, 0xe4, 0x99, 0xbf, 0xbd, 0xa7, 0x62, 0x0d, + 0xe5, 0xf3, 0x22, 0xdf, 0xde, 0x2b, 0x3d, 0xfa, 0xf6, 0x8e, 0xb1, 0x20, 0xe5, 0xad, 0x54, 0x85, + 0x48, 0xa7, 0xbc, 0x53, 0xa3, 0x29, 0x5f, 0x41, 0x61, 0x47, 0x52, 0xfe, 0xed, 0x9a, 0xa1, 0x5e, + 0x6c, 0x6c, 0xad, 0x0d, 0x3a, 0xe8, 0xc4, 0xe2, 0x4b, 0xa4, 0xe8, 0x15, 0x36, 0x2b, 0xd9, 0x6b, + 0x6b, 0x25, 0x82, 0xa5, 0xec, 0x6f, 0xa1, 0x7c, 0xf0, 0x6b, 0x76, 0xbb, 0xfa, 0x1f, 0xf5, 0x39, + 0xfd, 0x68, 0x80, 0x66, 0x87, 0x33, 0xe8, 0xcc, 0xd7, 0xbf, 0x40, 0xe5, 0xba, 0x69, 0xfd, 0x02, + 0x55, 0xa8, 0xdb, 0xbe, 0x40, 0x95, 0x10, 0x8e, 0x9c, 0xdf, 0x26, 0xed, 0x8f, 0xde, 0xab, 0xb1, + 0xc8, 0x08, 0x0a, 0x1e, 0x7d, 0xfe, 0x17, 0x2e, 0xdd, 0xbd, 0xb6, 0x94, 0x24, 0x0a, 0xf7, 0x41, + 0x27, 0x16, 0xbf, 0x2f, 0x39, 0xb5, 0x2a, 0xad, 0xb1, 0x18, 0x8d, 0xc2, 0x7a, 0xbf, 0x1b, 0xec, + 0x1c, 0x9f, 0x3e, 0xfe, 0xe5, 0xe1, 0x3a, 0xb1, 0x60, 0x4c, 0x3f, 0x91, 0x83, 0xcd, 0xaf, 0xc1, + 0x5c, 0x0e, 0xd6, 0x76, 0x50, 0x7c, 0x41, 0x1d, 0x50, 0xdf, 0x5b, 0x2f, 0x6e, 0x15, 0xda, 0xe3, + 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x54, 0x21, 0xd6, 0xe8, 0xaa, 0x15, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TabletManagerClient is the client API for TabletManager service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TabletManagerClient interface { + // Ping returns the input payload + Ping(ctx context.Context, in *tabletmanagerdata.PingRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PingResponse, error) + // Sleep sleeps for the provided duration + Sleep(ctx context.Context, in *tabletmanagerdata.SleepRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SleepResponse, error) + // ExecuteHook executes the hook remotely + ExecuteHook(ctx context.Context, in *tabletmanagerdata.ExecuteHookRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteHookResponse, error) + // GetSchema asks the tablet for its schema + GetSchema(ctx context.Context, in *tabletmanagerdata.GetSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.GetSchemaResponse, error) + // GetPermissions asks the tablet for its permissions + GetPermissions(ctx context.Context, in *tabletmanagerdata.GetPermissionsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.GetPermissionsResponse, error) + SetReadOnly(ctx context.Context, in *tabletmanagerdata.SetReadOnlyRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetReadOnlyResponse, error) + SetReadWrite(ctx context.Context, in *tabletmanagerdata.SetReadWriteRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetReadWriteResponse, error) + // ChangeType asks the remote tablet to change its type + ChangeType(ctx context.Context, in *tabletmanagerdata.ChangeTypeRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ChangeTypeResponse, error) + RefreshState(ctx context.Context, in *tabletmanagerdata.RefreshStateRequest, opts ...grpc.CallOption) (*tabletmanagerdata.RefreshStateResponse, error) + RunHealthCheck(ctx context.Context, in *tabletmanagerdata.RunHealthCheckRequest, opts ...grpc.CallOption) (*tabletmanagerdata.RunHealthCheckResponse, error) + IgnoreHealthError(ctx context.Context, in *tabletmanagerdata.IgnoreHealthErrorRequest, opts ...grpc.CallOption) (*tabletmanagerdata.IgnoreHealthErrorResponse, error) + ReloadSchema(ctx context.Context, in *tabletmanagerdata.ReloadSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReloadSchemaResponse, error) + PreflightSchema(ctx context.Context, in *tabletmanagerdata.PreflightSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PreflightSchemaResponse, error) + ApplySchema(ctx context.Context, in *tabletmanagerdata.ApplySchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ApplySchemaResponse, error) + LockTables(ctx context.Context, in *tabletmanagerdata.LockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.LockTablesResponse, error) + UnlockTables(ctx context.Context, in *tabletmanagerdata.UnlockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UnlockTablesResponse, error) + ExecuteFetchAsDba(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsDbaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error) + ExecuteFetchAsAllPrivs(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error) + ExecuteFetchAsApp(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAppRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) + // ReplicationStatus returns the current replication status. + ReplicationStatus(ctx context.Context, in *tabletmanagerdata.ReplicationStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicationStatusResponse, error) + // MasterPosition returns the current master position + MasterPosition(ctx context.Context, in *tabletmanagerdata.MasterPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.MasterPositionResponse, error) + // WaitForPosition waits for the position to be reached + WaitForPosition(ctx context.Context, in *tabletmanagerdata.WaitForPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.WaitForPositionResponse, error) + // StopReplication makes mysql stop its replication + StopReplication(ctx context.Context, in *tabletmanagerdata.StopReplicationRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopReplicationResponse, error) + // StopReplicationMinimum stops the mysql replication after it reaches + // the provided minimum point + StopReplicationMinimum(ctx context.Context, in *tabletmanagerdata.StopReplicationMinimumRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopReplicationMinimumResponse, error) + // StartReplication starts the mysql replication + StartReplication(ctx context.Context, in *tabletmanagerdata.StartReplicationRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartReplicationResponse, error) + // StartReplicationUnitAfter starts the mysql replication until and including + // the provided position + StartReplicationUntilAfter(ctx context.Context, in *tabletmanagerdata.StartReplicationUntilAfterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartReplicationUntilAfterResponse, error) + // GetReplicas asks for the list of mysql replicas + GetReplicas(ctx context.Context, in *tabletmanagerdata.GetReplicasRequest, opts ...grpc.CallOption) (*tabletmanagerdata.GetReplicasResponse, error) + // VReplication API + VReplicationExec(ctx context.Context, in *tabletmanagerdata.VReplicationExecRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationExecResponse, error) + VReplicationWaitForPos(ctx context.Context, in *tabletmanagerdata.VReplicationWaitForPosRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationWaitForPosResponse, error) + // ResetReplication makes the target not replicating + ResetReplication(ctx context.Context, in *tabletmanagerdata.ResetReplicationRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ResetReplicationResponse, error) + // InitMaster initializes the tablet as a master + InitMaster(ctx context.Context, in *tabletmanagerdata.InitMasterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitMasterResponse, error) + // PopulateReparentJournal tells the tablet to add an entry to its + // reparent journal + PopulateReparentJournal(ctx context.Context, in *tabletmanagerdata.PopulateReparentJournalRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PopulateReparentJournalResponse, error) + // InitReplica tells the tablet to reparent to the master unconditionally + InitReplica(ctx context.Context, in *tabletmanagerdata.InitReplicaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitReplicaResponse, error) + // DemoteMaster tells the soon-to-be-former master it's gonna change + DemoteMaster(ctx context.Context, in *tabletmanagerdata.DemoteMasterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.DemoteMasterResponse, error) + // UndoDemoteMaster reverts all changes made by DemoteMaster + UndoDemoteMaster(ctx context.Context, in *tabletmanagerdata.UndoDemoteMasterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UndoDemoteMasterResponse, error) + // ReplicaWasPromoted tells the remote tablet it is now the master + ReplicaWasPromoted(ctx context.Context, in *tabletmanagerdata.ReplicaWasPromotedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicaWasPromotedResponse, error) + // SetMaster tells the replica to reparent + SetMaster(ctx context.Context, in *tabletmanagerdata.SetMasterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetMasterResponse, error) + // ReplicaWasRestarted tells the remote tablet its master has changed + ReplicaWasRestarted(ctx context.Context, in *tabletmanagerdata.ReplicaWasRestartedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicaWasRestartedResponse, error) + // StopReplicationAndGetStatus stops MySQL replication, and returns the + // replication status + StopReplicationAndGetStatus(ctx context.Context, in *tabletmanagerdata.StopReplicationAndGetStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopReplicationAndGetStatusResponse, error) + // PromoteReplica makes the replica the new master + PromoteReplica(ctx context.Context, in *tabletmanagerdata.PromoteReplicaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PromoteReplicaResponse, error) + Backup(ctx context.Context, in *tabletmanagerdata.BackupRequest, opts ...grpc.CallOption) (TabletManager_BackupClient, error) + // RestoreFromBackup deletes all local data and restores it from the latest backup. + RestoreFromBackup(ctx context.Context, in *tabletmanagerdata.RestoreFromBackupRequest, opts ...grpc.CallOption) (TabletManager_RestoreFromBackupClient, error) + // Deprecated - remove after 7.0 + SlaveStatus(ctx context.Context, in *tabletmanagerdata.SlaveStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SlaveStatusResponse, error) + // Deprecated + StopSlave(ctx context.Context, in *tabletmanagerdata.StopSlaveRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopSlaveResponse, error) + // Deprecated + StopSlaveMinimum(ctx context.Context, in *tabletmanagerdata.StopSlaveMinimumRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopSlaveMinimumResponse, error) + // Deprecated + StartSlave(ctx context.Context, in *tabletmanagerdata.StartSlaveRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartSlaveResponse, error) + // Deprecated + StartSlaveUntilAfter(ctx context.Context, in *tabletmanagerdata.StartSlaveUntilAfterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartSlaveUntilAfterResponse, error) + // Deprecated + GetSlaves(ctx context.Context, in *tabletmanagerdata.GetSlavesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.GetSlavesResponse, error) + // Deprecated + InitSlave(ctx context.Context, in *tabletmanagerdata.InitSlaveRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitSlaveResponse, error) + // Deprecated + SlaveWasPromoted(ctx context.Context, in *tabletmanagerdata.SlaveWasPromotedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SlaveWasPromotedResponse, error) + // Deprecated + SlaveWasRestarted(ctx context.Context, in *tabletmanagerdata.SlaveWasRestartedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SlaveWasRestartedResponse, error) +} + +type tabletManagerClient struct { + cc *grpc.ClientConn +} + +func NewTabletManagerClient(cc *grpc.ClientConn) TabletManagerClient { + return &tabletManagerClient{cc} +} + +func (c *tabletManagerClient) Ping(ctx context.Context, in *tabletmanagerdata.PingRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PingResponse, error) { + out := new(tabletmanagerdata.PingResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/Ping", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) Sleep(ctx context.Context, in *tabletmanagerdata.SleepRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SleepResponse, error) { + out := new(tabletmanagerdata.SleepResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/Sleep", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ExecuteHook(ctx context.Context, in *tabletmanagerdata.ExecuteHookRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteHookResponse, error) { + out := new(tabletmanagerdata.ExecuteHookResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ExecuteHook", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) GetSchema(ctx context.Context, in *tabletmanagerdata.GetSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.GetSchemaResponse, error) { + out := new(tabletmanagerdata.GetSchemaResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) GetPermissions(ctx context.Context, in *tabletmanagerdata.GetPermissionsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.GetPermissionsResponse, error) { + out := new(tabletmanagerdata.GetPermissionsResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/GetPermissions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) SetReadOnly(ctx context.Context, in *tabletmanagerdata.SetReadOnlyRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetReadOnlyResponse, error) { + out := new(tabletmanagerdata.SetReadOnlyResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/SetReadOnly", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) SetReadWrite(ctx context.Context, in *tabletmanagerdata.SetReadWriteRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetReadWriteResponse, error) { + out := new(tabletmanagerdata.SetReadWriteResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/SetReadWrite", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ChangeType(ctx context.Context, in *tabletmanagerdata.ChangeTypeRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ChangeTypeResponse, error) { + out := new(tabletmanagerdata.ChangeTypeResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ChangeType", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) RefreshState(ctx context.Context, in *tabletmanagerdata.RefreshStateRequest, opts ...grpc.CallOption) (*tabletmanagerdata.RefreshStateResponse, error) { + out := new(tabletmanagerdata.RefreshStateResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/RefreshState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) RunHealthCheck(ctx context.Context, in *tabletmanagerdata.RunHealthCheckRequest, opts ...grpc.CallOption) (*tabletmanagerdata.RunHealthCheckResponse, error) { + out := new(tabletmanagerdata.RunHealthCheckResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/RunHealthCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) IgnoreHealthError(ctx context.Context, in *tabletmanagerdata.IgnoreHealthErrorRequest, opts ...grpc.CallOption) (*tabletmanagerdata.IgnoreHealthErrorResponse, error) { + out := new(tabletmanagerdata.IgnoreHealthErrorResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/IgnoreHealthError", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ReloadSchema(ctx context.Context, in *tabletmanagerdata.ReloadSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReloadSchemaResponse, error) { + out := new(tabletmanagerdata.ReloadSchemaResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ReloadSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) PreflightSchema(ctx context.Context, in *tabletmanagerdata.PreflightSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PreflightSchemaResponse, error) { + out := new(tabletmanagerdata.PreflightSchemaResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/PreflightSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ApplySchema(ctx context.Context, in *tabletmanagerdata.ApplySchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ApplySchemaResponse, error) { + out := new(tabletmanagerdata.ApplySchemaResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ApplySchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) LockTables(ctx context.Context, in *tabletmanagerdata.LockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.LockTablesResponse, error) { + out := new(tabletmanagerdata.LockTablesResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/LockTables", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) UnlockTables(ctx context.Context, in *tabletmanagerdata.UnlockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UnlockTablesResponse, error) { + out := new(tabletmanagerdata.UnlockTablesResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/UnlockTables", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ExecuteFetchAsDba(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsDbaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error) { + out := new(tabletmanagerdata.ExecuteFetchAsDbaResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ExecuteFetchAsDba", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ExecuteFetchAsAllPrivs(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error) { + out := new(tabletmanagerdata.ExecuteFetchAsAllPrivsResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ExecuteFetchAsAllPrivs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ExecuteFetchAsApp(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAppRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) { + out := new(tabletmanagerdata.ExecuteFetchAsAppResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ExecuteFetchAsApp", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ReplicationStatus(ctx context.Context, in *tabletmanagerdata.ReplicationStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicationStatusResponse, error) { + out := new(tabletmanagerdata.ReplicationStatusResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ReplicationStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) MasterPosition(ctx context.Context, in *tabletmanagerdata.MasterPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.MasterPositionResponse, error) { + out := new(tabletmanagerdata.MasterPositionResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/MasterPosition", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) WaitForPosition(ctx context.Context, in *tabletmanagerdata.WaitForPositionRequest, opts ...grpc.CallOption) (*tabletmanagerdata.WaitForPositionResponse, error) { + out := new(tabletmanagerdata.WaitForPositionResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/WaitForPosition", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) StopReplication(ctx context.Context, in *tabletmanagerdata.StopReplicationRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopReplicationResponse, error) { + out := new(tabletmanagerdata.StopReplicationResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/StopReplication", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) StopReplicationMinimum(ctx context.Context, in *tabletmanagerdata.StopReplicationMinimumRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopReplicationMinimumResponse, error) { + out := new(tabletmanagerdata.StopReplicationMinimumResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/StopReplicationMinimum", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) StartReplication(ctx context.Context, in *tabletmanagerdata.StartReplicationRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartReplicationResponse, error) { + out := new(tabletmanagerdata.StartReplicationResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/StartReplication", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) StartReplicationUntilAfter(ctx context.Context, in *tabletmanagerdata.StartReplicationUntilAfterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartReplicationUntilAfterResponse, error) { + out := new(tabletmanagerdata.StartReplicationUntilAfterResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/StartReplicationUntilAfter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) GetReplicas(ctx context.Context, in *tabletmanagerdata.GetReplicasRequest, opts ...grpc.CallOption) (*tabletmanagerdata.GetReplicasResponse, error) { + out := new(tabletmanagerdata.GetReplicasResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/GetReplicas", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) VReplicationExec(ctx context.Context, in *tabletmanagerdata.VReplicationExecRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationExecResponse, error) { + out := new(tabletmanagerdata.VReplicationExecResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/VReplicationExec", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) VReplicationWaitForPos(ctx context.Context, in *tabletmanagerdata.VReplicationWaitForPosRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationWaitForPosResponse, error) { + out := new(tabletmanagerdata.VReplicationWaitForPosResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/VReplicationWaitForPos", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ResetReplication(ctx context.Context, in *tabletmanagerdata.ResetReplicationRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ResetReplicationResponse, error) { + out := new(tabletmanagerdata.ResetReplicationResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ResetReplication", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) InitMaster(ctx context.Context, in *tabletmanagerdata.InitMasterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitMasterResponse, error) { + out := new(tabletmanagerdata.InitMasterResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/InitMaster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) PopulateReparentJournal(ctx context.Context, in *tabletmanagerdata.PopulateReparentJournalRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PopulateReparentJournalResponse, error) { + out := new(tabletmanagerdata.PopulateReparentJournalResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/PopulateReparentJournal", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) InitReplica(ctx context.Context, in *tabletmanagerdata.InitReplicaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitReplicaResponse, error) { + out := new(tabletmanagerdata.InitReplicaResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/InitReplica", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) DemoteMaster(ctx context.Context, in *tabletmanagerdata.DemoteMasterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.DemoteMasterResponse, error) { + out := new(tabletmanagerdata.DemoteMasterResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/DemoteMaster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) UndoDemoteMaster(ctx context.Context, in *tabletmanagerdata.UndoDemoteMasterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UndoDemoteMasterResponse, error) { + out := new(tabletmanagerdata.UndoDemoteMasterResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/UndoDemoteMaster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ReplicaWasPromoted(ctx context.Context, in *tabletmanagerdata.ReplicaWasPromotedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicaWasPromotedResponse, error) { + out := new(tabletmanagerdata.ReplicaWasPromotedResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ReplicaWasPromoted", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) SetMaster(ctx context.Context, in *tabletmanagerdata.SetMasterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SetMasterResponse, error) { + out := new(tabletmanagerdata.SetMasterResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/SetMaster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ReplicaWasRestarted(ctx context.Context, in *tabletmanagerdata.ReplicaWasRestartedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReplicaWasRestartedResponse, error) { + out := new(tabletmanagerdata.ReplicaWasRestartedResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ReplicaWasRestarted", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) StopReplicationAndGetStatus(ctx context.Context, in *tabletmanagerdata.StopReplicationAndGetStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopReplicationAndGetStatusResponse, error) { + out := new(tabletmanagerdata.StopReplicationAndGetStatusResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/StopReplicationAndGetStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) PromoteReplica(ctx context.Context, in *tabletmanagerdata.PromoteReplicaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PromoteReplicaResponse, error) { + out := new(tabletmanagerdata.PromoteReplicaResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/PromoteReplica", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) Backup(ctx context.Context, in *tabletmanagerdata.BackupRequest, opts ...grpc.CallOption) (TabletManager_BackupClient, error) { + stream, err := c.cc.NewStream(ctx, &_TabletManager_serviceDesc.Streams[0], "/tabletmanagerservice.TabletManager/Backup", opts...) + if err != nil { + return nil, err + } + x := &tabletManagerBackupClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TabletManager_BackupClient interface { + Recv() (*tabletmanagerdata.BackupResponse, error) + grpc.ClientStream +} + +type tabletManagerBackupClient struct { + grpc.ClientStream +} + +func (x *tabletManagerBackupClient) Recv() (*tabletmanagerdata.BackupResponse, error) { + m := new(tabletmanagerdata.BackupResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *tabletManagerClient) RestoreFromBackup(ctx context.Context, in *tabletmanagerdata.RestoreFromBackupRequest, opts ...grpc.CallOption) (TabletManager_RestoreFromBackupClient, error) { + stream, err := c.cc.NewStream(ctx, &_TabletManager_serviceDesc.Streams[1], "/tabletmanagerservice.TabletManager/RestoreFromBackup", opts...) + if err != nil { + return nil, err + } + x := &tabletManagerRestoreFromBackupClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TabletManager_RestoreFromBackupClient interface { + Recv() (*tabletmanagerdata.RestoreFromBackupResponse, error) + grpc.ClientStream +} + +type tabletManagerRestoreFromBackupClient struct { + grpc.ClientStream +} + +func (x *tabletManagerRestoreFromBackupClient) Recv() (*tabletmanagerdata.RestoreFromBackupResponse, error) { + m := new(tabletmanagerdata.RestoreFromBackupResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *tabletManagerClient) SlaveStatus(ctx context.Context, in *tabletmanagerdata.SlaveStatusRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SlaveStatusResponse, error) { + out := new(tabletmanagerdata.SlaveStatusResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/SlaveStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) StopSlave(ctx context.Context, in *tabletmanagerdata.StopSlaveRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopSlaveResponse, error) { + out := new(tabletmanagerdata.StopSlaveResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/StopSlave", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) StopSlaveMinimum(ctx context.Context, in *tabletmanagerdata.StopSlaveMinimumRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StopSlaveMinimumResponse, error) { + out := new(tabletmanagerdata.StopSlaveMinimumResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/StopSlaveMinimum", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) StartSlave(ctx context.Context, in *tabletmanagerdata.StartSlaveRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartSlaveResponse, error) { + out := new(tabletmanagerdata.StartSlaveResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/StartSlave", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) StartSlaveUntilAfter(ctx context.Context, in *tabletmanagerdata.StartSlaveUntilAfterRequest, opts ...grpc.CallOption) (*tabletmanagerdata.StartSlaveUntilAfterResponse, error) { + out := new(tabletmanagerdata.StartSlaveUntilAfterResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/StartSlaveUntilAfter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) GetSlaves(ctx context.Context, in *tabletmanagerdata.GetSlavesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.GetSlavesResponse, error) { + out := new(tabletmanagerdata.GetSlavesResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/GetSlaves", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) InitSlave(ctx context.Context, in *tabletmanagerdata.InitSlaveRequest, opts ...grpc.CallOption) (*tabletmanagerdata.InitSlaveResponse, error) { + out := new(tabletmanagerdata.InitSlaveResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/InitSlave", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) SlaveWasPromoted(ctx context.Context, in *tabletmanagerdata.SlaveWasPromotedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SlaveWasPromotedResponse, error) { + out := new(tabletmanagerdata.SlaveWasPromotedResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/SlaveWasPromoted", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) SlaveWasRestarted(ctx context.Context, in *tabletmanagerdata.SlaveWasRestartedRequest, opts ...grpc.CallOption) (*tabletmanagerdata.SlaveWasRestartedResponse, error) { + out := new(tabletmanagerdata.SlaveWasRestartedResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/SlaveWasRestarted", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TabletManagerServer is the server API for TabletManager service. +type TabletManagerServer interface { + // Ping returns the input payload + Ping(context.Context, *tabletmanagerdata.PingRequest) (*tabletmanagerdata.PingResponse, error) + // Sleep sleeps for the provided duration + Sleep(context.Context, *tabletmanagerdata.SleepRequest) (*tabletmanagerdata.SleepResponse, error) + // ExecuteHook executes the hook remotely + ExecuteHook(context.Context, *tabletmanagerdata.ExecuteHookRequest) (*tabletmanagerdata.ExecuteHookResponse, error) + // GetSchema asks the tablet for its schema + GetSchema(context.Context, *tabletmanagerdata.GetSchemaRequest) (*tabletmanagerdata.GetSchemaResponse, error) + // GetPermissions asks the tablet for its permissions + GetPermissions(context.Context, *tabletmanagerdata.GetPermissionsRequest) (*tabletmanagerdata.GetPermissionsResponse, error) + SetReadOnly(context.Context, *tabletmanagerdata.SetReadOnlyRequest) (*tabletmanagerdata.SetReadOnlyResponse, error) + SetReadWrite(context.Context, *tabletmanagerdata.SetReadWriteRequest) (*tabletmanagerdata.SetReadWriteResponse, error) + // ChangeType asks the remote tablet to change its type + ChangeType(context.Context, *tabletmanagerdata.ChangeTypeRequest) (*tabletmanagerdata.ChangeTypeResponse, error) + RefreshState(context.Context, *tabletmanagerdata.RefreshStateRequest) (*tabletmanagerdata.RefreshStateResponse, error) + RunHealthCheck(context.Context, *tabletmanagerdata.RunHealthCheckRequest) (*tabletmanagerdata.RunHealthCheckResponse, error) + IgnoreHealthError(context.Context, *tabletmanagerdata.IgnoreHealthErrorRequest) (*tabletmanagerdata.IgnoreHealthErrorResponse, error) + ReloadSchema(context.Context, *tabletmanagerdata.ReloadSchemaRequest) (*tabletmanagerdata.ReloadSchemaResponse, error) + PreflightSchema(context.Context, *tabletmanagerdata.PreflightSchemaRequest) (*tabletmanagerdata.PreflightSchemaResponse, error) + ApplySchema(context.Context, *tabletmanagerdata.ApplySchemaRequest) (*tabletmanagerdata.ApplySchemaResponse, error) + LockTables(context.Context, *tabletmanagerdata.LockTablesRequest) (*tabletmanagerdata.LockTablesResponse, error) + UnlockTables(context.Context, *tabletmanagerdata.UnlockTablesRequest) (*tabletmanagerdata.UnlockTablesResponse, error) + ExecuteFetchAsDba(context.Context, *tabletmanagerdata.ExecuteFetchAsDbaRequest) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error) + ExecuteFetchAsAllPrivs(context.Context, *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error) + ExecuteFetchAsApp(context.Context, *tabletmanagerdata.ExecuteFetchAsAppRequest) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) + // ReplicationStatus returns the current replication status. + ReplicationStatus(context.Context, *tabletmanagerdata.ReplicationStatusRequest) (*tabletmanagerdata.ReplicationStatusResponse, error) + // MasterPosition returns the current master position + MasterPosition(context.Context, *tabletmanagerdata.MasterPositionRequest) (*tabletmanagerdata.MasterPositionResponse, error) + // WaitForPosition waits for the position to be reached + WaitForPosition(context.Context, *tabletmanagerdata.WaitForPositionRequest) (*tabletmanagerdata.WaitForPositionResponse, error) + // StopReplication makes mysql stop its replication + StopReplication(context.Context, *tabletmanagerdata.StopReplicationRequest) (*tabletmanagerdata.StopReplicationResponse, error) + // StopReplicationMinimum stops the mysql replication after it reaches + // the provided minimum point + StopReplicationMinimum(context.Context, *tabletmanagerdata.StopReplicationMinimumRequest) (*tabletmanagerdata.StopReplicationMinimumResponse, error) + // StartReplication starts the mysql replication + StartReplication(context.Context, *tabletmanagerdata.StartReplicationRequest) (*tabletmanagerdata.StartReplicationResponse, error) + // StartReplicationUnitAfter starts the mysql replication until and including + // the provided position + StartReplicationUntilAfter(context.Context, *tabletmanagerdata.StartReplicationUntilAfterRequest) (*tabletmanagerdata.StartReplicationUntilAfterResponse, error) + // GetReplicas asks for the list of mysql replicas + GetReplicas(context.Context, *tabletmanagerdata.GetReplicasRequest) (*tabletmanagerdata.GetReplicasResponse, error) + // VReplication API + VReplicationExec(context.Context, *tabletmanagerdata.VReplicationExecRequest) (*tabletmanagerdata.VReplicationExecResponse, error) + VReplicationWaitForPos(context.Context, *tabletmanagerdata.VReplicationWaitForPosRequest) (*tabletmanagerdata.VReplicationWaitForPosResponse, error) + // ResetReplication makes the target not replicating + ResetReplication(context.Context, *tabletmanagerdata.ResetReplicationRequest) (*tabletmanagerdata.ResetReplicationResponse, error) + // InitMaster initializes the tablet as a master + InitMaster(context.Context, *tabletmanagerdata.InitMasterRequest) (*tabletmanagerdata.InitMasterResponse, error) + // PopulateReparentJournal tells the tablet to add an entry to its + // reparent journal + PopulateReparentJournal(context.Context, *tabletmanagerdata.PopulateReparentJournalRequest) (*tabletmanagerdata.PopulateReparentJournalResponse, error) + // InitReplica tells the tablet to reparent to the master unconditionally + InitReplica(context.Context, *tabletmanagerdata.InitReplicaRequest) (*tabletmanagerdata.InitReplicaResponse, error) + // DemoteMaster tells the soon-to-be-former master it's gonna change + DemoteMaster(context.Context, *tabletmanagerdata.DemoteMasterRequest) (*tabletmanagerdata.DemoteMasterResponse, error) + // UndoDemoteMaster reverts all changes made by DemoteMaster + UndoDemoteMaster(context.Context, *tabletmanagerdata.UndoDemoteMasterRequest) (*tabletmanagerdata.UndoDemoteMasterResponse, error) + // ReplicaWasPromoted tells the remote tablet it is now the master + ReplicaWasPromoted(context.Context, *tabletmanagerdata.ReplicaWasPromotedRequest) (*tabletmanagerdata.ReplicaWasPromotedResponse, error) + // SetMaster tells the replica to reparent + SetMaster(context.Context, *tabletmanagerdata.SetMasterRequest) (*tabletmanagerdata.SetMasterResponse, error) + // ReplicaWasRestarted tells the remote tablet its master has changed + ReplicaWasRestarted(context.Context, *tabletmanagerdata.ReplicaWasRestartedRequest) (*tabletmanagerdata.ReplicaWasRestartedResponse, error) + // StopReplicationAndGetStatus stops MySQL replication, and returns the + // replication status + StopReplicationAndGetStatus(context.Context, *tabletmanagerdata.StopReplicationAndGetStatusRequest) (*tabletmanagerdata.StopReplicationAndGetStatusResponse, error) + // PromoteReplica makes the replica the new master + PromoteReplica(context.Context, *tabletmanagerdata.PromoteReplicaRequest) (*tabletmanagerdata.PromoteReplicaResponse, error) + Backup(*tabletmanagerdata.BackupRequest, TabletManager_BackupServer) error + // RestoreFromBackup deletes all local data and restores it from the latest backup. + RestoreFromBackup(*tabletmanagerdata.RestoreFromBackupRequest, TabletManager_RestoreFromBackupServer) error + // Deprecated - remove after 7.0 + SlaveStatus(context.Context, *tabletmanagerdata.SlaveStatusRequest) (*tabletmanagerdata.SlaveStatusResponse, error) + // Deprecated + StopSlave(context.Context, *tabletmanagerdata.StopSlaveRequest) (*tabletmanagerdata.StopSlaveResponse, error) + // Deprecated + StopSlaveMinimum(context.Context, *tabletmanagerdata.StopSlaveMinimumRequest) (*tabletmanagerdata.StopSlaveMinimumResponse, error) + // Deprecated + StartSlave(context.Context, *tabletmanagerdata.StartSlaveRequest) (*tabletmanagerdata.StartSlaveResponse, error) + // Deprecated + StartSlaveUntilAfter(context.Context, *tabletmanagerdata.StartSlaveUntilAfterRequest) (*tabletmanagerdata.StartSlaveUntilAfterResponse, error) + // Deprecated + GetSlaves(context.Context, *tabletmanagerdata.GetSlavesRequest) (*tabletmanagerdata.GetSlavesResponse, error) + // Deprecated + InitSlave(context.Context, *tabletmanagerdata.InitSlaveRequest) (*tabletmanagerdata.InitSlaveResponse, error) + // Deprecated + SlaveWasPromoted(context.Context, *tabletmanagerdata.SlaveWasPromotedRequest) (*tabletmanagerdata.SlaveWasPromotedResponse, error) + // Deprecated + SlaveWasRestarted(context.Context, *tabletmanagerdata.SlaveWasRestartedRequest) (*tabletmanagerdata.SlaveWasRestartedResponse, error) +} + +// UnimplementedTabletManagerServer can be embedded to have forward compatible implementations. +type UnimplementedTabletManagerServer struct { +} + +func (*UnimplementedTabletManagerServer) Ping(ctx context.Context, req *tabletmanagerdata.PingRequest) (*tabletmanagerdata.PingResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") +} +func (*UnimplementedTabletManagerServer) Sleep(ctx context.Context, req *tabletmanagerdata.SleepRequest) (*tabletmanagerdata.SleepResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Sleep not implemented") +} +func (*UnimplementedTabletManagerServer) ExecuteHook(ctx context.Context, req *tabletmanagerdata.ExecuteHookRequest) (*tabletmanagerdata.ExecuteHookResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExecuteHook not implemented") +} +func (*UnimplementedTabletManagerServer) GetSchema(ctx context.Context, req *tabletmanagerdata.GetSchemaRequest) (*tabletmanagerdata.GetSchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (*UnimplementedTabletManagerServer) GetPermissions(ctx context.Context, req *tabletmanagerdata.GetPermissionsRequest) (*tabletmanagerdata.GetPermissionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPermissions not implemented") +} +func (*UnimplementedTabletManagerServer) SetReadOnly(ctx context.Context, req *tabletmanagerdata.SetReadOnlyRequest) (*tabletmanagerdata.SetReadOnlyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetReadOnly not implemented") +} +func (*UnimplementedTabletManagerServer) SetReadWrite(ctx context.Context, req *tabletmanagerdata.SetReadWriteRequest) (*tabletmanagerdata.SetReadWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetReadWrite not implemented") +} +func (*UnimplementedTabletManagerServer) ChangeType(ctx context.Context, req *tabletmanagerdata.ChangeTypeRequest) (*tabletmanagerdata.ChangeTypeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChangeType not implemented") +} +func (*UnimplementedTabletManagerServer) RefreshState(ctx context.Context, req *tabletmanagerdata.RefreshStateRequest) (*tabletmanagerdata.RefreshStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RefreshState not implemented") +} +func (*UnimplementedTabletManagerServer) RunHealthCheck(ctx context.Context, req *tabletmanagerdata.RunHealthCheckRequest) (*tabletmanagerdata.RunHealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RunHealthCheck not implemented") +} +func (*UnimplementedTabletManagerServer) IgnoreHealthError(ctx context.Context, req *tabletmanagerdata.IgnoreHealthErrorRequest) (*tabletmanagerdata.IgnoreHealthErrorResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IgnoreHealthError not implemented") +} +func (*UnimplementedTabletManagerServer) ReloadSchema(ctx context.Context, req *tabletmanagerdata.ReloadSchemaRequest) (*tabletmanagerdata.ReloadSchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReloadSchema not implemented") +} +func (*UnimplementedTabletManagerServer) PreflightSchema(ctx context.Context, req *tabletmanagerdata.PreflightSchemaRequest) (*tabletmanagerdata.PreflightSchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PreflightSchema not implemented") +} +func (*UnimplementedTabletManagerServer) ApplySchema(ctx context.Context, req *tabletmanagerdata.ApplySchemaRequest) (*tabletmanagerdata.ApplySchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplySchema not implemented") +} +func (*UnimplementedTabletManagerServer) LockTables(ctx context.Context, req *tabletmanagerdata.LockTablesRequest) (*tabletmanagerdata.LockTablesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LockTables not implemented") +} +func (*UnimplementedTabletManagerServer) UnlockTables(ctx context.Context, req *tabletmanagerdata.UnlockTablesRequest) (*tabletmanagerdata.UnlockTablesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnlockTables not implemented") +} +func (*UnimplementedTabletManagerServer) ExecuteFetchAsDba(ctx context.Context, req *tabletmanagerdata.ExecuteFetchAsDbaRequest) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExecuteFetchAsDba not implemented") +} +func (*UnimplementedTabletManagerServer) ExecuteFetchAsAllPrivs(ctx context.Context, req *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExecuteFetchAsAllPrivs not implemented") +} +func (*UnimplementedTabletManagerServer) ExecuteFetchAsApp(ctx context.Context, req *tabletmanagerdata.ExecuteFetchAsAppRequest) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExecuteFetchAsApp not implemented") +} +func (*UnimplementedTabletManagerServer) ReplicationStatus(ctx context.Context, req *tabletmanagerdata.ReplicationStatusRequest) (*tabletmanagerdata.ReplicationStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReplicationStatus not implemented") +} +func (*UnimplementedTabletManagerServer) MasterPosition(ctx context.Context, req *tabletmanagerdata.MasterPositionRequest) (*tabletmanagerdata.MasterPositionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MasterPosition not implemented") +} +func (*UnimplementedTabletManagerServer) WaitForPosition(ctx context.Context, req *tabletmanagerdata.WaitForPositionRequest) (*tabletmanagerdata.WaitForPositionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WaitForPosition not implemented") +} +func (*UnimplementedTabletManagerServer) StopReplication(ctx context.Context, req *tabletmanagerdata.StopReplicationRequest) (*tabletmanagerdata.StopReplicationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopReplication not implemented") +} +func (*UnimplementedTabletManagerServer) StopReplicationMinimum(ctx context.Context, req *tabletmanagerdata.StopReplicationMinimumRequest) (*tabletmanagerdata.StopReplicationMinimumResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopReplicationMinimum not implemented") +} +func (*UnimplementedTabletManagerServer) StartReplication(ctx context.Context, req *tabletmanagerdata.StartReplicationRequest) (*tabletmanagerdata.StartReplicationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartReplication not implemented") +} +func (*UnimplementedTabletManagerServer) StartReplicationUntilAfter(ctx context.Context, req *tabletmanagerdata.StartReplicationUntilAfterRequest) (*tabletmanagerdata.StartReplicationUntilAfterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartReplicationUntilAfter not implemented") +} +func (*UnimplementedTabletManagerServer) GetReplicas(ctx context.Context, req *tabletmanagerdata.GetReplicasRequest) (*tabletmanagerdata.GetReplicasResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetReplicas not implemented") +} +func (*UnimplementedTabletManagerServer) VReplicationExec(ctx context.Context, req *tabletmanagerdata.VReplicationExecRequest) (*tabletmanagerdata.VReplicationExecResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VReplicationExec not implemented") +} +func (*UnimplementedTabletManagerServer) VReplicationWaitForPos(ctx context.Context, req *tabletmanagerdata.VReplicationWaitForPosRequest) (*tabletmanagerdata.VReplicationWaitForPosResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VReplicationWaitForPos not implemented") +} +func (*UnimplementedTabletManagerServer) ResetReplication(ctx context.Context, req *tabletmanagerdata.ResetReplicationRequest) (*tabletmanagerdata.ResetReplicationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResetReplication not implemented") +} +func (*UnimplementedTabletManagerServer) InitMaster(ctx context.Context, req *tabletmanagerdata.InitMasterRequest) (*tabletmanagerdata.InitMasterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitMaster not implemented") +} +func (*UnimplementedTabletManagerServer) PopulateReparentJournal(ctx context.Context, req *tabletmanagerdata.PopulateReparentJournalRequest) (*tabletmanagerdata.PopulateReparentJournalResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PopulateReparentJournal not implemented") +} +func (*UnimplementedTabletManagerServer) InitReplica(ctx context.Context, req *tabletmanagerdata.InitReplicaRequest) (*tabletmanagerdata.InitReplicaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitReplica not implemented") +} +func (*UnimplementedTabletManagerServer) DemoteMaster(ctx context.Context, req *tabletmanagerdata.DemoteMasterRequest) (*tabletmanagerdata.DemoteMasterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DemoteMaster not implemented") +} +func (*UnimplementedTabletManagerServer) UndoDemoteMaster(ctx context.Context, req *tabletmanagerdata.UndoDemoteMasterRequest) (*tabletmanagerdata.UndoDemoteMasterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UndoDemoteMaster not implemented") +} +func (*UnimplementedTabletManagerServer) ReplicaWasPromoted(ctx context.Context, req *tabletmanagerdata.ReplicaWasPromotedRequest) (*tabletmanagerdata.ReplicaWasPromotedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReplicaWasPromoted not implemented") +} +func (*UnimplementedTabletManagerServer) SetMaster(ctx context.Context, req *tabletmanagerdata.SetMasterRequest) (*tabletmanagerdata.SetMasterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetMaster not implemented") +} +func (*UnimplementedTabletManagerServer) ReplicaWasRestarted(ctx context.Context, req *tabletmanagerdata.ReplicaWasRestartedRequest) (*tabletmanagerdata.ReplicaWasRestartedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReplicaWasRestarted not implemented") +} +func (*UnimplementedTabletManagerServer) StopReplicationAndGetStatus(ctx context.Context, req *tabletmanagerdata.StopReplicationAndGetStatusRequest) (*tabletmanagerdata.StopReplicationAndGetStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopReplicationAndGetStatus not implemented") +} +func (*UnimplementedTabletManagerServer) PromoteReplica(ctx context.Context, req *tabletmanagerdata.PromoteReplicaRequest) (*tabletmanagerdata.PromoteReplicaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PromoteReplica not implemented") +} +func (*UnimplementedTabletManagerServer) Backup(req *tabletmanagerdata.BackupRequest, srv TabletManager_BackupServer) error { + return status.Errorf(codes.Unimplemented, "method Backup not implemented") +} +func (*UnimplementedTabletManagerServer) RestoreFromBackup(req *tabletmanagerdata.RestoreFromBackupRequest, srv TabletManager_RestoreFromBackupServer) error { + return status.Errorf(codes.Unimplemented, "method RestoreFromBackup not implemented") +} +func (*UnimplementedTabletManagerServer) SlaveStatus(ctx context.Context, req *tabletmanagerdata.SlaveStatusRequest) (*tabletmanagerdata.SlaveStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SlaveStatus not implemented") +} +func (*UnimplementedTabletManagerServer) StopSlave(ctx context.Context, req *tabletmanagerdata.StopSlaveRequest) (*tabletmanagerdata.StopSlaveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopSlave not implemented") +} +func (*UnimplementedTabletManagerServer) StopSlaveMinimum(ctx context.Context, req *tabletmanagerdata.StopSlaveMinimumRequest) (*tabletmanagerdata.StopSlaveMinimumResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopSlaveMinimum not implemented") +} +func (*UnimplementedTabletManagerServer) StartSlave(ctx context.Context, req *tabletmanagerdata.StartSlaveRequest) (*tabletmanagerdata.StartSlaveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartSlave not implemented") +} +func (*UnimplementedTabletManagerServer) StartSlaveUntilAfter(ctx context.Context, req *tabletmanagerdata.StartSlaveUntilAfterRequest) (*tabletmanagerdata.StartSlaveUntilAfterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartSlaveUntilAfter not implemented") +} +func (*UnimplementedTabletManagerServer) GetSlaves(ctx context.Context, req *tabletmanagerdata.GetSlavesRequest) (*tabletmanagerdata.GetSlavesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSlaves not implemented") +} +func (*UnimplementedTabletManagerServer) InitSlave(ctx context.Context, req *tabletmanagerdata.InitSlaveRequest) (*tabletmanagerdata.InitSlaveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitSlave not implemented") +} +func (*UnimplementedTabletManagerServer) SlaveWasPromoted(ctx context.Context, req *tabletmanagerdata.SlaveWasPromotedRequest) (*tabletmanagerdata.SlaveWasPromotedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SlaveWasPromoted not implemented") +} +func (*UnimplementedTabletManagerServer) SlaveWasRestarted(ctx context.Context, req *tabletmanagerdata.SlaveWasRestartedRequest) (*tabletmanagerdata.SlaveWasRestartedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SlaveWasRestarted not implemented") +} + +func RegisterTabletManagerServer(s *grpc.Server, srv TabletManagerServer) { + s.RegisterService(&_TabletManager_serviceDesc, srv) +} + +func _TabletManager_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.PingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).Ping(ctx, req.(*tabletmanagerdata.PingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_Sleep_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.SleepRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).Sleep(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/Sleep", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).Sleep(ctx, req.(*tabletmanagerdata.SleepRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ExecuteHook_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ExecuteHookRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ExecuteHook(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ExecuteHook", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ExecuteHook(ctx, req.(*tabletmanagerdata.ExecuteHookRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.GetSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).GetSchema(ctx, req.(*tabletmanagerdata.GetSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_GetPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.GetPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).GetPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/GetPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).GetPermissions(ctx, req.(*tabletmanagerdata.GetPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_SetReadOnly_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.SetReadOnlyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).SetReadOnly(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/SetReadOnly", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).SetReadOnly(ctx, req.(*tabletmanagerdata.SetReadOnlyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_SetReadWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.SetReadWriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).SetReadWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/SetReadWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).SetReadWrite(ctx, req.(*tabletmanagerdata.SetReadWriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ChangeType_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ChangeTypeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ChangeType(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ChangeType", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ChangeType(ctx, req.(*tabletmanagerdata.ChangeTypeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_RefreshState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.RefreshStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).RefreshState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/RefreshState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).RefreshState(ctx, req.(*tabletmanagerdata.RefreshStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_RunHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.RunHealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).RunHealthCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/RunHealthCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).RunHealthCheck(ctx, req.(*tabletmanagerdata.RunHealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_IgnoreHealthError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.IgnoreHealthErrorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).IgnoreHealthError(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/IgnoreHealthError", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).IgnoreHealthError(ctx, req.(*tabletmanagerdata.IgnoreHealthErrorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ReloadSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ReloadSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ReloadSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ReloadSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ReloadSchema(ctx, req.(*tabletmanagerdata.ReloadSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_PreflightSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.PreflightSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).PreflightSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/PreflightSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).PreflightSchema(ctx, req.(*tabletmanagerdata.PreflightSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ApplySchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ApplySchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ApplySchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ApplySchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ApplySchema(ctx, req.(*tabletmanagerdata.ApplySchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_LockTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.LockTablesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).LockTables(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/LockTables", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).LockTables(ctx, req.(*tabletmanagerdata.LockTablesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_UnlockTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.UnlockTablesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).UnlockTables(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/UnlockTables", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).UnlockTables(ctx, req.(*tabletmanagerdata.UnlockTablesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ExecuteFetchAsDba_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ExecuteFetchAsDbaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ExecuteFetchAsDba(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ExecuteFetchAsDba", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ExecuteFetchAsDba(ctx, req.(*tabletmanagerdata.ExecuteFetchAsDbaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ExecuteFetchAsAllPrivs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ExecuteFetchAsAllPrivsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ExecuteFetchAsAllPrivs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ExecuteFetchAsAllPrivs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ExecuteFetchAsAllPrivs(ctx, req.(*tabletmanagerdata.ExecuteFetchAsAllPrivsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ExecuteFetchAsApp_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ExecuteFetchAsAppRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ExecuteFetchAsApp(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ExecuteFetchAsApp", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ExecuteFetchAsApp(ctx, req.(*tabletmanagerdata.ExecuteFetchAsAppRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ReplicationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ReplicationStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ReplicationStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ReplicationStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ReplicationStatus(ctx, req.(*tabletmanagerdata.ReplicationStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_MasterPosition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.MasterPositionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).MasterPosition(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/MasterPosition", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).MasterPosition(ctx, req.(*tabletmanagerdata.MasterPositionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_WaitForPosition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.WaitForPositionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).WaitForPosition(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/WaitForPosition", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).WaitForPosition(ctx, req.(*tabletmanagerdata.WaitForPositionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_StopReplication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.StopReplicationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).StopReplication(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/StopReplication", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).StopReplication(ctx, req.(*tabletmanagerdata.StopReplicationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_StopReplicationMinimum_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.StopReplicationMinimumRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).StopReplicationMinimum(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/StopReplicationMinimum", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).StopReplicationMinimum(ctx, req.(*tabletmanagerdata.StopReplicationMinimumRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_StartReplication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.StartReplicationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).StartReplication(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/StartReplication", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).StartReplication(ctx, req.(*tabletmanagerdata.StartReplicationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_StartReplicationUntilAfter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.StartReplicationUntilAfterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).StartReplicationUntilAfter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/StartReplicationUntilAfter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).StartReplicationUntilAfter(ctx, req.(*tabletmanagerdata.StartReplicationUntilAfterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_GetReplicas_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.GetReplicasRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).GetReplicas(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/GetReplicas", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).GetReplicas(ctx, req.(*tabletmanagerdata.GetReplicasRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_VReplicationExec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.VReplicationExecRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).VReplicationExec(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/VReplicationExec", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).VReplicationExec(ctx, req.(*tabletmanagerdata.VReplicationExecRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_VReplicationWaitForPos_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.VReplicationWaitForPosRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).VReplicationWaitForPos(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/VReplicationWaitForPos", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).VReplicationWaitForPos(ctx, req.(*tabletmanagerdata.VReplicationWaitForPosRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ResetReplication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ResetReplicationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ResetReplication(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ResetReplication", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ResetReplication(ctx, req.(*tabletmanagerdata.ResetReplicationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_InitMaster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.InitMasterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).InitMaster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/InitMaster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).InitMaster(ctx, req.(*tabletmanagerdata.InitMasterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_PopulateReparentJournal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.PopulateReparentJournalRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).PopulateReparentJournal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/PopulateReparentJournal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).PopulateReparentJournal(ctx, req.(*tabletmanagerdata.PopulateReparentJournalRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_InitReplica_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.InitReplicaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).InitReplica(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/InitReplica", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).InitReplica(ctx, req.(*tabletmanagerdata.InitReplicaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_DemoteMaster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.DemoteMasterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).DemoteMaster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/DemoteMaster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).DemoteMaster(ctx, req.(*tabletmanagerdata.DemoteMasterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_UndoDemoteMaster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.UndoDemoteMasterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).UndoDemoteMaster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/UndoDemoteMaster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).UndoDemoteMaster(ctx, req.(*tabletmanagerdata.UndoDemoteMasterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ReplicaWasPromoted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ReplicaWasPromotedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ReplicaWasPromoted(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ReplicaWasPromoted", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ReplicaWasPromoted(ctx, req.(*tabletmanagerdata.ReplicaWasPromotedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_SetMaster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.SetMasterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).SetMaster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/SetMaster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).SetMaster(ctx, req.(*tabletmanagerdata.SetMasterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ReplicaWasRestarted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ReplicaWasRestartedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ReplicaWasRestarted(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ReplicaWasRestarted", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ReplicaWasRestarted(ctx, req.(*tabletmanagerdata.ReplicaWasRestartedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_StopReplicationAndGetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.StopReplicationAndGetStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).StopReplicationAndGetStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/StopReplicationAndGetStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).StopReplicationAndGetStatus(ctx, req.(*tabletmanagerdata.StopReplicationAndGetStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_PromoteReplica_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.PromoteReplicaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).PromoteReplica(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/PromoteReplica", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).PromoteReplica(ctx, req.(*tabletmanagerdata.PromoteReplicaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_Backup_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(tabletmanagerdata.BackupRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TabletManagerServer).Backup(m, &tabletManagerBackupServer{stream}) +} + +type TabletManager_BackupServer interface { + Send(*tabletmanagerdata.BackupResponse) error + grpc.ServerStream +} + +type tabletManagerBackupServer struct { + grpc.ServerStream +} + +func (x *tabletManagerBackupServer) Send(m *tabletmanagerdata.BackupResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _TabletManager_RestoreFromBackup_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(tabletmanagerdata.RestoreFromBackupRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TabletManagerServer).RestoreFromBackup(m, &tabletManagerRestoreFromBackupServer{stream}) +} + +type TabletManager_RestoreFromBackupServer interface { + Send(*tabletmanagerdata.RestoreFromBackupResponse) error + grpc.ServerStream +} + +type tabletManagerRestoreFromBackupServer struct { + grpc.ServerStream +} + +func (x *tabletManagerRestoreFromBackupServer) Send(m *tabletmanagerdata.RestoreFromBackupResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _TabletManager_SlaveStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.SlaveStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).SlaveStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/SlaveStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).SlaveStatus(ctx, req.(*tabletmanagerdata.SlaveStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_StopSlave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.StopSlaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).StopSlave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/StopSlave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).StopSlave(ctx, req.(*tabletmanagerdata.StopSlaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_StopSlaveMinimum_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.StopSlaveMinimumRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).StopSlaveMinimum(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/StopSlaveMinimum", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).StopSlaveMinimum(ctx, req.(*tabletmanagerdata.StopSlaveMinimumRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_StartSlave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.StartSlaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).StartSlave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/StartSlave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).StartSlave(ctx, req.(*tabletmanagerdata.StartSlaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_StartSlaveUntilAfter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.StartSlaveUntilAfterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).StartSlaveUntilAfter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/StartSlaveUntilAfter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).StartSlaveUntilAfter(ctx, req.(*tabletmanagerdata.StartSlaveUntilAfterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_GetSlaves_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.GetSlavesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).GetSlaves(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/GetSlaves", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).GetSlaves(ctx, req.(*tabletmanagerdata.GetSlavesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_InitSlave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.InitSlaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).InitSlave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/InitSlave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).InitSlave(ctx, req.(*tabletmanagerdata.InitSlaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_SlaveWasPromoted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.SlaveWasPromotedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).SlaveWasPromoted(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/SlaveWasPromoted", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).SlaveWasPromoted(ctx, req.(*tabletmanagerdata.SlaveWasPromotedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_SlaveWasRestarted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.SlaveWasRestartedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).SlaveWasRestarted(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/SlaveWasRestarted", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).SlaveWasRestarted(ctx, req.(*tabletmanagerdata.SlaveWasRestartedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _TabletManager_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tabletmanagerservice.TabletManager", + HandlerType: (*TabletManagerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _TabletManager_Ping_Handler, + }, + { + MethodName: "Sleep", + Handler: _TabletManager_Sleep_Handler, + }, + { + MethodName: "ExecuteHook", + Handler: _TabletManager_ExecuteHook_Handler, + }, + { + MethodName: "GetSchema", + Handler: _TabletManager_GetSchema_Handler, + }, + { + MethodName: "GetPermissions", + Handler: _TabletManager_GetPermissions_Handler, + }, + { + MethodName: "SetReadOnly", + Handler: _TabletManager_SetReadOnly_Handler, + }, + { + MethodName: "SetReadWrite", + Handler: _TabletManager_SetReadWrite_Handler, + }, + { + MethodName: "ChangeType", + Handler: _TabletManager_ChangeType_Handler, + }, + { + MethodName: "RefreshState", + Handler: _TabletManager_RefreshState_Handler, + }, + { + MethodName: "RunHealthCheck", + Handler: _TabletManager_RunHealthCheck_Handler, + }, + { + MethodName: "IgnoreHealthError", + Handler: _TabletManager_IgnoreHealthError_Handler, + }, + { + MethodName: "ReloadSchema", + Handler: _TabletManager_ReloadSchema_Handler, + }, + { + MethodName: "PreflightSchema", + Handler: _TabletManager_PreflightSchema_Handler, + }, + { + MethodName: "ApplySchema", + Handler: _TabletManager_ApplySchema_Handler, + }, + { + MethodName: "LockTables", + Handler: _TabletManager_LockTables_Handler, + }, + { + MethodName: "UnlockTables", + Handler: _TabletManager_UnlockTables_Handler, + }, + { + MethodName: "ExecuteFetchAsDba", + Handler: _TabletManager_ExecuteFetchAsDba_Handler, + }, + { + MethodName: "ExecuteFetchAsAllPrivs", + Handler: _TabletManager_ExecuteFetchAsAllPrivs_Handler, + }, + { + MethodName: "ExecuteFetchAsApp", + Handler: _TabletManager_ExecuteFetchAsApp_Handler, + }, + { + MethodName: "ReplicationStatus", + Handler: _TabletManager_ReplicationStatus_Handler, + }, + { + MethodName: "MasterPosition", + Handler: _TabletManager_MasterPosition_Handler, + }, + { + MethodName: "WaitForPosition", + Handler: _TabletManager_WaitForPosition_Handler, + }, + { + MethodName: "StopReplication", + Handler: _TabletManager_StopReplication_Handler, + }, + { + MethodName: "StopReplicationMinimum", + Handler: _TabletManager_StopReplicationMinimum_Handler, + }, + { + MethodName: "StartReplication", + Handler: _TabletManager_StartReplication_Handler, + }, + { + MethodName: "StartReplicationUntilAfter", + Handler: _TabletManager_StartReplicationUntilAfter_Handler, + }, + { + MethodName: "GetReplicas", + Handler: _TabletManager_GetReplicas_Handler, + }, + { + MethodName: "VReplicationExec", + Handler: _TabletManager_VReplicationExec_Handler, + }, + { + MethodName: "VReplicationWaitForPos", + Handler: _TabletManager_VReplicationWaitForPos_Handler, + }, + { + MethodName: "ResetReplication", + Handler: _TabletManager_ResetReplication_Handler, + }, + { + MethodName: "InitMaster", + Handler: _TabletManager_InitMaster_Handler, + }, + { + MethodName: "PopulateReparentJournal", + Handler: _TabletManager_PopulateReparentJournal_Handler, + }, + { + MethodName: "InitReplica", + Handler: _TabletManager_InitReplica_Handler, + }, + { + MethodName: "DemoteMaster", + Handler: _TabletManager_DemoteMaster_Handler, + }, + { + MethodName: "UndoDemoteMaster", + Handler: _TabletManager_UndoDemoteMaster_Handler, + }, + { + MethodName: "ReplicaWasPromoted", + Handler: _TabletManager_ReplicaWasPromoted_Handler, + }, + { + MethodName: "SetMaster", + Handler: _TabletManager_SetMaster_Handler, + }, + { + MethodName: "ReplicaWasRestarted", + Handler: _TabletManager_ReplicaWasRestarted_Handler, + }, + { + MethodName: "StopReplicationAndGetStatus", + Handler: _TabletManager_StopReplicationAndGetStatus_Handler, + }, + { + MethodName: "PromoteReplica", + Handler: _TabletManager_PromoteReplica_Handler, + }, + { + MethodName: "SlaveStatus", + Handler: _TabletManager_SlaveStatus_Handler, + }, + { + MethodName: "StopSlave", + Handler: _TabletManager_StopSlave_Handler, + }, + { + MethodName: "StopSlaveMinimum", + Handler: _TabletManager_StopSlaveMinimum_Handler, + }, + { + MethodName: "StartSlave", + Handler: _TabletManager_StartSlave_Handler, + }, + { + MethodName: "StartSlaveUntilAfter", + Handler: _TabletManager_StartSlaveUntilAfter_Handler, + }, + { + MethodName: "GetSlaves", + Handler: _TabletManager_GetSlaves_Handler, + }, + { + MethodName: "InitSlave", + Handler: _TabletManager_InitSlave_Handler, + }, + { + MethodName: "SlaveWasPromoted", + Handler: _TabletManager_SlaveWasPromoted_Handler, + }, + { + MethodName: "SlaveWasRestarted", + Handler: _TabletManager_SlaveWasRestarted_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Backup", + Handler: _TabletManager_Backup_Handler, + ServerStreams: true, + }, + { + StreamName: "RestoreFromBackup", + Handler: _TabletManager_RestoreFromBackup_Handler, + ServerStreams: true, + }, + }, + Metadata: "tabletmanagerservice.proto", +} diff --git a/internal/stackql-parser-fork/go/vt/proto/throttlerdata/throttlerdata.pb.go b/internal/stackql-parser-fork/go/vt/proto/throttlerdata/throttlerdata.pb.go new file mode 100644 index 00000000..aab974eb --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/throttlerdata/throttlerdata.pb.go @@ -0,0 +1,723 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: throttlerdata.proto + +package throttlerdata + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// MaxRatesRequest is the payload for the MaxRates RPC. +type MaxRatesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MaxRatesRequest) Reset() { *m = MaxRatesRequest{} } +func (m *MaxRatesRequest) String() string { return proto.CompactTextString(m) } +func (*MaxRatesRequest) ProtoMessage() {} +func (*MaxRatesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b67db2b008a2453d, []int{0} +} + +func (m *MaxRatesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MaxRatesRequest.Unmarshal(m, b) +} +func (m *MaxRatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MaxRatesRequest.Marshal(b, m, deterministic) +} +func (m *MaxRatesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaxRatesRequest.Merge(m, src) +} +func (m *MaxRatesRequest) XXX_Size() int { + return xxx_messageInfo_MaxRatesRequest.Size(m) +} +func (m *MaxRatesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MaxRatesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MaxRatesRequest proto.InternalMessageInfo + +// MaxRatesResponse is returned by the MaxRates RPC. +type MaxRatesResponse struct { + // max_rates returns the max rate for each throttler. It's keyed by the + // throttler name. + Rates map[string]int64 `protobuf:"bytes,1,rep,name=rates,proto3" json:"rates,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MaxRatesResponse) Reset() { *m = MaxRatesResponse{} } +func (m *MaxRatesResponse) String() string { return proto.CompactTextString(m) } +func (*MaxRatesResponse) ProtoMessage() {} +func (*MaxRatesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b67db2b008a2453d, []int{1} +} + +func (m *MaxRatesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MaxRatesResponse.Unmarshal(m, b) +} +func (m *MaxRatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MaxRatesResponse.Marshal(b, m, deterministic) +} +func (m *MaxRatesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaxRatesResponse.Merge(m, src) +} +func (m *MaxRatesResponse) XXX_Size() int { + return xxx_messageInfo_MaxRatesResponse.Size(m) +} +func (m *MaxRatesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MaxRatesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MaxRatesResponse proto.InternalMessageInfo + +func (m *MaxRatesResponse) GetRates() map[string]int64 { + if m != nil { + return m.Rates + } + return nil +} + +// SetMaxRateRequest is the payload for the SetMaxRate RPC. +type SetMaxRateRequest struct { + Rate int64 `protobuf:"varint,1,opt,name=rate,proto3" json:"rate,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetMaxRateRequest) Reset() { *m = SetMaxRateRequest{} } +func (m *SetMaxRateRequest) String() string { return proto.CompactTextString(m) } +func (*SetMaxRateRequest) ProtoMessage() {} +func (*SetMaxRateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b67db2b008a2453d, []int{2} +} + +func (m *SetMaxRateRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetMaxRateRequest.Unmarshal(m, b) +} +func (m *SetMaxRateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetMaxRateRequest.Marshal(b, m, deterministic) +} +func (m *SetMaxRateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetMaxRateRequest.Merge(m, src) +} +func (m *SetMaxRateRequest) XXX_Size() int { + return xxx_messageInfo_SetMaxRateRequest.Size(m) +} +func (m *SetMaxRateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetMaxRateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetMaxRateRequest proto.InternalMessageInfo + +func (m *SetMaxRateRequest) GetRate() int64 { + if m != nil { + return m.Rate + } + return 0 +} + +// SetMaxRateResponse is returned by the SetMaxRate RPC. +type SetMaxRateResponse struct { + // names is the list of throttler names which were updated. + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetMaxRateResponse) Reset() { *m = SetMaxRateResponse{} } +func (m *SetMaxRateResponse) String() string { return proto.CompactTextString(m) } +func (*SetMaxRateResponse) ProtoMessage() {} +func (*SetMaxRateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b67db2b008a2453d, []int{3} +} + +func (m *SetMaxRateResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetMaxRateResponse.Unmarshal(m, b) +} +func (m *SetMaxRateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetMaxRateResponse.Marshal(b, m, deterministic) +} +func (m *SetMaxRateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetMaxRateResponse.Merge(m, src) +} +func (m *SetMaxRateResponse) XXX_Size() int { + return xxx_messageInfo_SetMaxRateResponse.Size(m) +} +func (m *SetMaxRateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetMaxRateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetMaxRateResponse proto.InternalMessageInfo + +func (m *SetMaxRateResponse) GetNames() []string { + if m != nil { + return m.Names + } + return nil +} + +// Configuration holds the configuration parameters for the +// MaxReplicationLagModule which adaptively adjusts the throttling rate based on +// the observed replication lag across all replicas. +type Configuration struct { + // target_replication_lag_sec is the replication lag (in seconds) the + // MaxReplicationLagModule tries to aim for. + // If it is within the target, it tries to increase the throttler + // rate, otherwise it will lower it based on an educated guess of the + // replica's throughput. + TargetReplicationLagSec int64 `protobuf:"varint,1,opt,name=target_replication_lag_sec,json=targetReplicationLagSec,proto3" json:"target_replication_lag_sec,omitempty"` + // max_replication_lag_sec is meant as a last resort. + // By default, the module tries to find out the system maximum capacity while + // trying to keep the replication lag around "target_replication_lag_sec". + // Usually, we'll wait min_duration_between_(increases|decreases)_sec to see + // the effect of a throttler rate change on the replication lag. + // But if the lag goes above this field's value we will go into an "emergency" + // state and throttle more aggressively (see "emergency_decrease" below). + // This is the only way to ensure that the system will recover. + MaxReplicationLagSec int64 `protobuf:"varint,2,opt,name=max_replication_lag_sec,json=maxReplicationLagSec,proto3" json:"max_replication_lag_sec,omitempty"` + // initial_rate is the rate at which the module will start. + InitialRate int64 `protobuf:"varint,3,opt,name=initial_rate,json=initialRate,proto3" json:"initial_rate,omitempty"` + // max_increase defines by how much we will increase the rate + // e.g. 0.05 increases the rate by 5% while 1.0 by 100%. + // Note that any increase will let the system wait for at least + // (1 / MaxIncrease) seconds. If we wait for shorter periods of time, we + // won't notice if the rate increase also increases the replication lag. + // (If the system was already at its maximum capacity (e.g. 1k QPS) and we + // increase the rate by e.g. 5% to 1050 QPS, it will take 20 seconds until + // 1000 extra queries are buffered and the lag increases by 1 second.) + MaxIncrease float64 `protobuf:"fixed64,4,opt,name=max_increase,json=maxIncrease,proto3" json:"max_increase,omitempty"` + // emergency_decrease defines by how much we will decrease the current rate + // if the observed replication lag is above "max_replication_lag_sec". + // E.g. 0.50 decreases the current rate by 50%. + EmergencyDecrease float64 `protobuf:"fixed64,5,opt,name=emergency_decrease,json=emergencyDecrease,proto3" json:"emergency_decrease,omitempty"` + // min_duration_between_increases_sec specifies how long we'll wait at least + // for the last rate increase to have an effect on the system. + MinDurationBetweenIncreasesSec int64 `protobuf:"varint,6,opt,name=min_duration_between_increases_sec,json=minDurationBetweenIncreasesSec,proto3" json:"min_duration_between_increases_sec,omitempty"` + // max_duration_between_increases_sec specifies how long we'll wait at most + // for the last rate increase to have an effect on the system. + MaxDurationBetweenIncreasesSec int64 `protobuf:"varint,7,opt,name=max_duration_between_increases_sec,json=maxDurationBetweenIncreasesSec,proto3" json:"max_duration_between_increases_sec,omitempty"` + // min_duration_between_decreases_sec specifies how long we'll wait at least + // for the last rate decrease to have an effect on the system. + MinDurationBetweenDecreasesSec int64 `protobuf:"varint,8,opt,name=min_duration_between_decreases_sec,json=minDurationBetweenDecreasesSec,proto3" json:"min_duration_between_decreases_sec,omitempty"` + // spread_backlog_across_sec is used when we set the throttler rate after + // we guessed the rate of a replica and determined its backlog. + // For example, at a guessed rate of 100 QPS and a lag of 10s, the replica has + // a backlog of 1000 queries. + // When we set the new, decreased throttler rate, we factor in how long it + // will take the replica to go through the backlog (in addition to new + // requests). This field specifies over which timespan we plan to spread this. + // For example, for a backlog of 1000 queries spread over 5s means that we + // have to further reduce the rate by 200 QPS or the backlog will not be + // processed within the 5 seconds. + SpreadBacklogAcrossSec int64 `protobuf:"varint,9,opt,name=spread_backlog_across_sec,json=spreadBacklogAcrossSec,proto3" json:"spread_backlog_across_sec,omitempty"` + // ignore_n_slowest_replicas will ignore replication lag updates from the + // N slowest REPLICA tablets. Under certain circumstances, replicas are still + // considered e.g. a) if the lag is at most max_replication_lag_sec, b) there + // are less than N+1 replicas or c) the lag increased on each replica such + // that all replicas were ignored in a row. + IgnoreNSlowestReplicas int32 `protobuf:"varint,10,opt,name=ignore_n_slowest_replicas,json=ignoreNSlowestReplicas,proto3" json:"ignore_n_slowest_replicas,omitempty"` + // ignore_n_slowest_rdonlys does the same thing as ignore_n_slowest_replicas + // but for RDONLY tablets. Note that these two settings are independent. + IgnoreNSlowestRdonlys int32 `protobuf:"varint,11,opt,name=ignore_n_slowest_rdonlys,json=ignoreNSlowestRdonlys,proto3" json:"ignore_n_slowest_rdonlys,omitempty"` + // age_bad_rate_after_sec is the duration after which an unchanged bad rate + // will "age out" and increase by "bad_rate_increase". + // Bad rates are tracked by the code in memory.go and serve as an upper bound + // for future rate changes. This ensures that the adaptive throttler does not + // try known too high (bad) rates over and over again. + // To avoid that temporary degradations permanently reduce the maximum rate, + // a stable bad rate "ages out" after "age_bad_rate_after_sec". + AgeBadRateAfterSec int64 `protobuf:"varint,12,opt,name=age_bad_rate_after_sec,json=ageBadRateAfterSec,proto3" json:"age_bad_rate_after_sec,omitempty"` + // bad_rate_increase defines the percentage by which a bad rate will be + // increased when it's aging out. + BadRateIncrease float64 `protobuf:"fixed64,13,opt,name=bad_rate_increase,json=badRateIncrease,proto3" json:"bad_rate_increase,omitempty"` + // max_rate_approach_threshold is the fraction of the current rate limit that the actual + // rate must exceed for the throttler to increase the limit when the replication lag + // is below target_replication_lag_sec. For example, assuming the actual replication lag + // is below target_replication_lag_sec, if the current rate limit is 100, then the actual + // rate must exceed 100*max_rate_approach_threshold for the throttler to increase the current + // limit. + MaxRateApproachThreshold float64 `protobuf:"fixed64,14,opt,name=max_rate_approach_threshold,json=maxRateApproachThreshold,proto3" json:"max_rate_approach_threshold,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configuration) Reset() { *m = Configuration{} } +func (m *Configuration) String() string { return proto.CompactTextString(m) } +func (*Configuration) ProtoMessage() {} +func (*Configuration) Descriptor() ([]byte, []int) { + return fileDescriptor_b67db2b008a2453d, []int{4} +} + +func (m *Configuration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configuration.Unmarshal(m, b) +} +func (m *Configuration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configuration.Marshal(b, m, deterministic) +} +func (m *Configuration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configuration.Merge(m, src) +} +func (m *Configuration) XXX_Size() int { + return xxx_messageInfo_Configuration.Size(m) +} +func (m *Configuration) XXX_DiscardUnknown() { + xxx_messageInfo_Configuration.DiscardUnknown(m) +} + +var xxx_messageInfo_Configuration proto.InternalMessageInfo + +func (m *Configuration) GetTargetReplicationLagSec() int64 { + if m != nil { + return m.TargetReplicationLagSec + } + return 0 +} + +func (m *Configuration) GetMaxReplicationLagSec() int64 { + if m != nil { + return m.MaxReplicationLagSec + } + return 0 +} + +func (m *Configuration) GetInitialRate() int64 { + if m != nil { + return m.InitialRate + } + return 0 +} + +func (m *Configuration) GetMaxIncrease() float64 { + if m != nil { + return m.MaxIncrease + } + return 0 +} + +func (m *Configuration) GetEmergencyDecrease() float64 { + if m != nil { + return m.EmergencyDecrease + } + return 0 +} + +func (m *Configuration) GetMinDurationBetweenIncreasesSec() int64 { + if m != nil { + return m.MinDurationBetweenIncreasesSec + } + return 0 +} + +func (m *Configuration) GetMaxDurationBetweenIncreasesSec() int64 { + if m != nil { + return m.MaxDurationBetweenIncreasesSec + } + return 0 +} + +func (m *Configuration) GetMinDurationBetweenDecreasesSec() int64 { + if m != nil { + return m.MinDurationBetweenDecreasesSec + } + return 0 +} + +func (m *Configuration) GetSpreadBacklogAcrossSec() int64 { + if m != nil { + return m.SpreadBacklogAcrossSec + } + return 0 +} + +func (m *Configuration) GetIgnoreNSlowestReplicas() int32 { + if m != nil { + return m.IgnoreNSlowestReplicas + } + return 0 +} + +func (m *Configuration) GetIgnoreNSlowestRdonlys() int32 { + if m != nil { + return m.IgnoreNSlowestRdonlys + } + return 0 +} + +func (m *Configuration) GetAgeBadRateAfterSec() int64 { + if m != nil { + return m.AgeBadRateAfterSec + } + return 0 +} + +func (m *Configuration) GetBadRateIncrease() float64 { + if m != nil { + return m.BadRateIncrease + } + return 0 +} + +func (m *Configuration) GetMaxRateApproachThreshold() float64 { + if m != nil { + return m.MaxRateApproachThreshold + } + return 0 +} + +// GetConfigurationRequest is the payload for the GetConfiguration RPC. +type GetConfigurationRequest struct { + // throttler_name specifies which throttler to select. If empty, all active + // throttlers will be selected. + ThrottlerName string `protobuf:"bytes,1,opt,name=throttler_name,json=throttlerName,proto3" json:"throttler_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetConfigurationRequest) Reset() { *m = GetConfigurationRequest{} } +func (m *GetConfigurationRequest) String() string { return proto.CompactTextString(m) } +func (*GetConfigurationRequest) ProtoMessage() {} +func (*GetConfigurationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b67db2b008a2453d, []int{5} +} + +func (m *GetConfigurationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetConfigurationRequest.Unmarshal(m, b) +} +func (m *GetConfigurationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetConfigurationRequest.Marshal(b, m, deterministic) +} +func (m *GetConfigurationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetConfigurationRequest.Merge(m, src) +} +func (m *GetConfigurationRequest) XXX_Size() int { + return xxx_messageInfo_GetConfigurationRequest.Size(m) +} +func (m *GetConfigurationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetConfigurationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetConfigurationRequest proto.InternalMessageInfo + +func (m *GetConfigurationRequest) GetThrottlerName() string { + if m != nil { + return m.ThrottlerName + } + return "" +} + +// GetConfigurationResponse is returned by the GetConfiguration RPC. +type GetConfigurationResponse struct { + // max_rates returns the configurations for each throttler. + // It's keyed by the throttler name. + Configurations map[string]*Configuration `protobuf:"bytes,1,rep,name=configurations,proto3" json:"configurations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetConfigurationResponse) Reset() { *m = GetConfigurationResponse{} } +func (m *GetConfigurationResponse) String() string { return proto.CompactTextString(m) } +func (*GetConfigurationResponse) ProtoMessage() {} +func (*GetConfigurationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b67db2b008a2453d, []int{6} +} + +func (m *GetConfigurationResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetConfigurationResponse.Unmarshal(m, b) +} +func (m *GetConfigurationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetConfigurationResponse.Marshal(b, m, deterministic) +} +func (m *GetConfigurationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetConfigurationResponse.Merge(m, src) +} +func (m *GetConfigurationResponse) XXX_Size() int { + return xxx_messageInfo_GetConfigurationResponse.Size(m) +} +func (m *GetConfigurationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetConfigurationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetConfigurationResponse proto.InternalMessageInfo + +func (m *GetConfigurationResponse) GetConfigurations() map[string]*Configuration { + if m != nil { + return m.Configurations + } + return nil +} + +// UpdateConfigurationRequest is the payload for the UpdateConfiguration RPC. +type UpdateConfigurationRequest struct { + // throttler_name specifies which throttler to update. If empty, all active + // throttlers will be updated. + ThrottlerName string `protobuf:"bytes,1,opt,name=throttler_name,json=throttlerName,proto3" json:"throttler_name,omitempty"` + // configuration is the new (partial) configuration. + Configuration *Configuration `protobuf:"bytes,2,opt,name=configuration,proto3" json:"configuration,omitempty"` + // copy_zero_values specifies whether fields with zero values should be copied + // as well. + CopyZeroValues bool `protobuf:"varint,3,opt,name=copy_zero_values,json=copyZeroValues,proto3" json:"copy_zero_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateConfigurationRequest) Reset() { *m = UpdateConfigurationRequest{} } +func (m *UpdateConfigurationRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateConfigurationRequest) ProtoMessage() {} +func (*UpdateConfigurationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b67db2b008a2453d, []int{7} +} + +func (m *UpdateConfigurationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateConfigurationRequest.Unmarshal(m, b) +} +func (m *UpdateConfigurationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateConfigurationRequest.Marshal(b, m, deterministic) +} +func (m *UpdateConfigurationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateConfigurationRequest.Merge(m, src) +} +func (m *UpdateConfigurationRequest) XXX_Size() int { + return xxx_messageInfo_UpdateConfigurationRequest.Size(m) +} +func (m *UpdateConfigurationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateConfigurationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateConfigurationRequest proto.InternalMessageInfo + +func (m *UpdateConfigurationRequest) GetThrottlerName() string { + if m != nil { + return m.ThrottlerName + } + return "" +} + +func (m *UpdateConfigurationRequest) GetConfiguration() *Configuration { + if m != nil { + return m.Configuration + } + return nil +} + +func (m *UpdateConfigurationRequest) GetCopyZeroValues() bool { + if m != nil { + return m.CopyZeroValues + } + return false +} + +// UpdateConfigurationResponse is returned by the UpdateConfiguration RPC. +type UpdateConfigurationResponse struct { + // names is the list of throttler names which were updated. + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateConfigurationResponse) Reset() { *m = UpdateConfigurationResponse{} } +func (m *UpdateConfigurationResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateConfigurationResponse) ProtoMessage() {} +func (*UpdateConfigurationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b67db2b008a2453d, []int{8} +} + +func (m *UpdateConfigurationResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateConfigurationResponse.Unmarshal(m, b) +} +func (m *UpdateConfigurationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateConfigurationResponse.Marshal(b, m, deterministic) +} +func (m *UpdateConfigurationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateConfigurationResponse.Merge(m, src) +} +func (m *UpdateConfigurationResponse) XXX_Size() int { + return xxx_messageInfo_UpdateConfigurationResponse.Size(m) +} +func (m *UpdateConfigurationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateConfigurationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateConfigurationResponse proto.InternalMessageInfo + +func (m *UpdateConfigurationResponse) GetNames() []string { + if m != nil { + return m.Names + } + return nil +} + +// ResetConfigurationRequest is the payload for the ResetConfiguration RPC. +type ResetConfigurationRequest struct { + // throttler_name specifies which throttler to reset. If empty, all active + // throttlers will be reset. + ThrottlerName string `protobuf:"bytes,1,opt,name=throttler_name,json=throttlerName,proto3" json:"throttler_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResetConfigurationRequest) Reset() { *m = ResetConfigurationRequest{} } +func (m *ResetConfigurationRequest) String() string { return proto.CompactTextString(m) } +func (*ResetConfigurationRequest) ProtoMessage() {} +func (*ResetConfigurationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b67db2b008a2453d, []int{9} +} + +func (m *ResetConfigurationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResetConfigurationRequest.Unmarshal(m, b) +} +func (m *ResetConfigurationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResetConfigurationRequest.Marshal(b, m, deterministic) +} +func (m *ResetConfigurationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetConfigurationRequest.Merge(m, src) +} +func (m *ResetConfigurationRequest) XXX_Size() int { + return xxx_messageInfo_ResetConfigurationRequest.Size(m) +} +func (m *ResetConfigurationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResetConfigurationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetConfigurationRequest proto.InternalMessageInfo + +func (m *ResetConfigurationRequest) GetThrottlerName() string { + if m != nil { + return m.ThrottlerName + } + return "" +} + +// ResetConfigurationResponse is returned by the ResetConfiguration RPC. +type ResetConfigurationResponse struct { + // names is the list of throttler names which were updated. + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResetConfigurationResponse) Reset() { *m = ResetConfigurationResponse{} } +func (m *ResetConfigurationResponse) String() string { return proto.CompactTextString(m) } +func (*ResetConfigurationResponse) ProtoMessage() {} +func (*ResetConfigurationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b67db2b008a2453d, []int{10} +} + +func (m *ResetConfigurationResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResetConfigurationResponse.Unmarshal(m, b) +} +func (m *ResetConfigurationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResetConfigurationResponse.Marshal(b, m, deterministic) +} +func (m *ResetConfigurationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetConfigurationResponse.Merge(m, src) +} +func (m *ResetConfigurationResponse) XXX_Size() int { + return xxx_messageInfo_ResetConfigurationResponse.Size(m) +} +func (m *ResetConfigurationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResetConfigurationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetConfigurationResponse proto.InternalMessageInfo + +func (m *ResetConfigurationResponse) GetNames() []string { + if m != nil { + return m.Names + } + return nil +} + +func init() { + proto.RegisterType((*MaxRatesRequest)(nil), "throttlerdata.MaxRatesRequest") + proto.RegisterType((*MaxRatesResponse)(nil), "throttlerdata.MaxRatesResponse") + proto.RegisterMapType((map[string]int64)(nil), "throttlerdata.MaxRatesResponse.RatesEntry") + proto.RegisterType((*SetMaxRateRequest)(nil), "throttlerdata.SetMaxRateRequest") + proto.RegisterType((*SetMaxRateResponse)(nil), "throttlerdata.SetMaxRateResponse") + proto.RegisterType((*Configuration)(nil), "throttlerdata.Configuration") + proto.RegisterType((*GetConfigurationRequest)(nil), "throttlerdata.GetConfigurationRequest") + proto.RegisterType((*GetConfigurationResponse)(nil), "throttlerdata.GetConfigurationResponse") + proto.RegisterMapType((map[string]*Configuration)(nil), "throttlerdata.GetConfigurationResponse.ConfigurationsEntry") + proto.RegisterType((*UpdateConfigurationRequest)(nil), "throttlerdata.UpdateConfigurationRequest") + proto.RegisterType((*UpdateConfigurationResponse)(nil), "throttlerdata.UpdateConfigurationResponse") + proto.RegisterType((*ResetConfigurationRequest)(nil), "throttlerdata.ResetConfigurationRequest") + proto.RegisterType((*ResetConfigurationResponse)(nil), "throttlerdata.ResetConfigurationResponse") +} + +func init() { proto.RegisterFile("throttlerdata.proto", fileDescriptor_b67db2b008a2453d) } + +var fileDescriptor_b67db2b008a2453d = []byte{ + // 734 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5f, 0x4f, 0x03, 0x45, + 0x10, 0xcf, 0x51, 0x8a, 0x30, 0xa5, 0x40, 0x17, 0x84, 0xa3, 0x18, 0x53, 0x2f, 0x31, 0x36, 0x8d, + 0xb6, 0x49, 0x89, 0x11, 0x25, 0x26, 0x50, 0x31, 0x46, 0xa3, 0x3c, 0x1c, 0xea, 0x03, 0x2f, 0x9b, + 0xed, 0xdd, 0x70, 0xbd, 0x70, 0x77, 0x7b, 0xee, 0x2e, 0xd0, 0xfa, 0x21, 0xfc, 0x20, 0xbe, 0xf9, + 0x8d, 0xfc, 0x28, 0xe6, 0x76, 0xb7, 0x7f, 0xae, 0x14, 0x30, 0xe1, 0x6d, 0x77, 0xe6, 0x37, 0xbf, + 0xf9, 0xcd, 0xde, 0xcc, 0x1c, 0xec, 0xab, 0x91, 0xe0, 0x4a, 0x25, 0x28, 0x42, 0xa6, 0x58, 0x37, + 0x17, 0x5c, 0x71, 0x52, 0x2f, 0x19, 0xbd, 0x06, 0xec, 0xfe, 0xc2, 0xc6, 0x3e, 0x53, 0x28, 0x7d, + 0xfc, 0xe3, 0x01, 0xa5, 0xf2, 0xfe, 0x72, 0x60, 0x6f, 0x6e, 0x93, 0x39, 0xcf, 0x24, 0x92, 0x0b, + 0xa8, 0x8a, 0xc2, 0xe0, 0x3a, 0xad, 0x4a, 0xbb, 0xd6, 0xef, 0x74, 0xcb, 0xdc, 0xcb, 0xf8, 0xae, + 0xbe, 0x7d, 0x9f, 0x29, 0x31, 0xf1, 0x4d, 0x60, 0xf3, 0x0c, 0x60, 0x6e, 0x24, 0x7b, 0x50, 0xb9, + 0xc7, 0x89, 0xeb, 0xb4, 0x9c, 0xf6, 0x96, 0x5f, 0x1c, 0xc9, 0x01, 0x54, 0x1f, 0x59, 0xf2, 0x80, + 0xee, 0x5a, 0xcb, 0x69, 0x57, 0x7c, 0x73, 0xf9, 0x66, 0xed, 0xcc, 0xf1, 0x3e, 0x83, 0xc6, 0x0d, + 0x2a, 0x9b, 0xc2, 0xaa, 0x24, 0x04, 0xd6, 0x0b, 0x5e, 0xcd, 0x50, 0xf1, 0xf5, 0xd9, 0xeb, 0x00, + 0x59, 0x04, 0x5a, 0xe9, 0x07, 0x50, 0xcd, 0x58, 0x6a, 0xa5, 0x6f, 0xf9, 0xe6, 0xe2, 0xfd, 0xbd, + 0x01, 0xf5, 0xef, 0x78, 0x76, 0x17, 0x47, 0x0f, 0x82, 0xa9, 0x98, 0x67, 0xe4, 0x1c, 0x9a, 0x8a, + 0x89, 0x08, 0x15, 0x15, 0x98, 0x27, 0x71, 0xa0, 0xad, 0x34, 0x61, 0x11, 0x95, 0x18, 0xd8, 0x3c, + 0x47, 0x06, 0xe1, 0xcf, 0x01, 0x3f, 0xb3, 0xe8, 0x06, 0x03, 0xf2, 0x25, 0x1c, 0xa5, 0x6c, 0xbc, + 0x32, 0xd2, 0xd4, 0x73, 0x90, 0xb2, 0xf1, 0xf3, 0xb0, 0x4f, 0x60, 0x3b, 0xce, 0x62, 0x15, 0xb3, + 0x84, 0xea, 0x6a, 0x2a, 0x1a, 0x5b, 0xb3, 0xb6, 0xa2, 0x8c, 0x02, 0x52, 0x30, 0xc7, 0x59, 0x20, + 0x90, 0x49, 0x74, 0xd7, 0x5b, 0x4e, 0xdb, 0xf1, 0x6b, 0x29, 0x1b, 0xff, 0x68, 0x4d, 0xe4, 0x0b, + 0x20, 0x98, 0xa2, 0x88, 0x30, 0x0b, 0x26, 0x34, 0x44, 0x0b, 0xac, 0x6a, 0x60, 0x63, 0xe6, 0xb9, + 0xb2, 0x0e, 0xf2, 0x13, 0x78, 0x69, 0x9c, 0xd1, 0xd0, 0x16, 0x4e, 0x87, 0xa8, 0x9e, 0x10, 0xb3, + 0x59, 0x0a, 0xa9, 0x65, 0x6f, 0x68, 0x29, 0x1f, 0xa7, 0x71, 0x76, 0x65, 0x81, 0x03, 0x83, 0x9b, + 0xa6, 0x95, 0x45, 0x01, 0x05, 0x17, 0x1b, 0xbf, 0xc5, 0xf5, 0x81, 0xe5, 0x62, 0xe3, 0xb7, 0xb8, + 0x56, 0xe9, 0x9a, 0x56, 0x64, 0xb8, 0x36, 0x5f, 0xd2, 0x35, 0xad, 0x4f, 0x73, 0x7d, 0x0d, 0xc7, + 0x32, 0x17, 0xc8, 0x42, 0x3a, 0x64, 0xc1, 0x7d, 0xc2, 0x23, 0xca, 0x02, 0xc1, 0xa5, 0xa1, 0xd8, + 0xd2, 0x14, 0x87, 0x06, 0x30, 0x30, 0xfe, 0x4b, 0xed, 0xb6, 0xa1, 0x71, 0x94, 0x71, 0x81, 0x34, + 0xa3, 0x32, 0xe1, 0x4f, 0x28, 0x67, 0x1d, 0x21, 0x5d, 0x68, 0x39, 0xed, 0xaa, 0x7f, 0x68, 0x00, + 0xd7, 0x37, 0xc6, 0x6d, 0xbf, 0xab, 0x24, 0x5f, 0x81, 0xfb, 0x3c, 0x34, 0xe4, 0x59, 0x32, 0x91, + 0x6e, 0x4d, 0x47, 0x7e, 0xb8, 0x14, 0x69, 0x9c, 0xa4, 0x0f, 0x87, 0x2c, 0x42, 0x3a, 0x64, 0xa1, + 0xee, 0x03, 0xca, 0xee, 0x14, 0x0a, 0xad, 0x75, 0x5b, 0x6b, 0x25, 0x2c, 0xc2, 0x01, 0x0b, 0x8b, + 0x86, 0xb8, 0x2c, 0x5c, 0x85, 0xce, 0x0e, 0x34, 0x66, 0xf8, 0x59, 0x77, 0xd4, 0xf5, 0x47, 0xdf, + 0x1d, 0x1a, 0xec, 0xac, 0x43, 0xbe, 0x85, 0x13, 0xdd, 0x9e, 0x9a, 0x3b, 0xcf, 0x05, 0x67, 0xc1, + 0x88, 0xaa, 0x91, 0x40, 0x39, 0xe2, 0x49, 0xe8, 0xee, 0xe8, 0x28, 0x37, 0x35, 0x93, 0x73, 0x69, + 0x01, 0xbf, 0x4e, 0xfd, 0xde, 0x05, 0x1c, 0xfd, 0x80, 0xaa, 0x34, 0x2e, 0xd3, 0x39, 0xfc, 0x14, + 0x76, 0x66, 0xab, 0x80, 0x16, 0xa3, 0x65, 0x67, 0x7a, 0xbe, 0x67, 0xae, 0x59, 0x8a, 0xde, 0xbf, + 0x0e, 0xb8, 0xcf, 0x29, 0xec, 0x84, 0x06, 0xb0, 0x13, 0x2c, 0x3a, 0xa6, 0x5b, 0xe6, 0x7c, 0x69, + 0xcb, 0xbc, 0x44, 0xd0, 0x2d, 0x59, 0xed, 0xda, 0x59, 0xa2, 0x6c, 0x52, 0xd8, 0x5f, 0x01, 0x5b, + 0xb1, 0x88, 0xfa, 0x8b, 0x8b, 0xa8, 0xd6, 0xff, 0x68, 0x49, 0x44, 0x59, 0xc1, 0xc2, 0x9a, 0xfa, + 0xc7, 0x81, 0xe6, 0x6f, 0x79, 0xc8, 0x14, 0xbe, 0xe3, 0xa1, 0xc8, 0x00, 0xea, 0x25, 0xe1, 0xff, + 0x4b, 0x45, 0x39, 0x84, 0xb4, 0x61, 0x2f, 0xe0, 0xf9, 0x84, 0xfe, 0x89, 0x82, 0x53, 0x2d, 0x50, + 0xea, 0xcd, 0xb2, 0x59, 0x3c, 0x4a, 0x3e, 0xb9, 0x45, 0xc1, 0x7f, 0xd7, 0x56, 0xef, 0x14, 0x4e, + 0x56, 0x4a, 0x7e, 0x75, 0x75, 0x0e, 0xe0, 0xd8, 0x47, 0xf9, 0xbe, 0x7e, 0xe8, 0x43, 0x73, 0x15, + 0xc7, 0x6b, 0x79, 0x07, 0x9f, 0xdf, 0x76, 0x1e, 0x63, 0x85, 0x52, 0x76, 0x63, 0xde, 0x33, 0xa7, + 0x5e, 0xc4, 0x7b, 0x8f, 0xaa, 0xa7, 0x7f, 0x6d, 0xbd, 0xd2, 0x0b, 0x0d, 0x37, 0xb4, 0xf1, 0xf4, + 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xe5, 0x12, 0x96, 0x06, 0x07, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/throttlerservice/throttlerservice.pb.go b/internal/stackql-parser-fork/go/vt/proto/throttlerservice/throttlerservice.pb.go new file mode 100644 index 00000000..08a4879e --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/throttlerservice/throttlerservice.pb.go @@ -0,0 +1,299 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: throttlerservice.proto + +package throttlerservice + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + throttlerdata "github.com/stackql/stackql-parser/go/vt/proto/throttlerdata" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("throttlerservice.proto", fileDescriptor_33af55db6d07f810) } + +var fileDescriptor_33af55db6d07f810 = []byte{ + // 241 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x3d, 0x4b, 0xc4, 0x40, + 0x10, 0x86, 0x05, 0x41, 0x74, 0xaa, 0x63, 0x0f, 0x2c, 0xae, 0xf0, 0xab, 0x50, 0x4f, 0x30, 0x0b, + 0xfa, 0x0f, 0xb4, 0xb0, 0xba, 0x26, 0xa7, 0x8d, 0xdd, 0xea, 0x8d, 0x71, 0x51, 0x76, 0xe2, 0xce, + 0x24, 0xf8, 0xbf, 0xfd, 0x03, 0x42, 0xe2, 0xae, 0x64, 0xfc, 0xb8, 0x74, 0xe1, 0x7d, 0x9f, 0x7d, + 0x1f, 0x02, 0x03, 0xbb, 0xf2, 0x1c, 0x49, 0xe4, 0x15, 0x23, 0x63, 0x6c, 0xfd, 0x23, 0x16, 0x75, + 0x24, 0x21, 0x33, 0xd1, 0xf9, 0x6c, 0x9a, 0x93, 0x95, 0x13, 0xd7, 0x63, 0x17, 0x1f, 0x9b, 0xb0, + 0x73, 0x9b, 0x72, 0xb3, 0x80, 0xed, 0x85, 0x7b, 0x2f, 0x9d, 0x20, 0x9b, 0xbd, 0x62, 0xc8, 0xa7, + 0xa2, 0xc4, 0xb7, 0x06, 0x59, 0x66, 0xfb, 0x7f, 0xf6, 0x5c, 0x53, 0x60, 0x3c, 0xda, 0x30, 0x4b, + 0x80, 0x25, 0xca, 0x57, 0x61, 0x0e, 0xd4, 0x83, 0xef, 0x2a, 0x4d, 0x1e, 0xfe, 0x43, 0xe4, 0x51, + 0x84, 0xc9, 0x0d, 0xca, 0x35, 0x85, 0x27, 0x5f, 0x35, 0xd1, 0x89, 0xa7, 0x60, 0x8e, 0xd5, 0x43, + 0x0d, 0x24, 0xc1, 0xc9, 0x5a, 0x2e, 0x6b, 0x02, 0x4c, 0xef, 0xea, 0x95, 0x13, 0x1c, 0x9a, 0xe6, + 0x6a, 0xe1, 0x17, 0x26, 0xc9, 0xce, 0xc6, 0xa0, 0xd9, 0xf7, 0x02, 0xa6, 0x44, 0xd6, 0x3f, 0x76, + 0xaa, 0x36, 0x7e, 0x22, 0xc9, 0x36, 0x1f, 0x41, 0x26, 0xd9, 0x95, 0xbd, 0x3f, 0x6f, 0xbd, 0x20, + 0x73, 0xe1, 0xc9, 0xf6, 0x5f, 0xb6, 0x22, 0xdb, 0x8a, 0xed, 0xae, 0xc2, 0xea, 0xdb, 0x79, 0xd8, + 0xea, 0xf2, 0xcb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x49, 0x64, 0xc0, 0xd9, 0x6e, 0x02, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ThrottlerClient is the client API for Throttler service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ThrottlerClient interface { + // MaxRates returns the current max rate for each throttler of the process. + MaxRates(ctx context.Context, in *throttlerdata.MaxRatesRequest, opts ...grpc.CallOption) (*throttlerdata.MaxRatesResponse, error) + // SetMaxRate allows to change the current max rate for all throttlers + // of the process. + SetMaxRate(ctx context.Context, in *throttlerdata.SetMaxRateRequest, opts ...grpc.CallOption) (*throttlerdata.SetMaxRateResponse, error) + // GetConfiguration returns the configuration of the MaxReplicationlag module + // for the given throttler or all throttlers if "throttler_name" is empty. + GetConfiguration(ctx context.Context, in *throttlerdata.GetConfigurationRequest, opts ...grpc.CallOption) (*throttlerdata.GetConfigurationResponse, error) + // UpdateConfiguration (partially) updates the configuration of the + // MaxReplicationlag module for the given throttler or all throttlers if + // "throttler_name" is empty. + // If "copy_zero_values" is true, fields with zero values will be copied + // as well. + UpdateConfiguration(ctx context.Context, in *throttlerdata.UpdateConfigurationRequest, opts ...grpc.CallOption) (*throttlerdata.UpdateConfigurationResponse, error) + // ResetConfiguration resets the configuration of the MaxReplicationlag module + // to the initial configuration for the given throttler or all throttlers if + // "throttler_name" is empty. + ResetConfiguration(ctx context.Context, in *throttlerdata.ResetConfigurationRequest, opts ...grpc.CallOption) (*throttlerdata.ResetConfigurationResponse, error) +} + +type throttlerClient struct { + cc *grpc.ClientConn +} + +func NewThrottlerClient(cc *grpc.ClientConn) ThrottlerClient { + return &throttlerClient{cc} +} + +func (c *throttlerClient) MaxRates(ctx context.Context, in *throttlerdata.MaxRatesRequest, opts ...grpc.CallOption) (*throttlerdata.MaxRatesResponse, error) { + out := new(throttlerdata.MaxRatesResponse) + err := c.cc.Invoke(ctx, "/throttlerservice.Throttler/MaxRates", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *throttlerClient) SetMaxRate(ctx context.Context, in *throttlerdata.SetMaxRateRequest, opts ...grpc.CallOption) (*throttlerdata.SetMaxRateResponse, error) { + out := new(throttlerdata.SetMaxRateResponse) + err := c.cc.Invoke(ctx, "/throttlerservice.Throttler/SetMaxRate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *throttlerClient) GetConfiguration(ctx context.Context, in *throttlerdata.GetConfigurationRequest, opts ...grpc.CallOption) (*throttlerdata.GetConfigurationResponse, error) { + out := new(throttlerdata.GetConfigurationResponse) + err := c.cc.Invoke(ctx, "/throttlerservice.Throttler/GetConfiguration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *throttlerClient) UpdateConfiguration(ctx context.Context, in *throttlerdata.UpdateConfigurationRequest, opts ...grpc.CallOption) (*throttlerdata.UpdateConfigurationResponse, error) { + out := new(throttlerdata.UpdateConfigurationResponse) + err := c.cc.Invoke(ctx, "/throttlerservice.Throttler/UpdateConfiguration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *throttlerClient) ResetConfiguration(ctx context.Context, in *throttlerdata.ResetConfigurationRequest, opts ...grpc.CallOption) (*throttlerdata.ResetConfigurationResponse, error) { + out := new(throttlerdata.ResetConfigurationResponse) + err := c.cc.Invoke(ctx, "/throttlerservice.Throttler/ResetConfiguration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ThrottlerServer is the server API for Throttler service. +type ThrottlerServer interface { + // MaxRates returns the current max rate for each throttler of the process. + MaxRates(context.Context, *throttlerdata.MaxRatesRequest) (*throttlerdata.MaxRatesResponse, error) + // SetMaxRate allows to change the current max rate for all throttlers + // of the process. + SetMaxRate(context.Context, *throttlerdata.SetMaxRateRequest) (*throttlerdata.SetMaxRateResponse, error) + // GetConfiguration returns the configuration of the MaxReplicationlag module + // for the given throttler or all throttlers if "throttler_name" is empty. + GetConfiguration(context.Context, *throttlerdata.GetConfigurationRequest) (*throttlerdata.GetConfigurationResponse, error) + // UpdateConfiguration (partially) updates the configuration of the + // MaxReplicationlag module for the given throttler or all throttlers if + // "throttler_name" is empty. + // If "copy_zero_values" is true, fields with zero values will be copied + // as well. + UpdateConfiguration(context.Context, *throttlerdata.UpdateConfigurationRequest) (*throttlerdata.UpdateConfigurationResponse, error) + // ResetConfiguration resets the configuration of the MaxReplicationlag module + // to the initial configuration for the given throttler or all throttlers if + // "throttler_name" is empty. + ResetConfiguration(context.Context, *throttlerdata.ResetConfigurationRequest) (*throttlerdata.ResetConfigurationResponse, error) +} + +// UnimplementedThrottlerServer can be embedded to have forward compatible implementations. +type UnimplementedThrottlerServer struct { +} + +func (*UnimplementedThrottlerServer) MaxRates(ctx context.Context, req *throttlerdata.MaxRatesRequest) (*throttlerdata.MaxRatesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MaxRates not implemented") +} +func (*UnimplementedThrottlerServer) SetMaxRate(ctx context.Context, req *throttlerdata.SetMaxRateRequest) (*throttlerdata.SetMaxRateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetMaxRate not implemented") +} +func (*UnimplementedThrottlerServer) GetConfiguration(ctx context.Context, req *throttlerdata.GetConfigurationRequest) (*throttlerdata.GetConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetConfiguration not implemented") +} +func (*UnimplementedThrottlerServer) UpdateConfiguration(ctx context.Context, req *throttlerdata.UpdateConfigurationRequest) (*throttlerdata.UpdateConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateConfiguration not implemented") +} +func (*UnimplementedThrottlerServer) ResetConfiguration(ctx context.Context, req *throttlerdata.ResetConfigurationRequest) (*throttlerdata.ResetConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResetConfiguration not implemented") +} + +func RegisterThrottlerServer(s *grpc.Server, srv ThrottlerServer) { + s.RegisterService(&_Throttler_serviceDesc, srv) +} + +func _Throttler_MaxRates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(throttlerdata.MaxRatesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ThrottlerServer).MaxRates(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/throttlerservice.Throttler/MaxRates", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ThrottlerServer).MaxRates(ctx, req.(*throttlerdata.MaxRatesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Throttler_SetMaxRate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(throttlerdata.SetMaxRateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ThrottlerServer).SetMaxRate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/throttlerservice.Throttler/SetMaxRate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ThrottlerServer).SetMaxRate(ctx, req.(*throttlerdata.SetMaxRateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Throttler_GetConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(throttlerdata.GetConfigurationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ThrottlerServer).GetConfiguration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/throttlerservice.Throttler/GetConfiguration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ThrottlerServer).GetConfiguration(ctx, req.(*throttlerdata.GetConfigurationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Throttler_UpdateConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(throttlerdata.UpdateConfigurationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ThrottlerServer).UpdateConfiguration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/throttlerservice.Throttler/UpdateConfiguration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ThrottlerServer).UpdateConfiguration(ctx, req.(*throttlerdata.UpdateConfigurationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Throttler_ResetConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(throttlerdata.ResetConfigurationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ThrottlerServer).ResetConfiguration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/throttlerservice.Throttler/ResetConfiguration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ThrottlerServer).ResetConfiguration(ctx, req.(*throttlerdata.ResetConfigurationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Throttler_serviceDesc = grpc.ServiceDesc{ + ServiceName: "throttlerservice.Throttler", + HandlerType: (*ThrottlerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "MaxRates", + Handler: _Throttler_MaxRates_Handler, + }, + { + MethodName: "SetMaxRate", + Handler: _Throttler_SetMaxRate_Handler, + }, + { + MethodName: "GetConfiguration", + Handler: _Throttler_GetConfiguration_Handler, + }, + { + MethodName: "UpdateConfiguration", + Handler: _Throttler_UpdateConfiguration_Handler, + }, + { + MethodName: "ResetConfiguration", + Handler: _Throttler_ResetConfiguration_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "throttlerservice.proto", +} diff --git a/internal/stackql-parser-fork/go/vt/proto/topodata/topodata.pb.go b/internal/stackql-parser-fork/go/vt/proto/topodata/topodata.pb.go new file mode 100644 index 00000000..92cb6ca6 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/topodata/topodata.pb.go @@ -0,0 +1,1465 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: topodata.proto + +package topodata + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + vttime "github.com/stackql/stackql-parser/go/vt/proto/vttime" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// KeyspaceType describes the type of the keyspace +type KeyspaceType int32 + +const ( + // NORMAL is the default value + KeyspaceType_NORMAL KeyspaceType = 0 + // SNAPSHOT is when we are creating a snapshot keyspace + KeyspaceType_SNAPSHOT KeyspaceType = 1 +) + +var KeyspaceType_name = map[int32]string{ + 0: "NORMAL", + 1: "SNAPSHOT", +} + +var KeyspaceType_value = map[string]int32{ + "NORMAL": 0, + "SNAPSHOT": 1, +} + +func (x KeyspaceType) String() string { + return proto.EnumName(KeyspaceType_name, int32(x)) +} + +func (KeyspaceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{0} +} + +// KeyspaceIdType describes the type of the sharding key for a +// range-based sharded keyspace. +type KeyspaceIdType int32 + +const ( + // UNSET is the default value, when range-based sharding is not used. + KeyspaceIdType_UNSET KeyspaceIdType = 0 + // UINT64 is when uint64 value is used. + // This is represented as 'unsigned bigint' in mysql + KeyspaceIdType_UINT64 KeyspaceIdType = 1 + // BYTES is when an array of bytes is used. + // This is represented as 'varbinary' in mysql + KeyspaceIdType_BYTES KeyspaceIdType = 2 +) + +var KeyspaceIdType_name = map[int32]string{ + 0: "UNSET", + 1: "UINT64", + 2: "BYTES", +} + +var KeyspaceIdType_value = map[string]int32{ + "UNSET": 0, + "UINT64": 1, + "BYTES": 2, +} + +func (x KeyspaceIdType) String() string { + return proto.EnumName(KeyspaceIdType_name, int32(x)) +} + +func (KeyspaceIdType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{1} +} + +// TabletType represents the type of a given tablet. +type TabletType int32 + +const ( + // UNKNOWN is not a valid value. + TabletType_UNKNOWN TabletType = 0 + // MASTER is the master server for the shard. Only MASTER allows DMLs. + TabletType_MASTER TabletType = 1 + // REPLICA replicates from master. It is used to serve live traffic. + // A REPLICA can be promoted to MASTER. A demoted MASTER will go to REPLICA. + TabletType_REPLICA TabletType = 2 + // RDONLY (old name) / BATCH (new name) is used to serve traffic for + // long-running jobs. It is a separate type from REPLICA so + // long-running queries don't affect web-like traffic. + TabletType_RDONLY TabletType = 3 + TabletType_BATCH TabletType = 3 + // SPARE is a type of servers that cannot serve queries, but is available + // in case an extra server is needed. + TabletType_SPARE TabletType = 4 + // EXPERIMENTAL is like SPARE, except it can serve queries. This + // type can be used for usages not planned by Vitess, like online + // export to another storage engine. + TabletType_EXPERIMENTAL TabletType = 5 + // BACKUP is the type a server goes to when taking a backup. No queries + // can be served in BACKUP mode. + TabletType_BACKUP TabletType = 6 + // RESTORE is the type a server uses when restoring a backup, at + // startup time. No queries can be served in RESTORE mode. + TabletType_RESTORE TabletType = 7 + // DRAINED is the type a server goes into when used by Vitess tools + // to perform an offline action. It is a serving type (as + // the tools processes may need to run queries), but it's not used + // to route queries from Vitess users. In this state, + // this tablet is dedicated to the process that uses it. + TabletType_DRAINED TabletType = 8 +) + +var TabletType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "MASTER", + 2: "REPLICA", + 3: "RDONLY", + // Duplicate value: 3: "BATCH", + 4: "SPARE", + 5: "EXPERIMENTAL", + 6: "BACKUP", + 7: "RESTORE", + 8: "DRAINED", +} + +var TabletType_value = map[string]int32{ + "UNKNOWN": 0, + "MASTER": 1, + "REPLICA": 2, + "RDONLY": 3, + "BATCH": 3, + "SPARE": 4, + "EXPERIMENTAL": 5, + "BACKUP": 6, + "RESTORE": 7, + "DRAINED": 8, +} + +func (x TabletType) String() string { + return proto.EnumName(TabletType_name, int32(x)) +} + +func (TabletType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{2} +} + +// KeyRange describes a range of sharding keys, when range-based +// sharding is used. +type KeyRange struct { + Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyRange) Reset() { *m = KeyRange{} } +func (m *KeyRange) String() string { return proto.CompactTextString(m) } +func (*KeyRange) ProtoMessage() {} +func (*KeyRange) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{0} +} + +func (m *KeyRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyRange.Unmarshal(m, b) +} +func (m *KeyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyRange.Marshal(b, m, deterministic) +} +func (m *KeyRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyRange.Merge(m, src) +} +func (m *KeyRange) XXX_Size() int { + return xxx_messageInfo_KeyRange.Size(m) +} +func (m *KeyRange) XXX_DiscardUnknown() { + xxx_messageInfo_KeyRange.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyRange proto.InternalMessageInfo + +func (m *KeyRange) GetStart() []byte { + if m != nil { + return m.Start + } + return nil +} + +func (m *KeyRange) GetEnd() []byte { + if m != nil { + return m.End + } + return nil +} + +// TabletAlias is a globally unique tablet identifier. +type TabletAlias struct { + // cell is the cell (or datacenter) the tablet is in + Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` + // uid is a unique id for this tablet within the shard + // (this is the MySQL server id as well). + Uid uint32 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TabletAlias) Reset() { *m = TabletAlias{} } +func (m *TabletAlias) String() string { return proto.CompactTextString(m) } +func (*TabletAlias) ProtoMessage() {} +func (*TabletAlias) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{1} +} + +func (m *TabletAlias) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TabletAlias.Unmarshal(m, b) +} +func (m *TabletAlias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TabletAlias.Marshal(b, m, deterministic) +} +func (m *TabletAlias) XXX_Merge(src proto.Message) { + xxx_messageInfo_TabletAlias.Merge(m, src) +} +func (m *TabletAlias) XXX_Size() int { + return xxx_messageInfo_TabletAlias.Size(m) +} +func (m *TabletAlias) XXX_DiscardUnknown() { + xxx_messageInfo_TabletAlias.DiscardUnknown(m) +} + +var xxx_messageInfo_TabletAlias proto.InternalMessageInfo + +func (m *TabletAlias) GetCell() string { + if m != nil { + return m.Cell + } + return "" +} + +func (m *TabletAlias) GetUid() uint32 { + if m != nil { + return m.Uid + } + return 0 +} + +// Tablet represents information about a running instance of vttablet. +type Tablet struct { + // alias is the unique name of the tablet. + Alias *TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` + // Fully qualified domain name of the host. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Map of named ports. Normally this should include vt and grpc. + // Going forward, the mysql port will be stored in mysql_port + // instead of here. + // For accessing mysql port, use topoproto.MysqlPort to fetch, and + // topoproto.SetMysqlPort to set. These wrappers will ensure + // legacy behavior is supported. + PortMap map[string]int32 `protobuf:"bytes,4,rep,name=port_map,json=portMap,proto3" json:"port_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + // Keyspace name. + Keyspace string `protobuf:"bytes,5,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard name. If range based sharding is used, it should match + // key_range. + Shard string `protobuf:"bytes,6,opt,name=shard,proto3" json:"shard,omitempty"` + // If range based sharding is used, range for the tablet's shard. + KeyRange *KeyRange `protobuf:"bytes,7,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` + // type is the current type of the tablet. + Type TabletType `protobuf:"varint,8,opt,name=type,proto3,enum=topodata.TabletType" json:"type,omitempty"` + // It this is set, it is used as the database name instead of the + // normal "vt_" + keyspace. + DbNameOverride string `protobuf:"bytes,9,opt,name=db_name_override,json=dbNameOverride,proto3" json:"db_name_override,omitempty"` + // tablet tags + Tags map[string]string `protobuf:"bytes,10,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // MySQL hostname. + MysqlHostname string `protobuf:"bytes,12,opt,name=mysql_hostname,json=mysqlHostname,proto3" json:"mysql_hostname,omitempty"` + // MySQL port. Use topoproto.MysqlPort and topoproto.SetMysqlPort + // to access this variable. The functions provide support + // for legacy behavior. + MysqlPort int32 `protobuf:"varint,13,opt,name=mysql_port,json=mysqlPort,proto3" json:"mysql_port,omitempty"` + // master_term_start_time is the time (in UTC) at which the current term of + // the current tablet began as master. If this tablet is not currently the + // master, this value is ignored. + // + // A new master term begins any time an authoritative decision is communicated + // about which tablet should be the master, such as via Vitess + // replication-management commands like PlannedReparentShard, + // EmergencyReparentShard, and TabletExternallyReparented. + // + MasterTermStartTime *vttime.Time `protobuf:"bytes,14,opt,name=master_term_start_time,json=masterTermStartTime,proto3" json:"master_term_start_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Tablet) Reset() { *m = Tablet{} } +func (m *Tablet) String() string { return proto.CompactTextString(m) } +func (*Tablet) ProtoMessage() {} +func (*Tablet) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{2} +} + +func (m *Tablet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Tablet.Unmarshal(m, b) +} +func (m *Tablet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Tablet.Marshal(b, m, deterministic) +} +func (m *Tablet) XXX_Merge(src proto.Message) { + xxx_messageInfo_Tablet.Merge(m, src) +} +func (m *Tablet) XXX_Size() int { + return xxx_messageInfo_Tablet.Size(m) +} +func (m *Tablet) XXX_DiscardUnknown() { + xxx_messageInfo_Tablet.DiscardUnknown(m) +} + +var xxx_messageInfo_Tablet proto.InternalMessageInfo + +func (m *Tablet) GetAlias() *TabletAlias { + if m != nil { + return m.Alias + } + return nil +} + +func (m *Tablet) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *Tablet) GetPortMap() map[string]int32 { + if m != nil { + return m.PortMap + } + return nil +} + +func (m *Tablet) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *Tablet) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *Tablet) GetKeyRange() *KeyRange { + if m != nil { + return m.KeyRange + } + return nil +} + +func (m *Tablet) GetType() TabletType { + if m != nil { + return m.Type + } + return TabletType_UNKNOWN +} + +func (m *Tablet) GetDbNameOverride() string { + if m != nil { + return m.DbNameOverride + } + return "" +} + +func (m *Tablet) GetTags() map[string]string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Tablet) GetMysqlHostname() string { + if m != nil { + return m.MysqlHostname + } + return "" +} + +func (m *Tablet) GetMysqlPort() int32 { + if m != nil { + return m.MysqlPort + } + return 0 +} + +func (m *Tablet) GetMasterTermStartTime() *vttime.Time { + if m != nil { + return m.MasterTermStartTime + } + return nil +} + +// A Shard contains data about a subset of the data whithin a keyspace. +type Shard struct { + // master_alias is the tablet alias of the master for the shard. + // If it is unset, then there is no master in this shard yet. + // No lock is necessary to update this field, when for instance + // TabletExternallyReparented updates this. However, we lock the + // shard for reparenting operations (InitShardMaster, + // PlannedReparentShard,EmergencyReparentShard), to guarantee + // exclusive operation. + MasterAlias *TabletAlias `protobuf:"bytes,1,opt,name=master_alias,json=masterAlias,proto3" json:"master_alias,omitempty"` + // master_term_start_time is the time (in UTC) at which the current term of + // the master specified in master_alias began. + // + // A new master term begins any time an authoritative decision is communicated + // about which tablet should be the master, such as via Vitess + // replication-management commands like PlannedReparentShard, + // EmergencyReparentShard, and TabletExternallyReparented. + // + // The master_alias should only ever be changed if the new master's term began + // at a later time than this. Note that a new term can start for the tablet + // that is already the master. In that case, the master_term_start_time would + // be increased without changing the master_alias. + MasterTermStartTime *vttime.Time `protobuf:"bytes,8,opt,name=master_term_start_time,json=masterTermStartTime,proto3" json:"master_term_start_time,omitempty"` + // key_range is the KeyRange for this shard. It can be unset if: + // - we are not using range-based sharding in this shard. + // - the shard covers the entire keyrange. + // This must match the shard name based on our other conventions, but + // helpful to have it decomposed here. + // Once set at creation time, it is never changed. + KeyRange *KeyRange `protobuf:"bytes,2,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` + // served_types has at most one entry per TabletType + // This field is in the process of being deprecated in favor of + // is_master_serving. Keeping for backwards compatibility purposes. + ServedTypes []*Shard_ServedType `protobuf:"bytes,3,rep,name=served_types,json=servedTypes,proto3" json:"served_types,omitempty"` + // SourceShards is the list of shards we're replicating from, + // using filtered replication. + // The keyspace lock is always taken when changing this. + SourceShards []*Shard_SourceShard `protobuf:"bytes,4,rep,name=source_shards,json=sourceShards,proto3" json:"source_shards,omitempty"` + // tablet_controls has at most one entry per TabletType. + // The keyspace lock is always taken when changing this. + TabletControls []*Shard_TabletControl `protobuf:"bytes,6,rep,name=tablet_controls,json=tabletControls,proto3" json:"tablet_controls,omitempty"` + // is_master_serving sets whether this shard master is serving traffic or not. + // The keyspace lock is always taken when changing this. + IsMasterServing bool `protobuf:"varint,7,opt,name=is_master_serving,json=isMasterServing,proto3" json:"is_master_serving,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Shard) Reset() { *m = Shard{} } +func (m *Shard) String() string { return proto.CompactTextString(m) } +func (*Shard) ProtoMessage() {} +func (*Shard) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{3} +} + +func (m *Shard) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Shard.Unmarshal(m, b) +} +func (m *Shard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Shard.Marshal(b, m, deterministic) +} +func (m *Shard) XXX_Merge(src proto.Message) { + xxx_messageInfo_Shard.Merge(m, src) +} +func (m *Shard) XXX_Size() int { + return xxx_messageInfo_Shard.Size(m) +} +func (m *Shard) XXX_DiscardUnknown() { + xxx_messageInfo_Shard.DiscardUnknown(m) +} + +var xxx_messageInfo_Shard proto.InternalMessageInfo + +func (m *Shard) GetMasterAlias() *TabletAlias { + if m != nil { + return m.MasterAlias + } + return nil +} + +func (m *Shard) GetMasterTermStartTime() *vttime.Time { + if m != nil { + return m.MasterTermStartTime + } + return nil +} + +func (m *Shard) GetKeyRange() *KeyRange { + if m != nil { + return m.KeyRange + } + return nil +} + +func (m *Shard) GetServedTypes() []*Shard_ServedType { + if m != nil { + return m.ServedTypes + } + return nil +} + +func (m *Shard) GetSourceShards() []*Shard_SourceShard { + if m != nil { + return m.SourceShards + } + return nil +} + +func (m *Shard) GetTabletControls() []*Shard_TabletControl { + if m != nil { + return m.TabletControls + } + return nil +} + +func (m *Shard) GetIsMasterServing() bool { + if m != nil { + return m.IsMasterServing + } + return false +} + +// ServedType is an entry in the served_types +type Shard_ServedType struct { + TabletType TabletType `protobuf:"varint,1,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Shard_ServedType) Reset() { *m = Shard_ServedType{} } +func (m *Shard_ServedType) String() string { return proto.CompactTextString(m) } +func (*Shard_ServedType) ProtoMessage() {} +func (*Shard_ServedType) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{3, 0} +} + +func (m *Shard_ServedType) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Shard_ServedType.Unmarshal(m, b) +} +func (m *Shard_ServedType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Shard_ServedType.Marshal(b, m, deterministic) +} +func (m *Shard_ServedType) XXX_Merge(src proto.Message) { + xxx_messageInfo_Shard_ServedType.Merge(m, src) +} +func (m *Shard_ServedType) XXX_Size() int { + return xxx_messageInfo_Shard_ServedType.Size(m) +} +func (m *Shard_ServedType) XXX_DiscardUnknown() { + xxx_messageInfo_Shard_ServedType.DiscardUnknown(m) +} + +var xxx_messageInfo_Shard_ServedType proto.InternalMessageInfo + +func (m *Shard_ServedType) GetTabletType() TabletType { + if m != nil { + return m.TabletType + } + return TabletType_UNKNOWN +} + +func (m *Shard_ServedType) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + +// SourceShard represents a data source for filtered replication +// across shards. When this is used in a destination shard, the master +// of that shard will run filtered replication. +type Shard_SourceShard struct { + // Uid is the unique ID for this SourceShard object. + Uid uint32 `protobuf:"varint,1,opt,name=uid,proto3" json:"uid,omitempty"` + // the source keyspace + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // the source shard + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + // the source shard keyrange + KeyRange *KeyRange `protobuf:"bytes,4,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` + // the source table list to replicate + Tables []string `protobuf:"bytes,5,rep,name=tables,proto3" json:"tables,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Shard_SourceShard) Reset() { *m = Shard_SourceShard{} } +func (m *Shard_SourceShard) String() string { return proto.CompactTextString(m) } +func (*Shard_SourceShard) ProtoMessage() {} +func (*Shard_SourceShard) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{3, 1} +} + +func (m *Shard_SourceShard) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Shard_SourceShard.Unmarshal(m, b) +} +func (m *Shard_SourceShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Shard_SourceShard.Marshal(b, m, deterministic) +} +func (m *Shard_SourceShard) XXX_Merge(src proto.Message) { + xxx_messageInfo_Shard_SourceShard.Merge(m, src) +} +func (m *Shard_SourceShard) XXX_Size() int { + return xxx_messageInfo_Shard_SourceShard.Size(m) +} +func (m *Shard_SourceShard) XXX_DiscardUnknown() { + xxx_messageInfo_Shard_SourceShard.DiscardUnknown(m) +} + +var xxx_messageInfo_Shard_SourceShard proto.InternalMessageInfo + +func (m *Shard_SourceShard) GetUid() uint32 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *Shard_SourceShard) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *Shard_SourceShard) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *Shard_SourceShard) GetKeyRange() *KeyRange { + if m != nil { + return m.KeyRange + } + return nil +} + +func (m *Shard_SourceShard) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + +// TabletControl controls tablet's behavior +type Shard_TabletControl struct { + // which tablet type is affected + TabletType TabletType `protobuf:"varint,1,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + BlacklistedTables []string `protobuf:"bytes,4,rep,name=blacklisted_tables,json=blacklistedTables,proto3" json:"blacklisted_tables,omitempty"` + // frozen is set if we've started failing over traffic for + // the master. If set, this record should not be removed. + Frozen bool `protobuf:"varint,5,opt,name=frozen,proto3" json:"frozen,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Shard_TabletControl) Reset() { *m = Shard_TabletControl{} } +func (m *Shard_TabletControl) String() string { return proto.CompactTextString(m) } +func (*Shard_TabletControl) ProtoMessage() {} +func (*Shard_TabletControl) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{3, 2} +} + +func (m *Shard_TabletControl) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Shard_TabletControl.Unmarshal(m, b) +} +func (m *Shard_TabletControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Shard_TabletControl.Marshal(b, m, deterministic) +} +func (m *Shard_TabletControl) XXX_Merge(src proto.Message) { + xxx_messageInfo_Shard_TabletControl.Merge(m, src) +} +func (m *Shard_TabletControl) XXX_Size() int { + return xxx_messageInfo_Shard_TabletControl.Size(m) +} +func (m *Shard_TabletControl) XXX_DiscardUnknown() { + xxx_messageInfo_Shard_TabletControl.DiscardUnknown(m) +} + +var xxx_messageInfo_Shard_TabletControl proto.InternalMessageInfo + +func (m *Shard_TabletControl) GetTabletType() TabletType { + if m != nil { + return m.TabletType + } + return TabletType_UNKNOWN +} + +func (m *Shard_TabletControl) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + +func (m *Shard_TabletControl) GetBlacklistedTables() []string { + if m != nil { + return m.BlacklistedTables + } + return nil +} + +func (m *Shard_TabletControl) GetFrozen() bool { + if m != nil { + return m.Frozen + } + return false +} + +// A Keyspace contains data about a keyspace. +type Keyspace struct { + // name of the column used for sharding + // empty if the keyspace is not sharded + ShardingColumnName string `protobuf:"bytes,1,opt,name=sharding_column_name,json=shardingColumnName,proto3" json:"sharding_column_name,omitempty"` + // type of the column used for sharding + // UNSET if the keyspace is not sharded + ShardingColumnType KeyspaceIdType `protobuf:"varint,2,opt,name=sharding_column_type,json=shardingColumnType,proto3,enum=topodata.KeyspaceIdType" json:"sharding_column_type,omitempty"` + // ServedFrom will redirect the appropriate traffic to + // another keyspace. + ServedFroms []*Keyspace_ServedFrom `protobuf:"bytes,4,rep,name=served_froms,json=servedFroms,proto3" json:"served_froms,omitempty"` + // keyspace_type will determine how this keyspace is treated by + // vtgate / vschema. Normal keyspaces are routable by + // any query. Snapshot keyspaces are only accessible + // by explicit addresssing or by calling "use keyspace" first + KeyspaceType KeyspaceType `protobuf:"varint,5,opt,name=keyspace_type,json=keyspaceType,proto3,enum=topodata.KeyspaceType" json:"keyspace_type,omitempty"` + // base_keyspace is the base keyspace from which a snapshot + // keyspace is created. empty for normal keyspaces + BaseKeyspace string `protobuf:"bytes,6,opt,name=base_keyspace,json=baseKeyspace,proto3" json:"base_keyspace,omitempty"` + // snapshot_time (in UTC) is a property of snapshot + // keyspaces which tells us what point in time + // the snapshot is of + SnapshotTime *vttime.Time `protobuf:"bytes,7,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Keyspace) Reset() { *m = Keyspace{} } +func (m *Keyspace) String() string { return proto.CompactTextString(m) } +func (*Keyspace) ProtoMessage() {} +func (*Keyspace) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{4} +} + +func (m *Keyspace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Keyspace.Unmarshal(m, b) +} +func (m *Keyspace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) +} +func (m *Keyspace) XXX_Merge(src proto.Message) { + xxx_messageInfo_Keyspace.Merge(m, src) +} +func (m *Keyspace) XXX_Size() int { + return xxx_messageInfo_Keyspace.Size(m) +} +func (m *Keyspace) XXX_DiscardUnknown() { + xxx_messageInfo_Keyspace.DiscardUnknown(m) +} + +var xxx_messageInfo_Keyspace proto.InternalMessageInfo + +func (m *Keyspace) GetShardingColumnName() string { + if m != nil { + return m.ShardingColumnName + } + return "" +} + +func (m *Keyspace) GetShardingColumnType() KeyspaceIdType { + if m != nil { + return m.ShardingColumnType + } + return KeyspaceIdType_UNSET +} + +func (m *Keyspace) GetServedFroms() []*Keyspace_ServedFrom { + if m != nil { + return m.ServedFroms + } + return nil +} + +func (m *Keyspace) GetKeyspaceType() KeyspaceType { + if m != nil { + return m.KeyspaceType + } + return KeyspaceType_NORMAL +} + +func (m *Keyspace) GetBaseKeyspace() string { + if m != nil { + return m.BaseKeyspace + } + return "" +} + +func (m *Keyspace) GetSnapshotTime() *vttime.Time { + if m != nil { + return m.SnapshotTime + } + return nil +} + +// ServedFrom indicates a relationship between a TabletType and the +// keyspace name that's serving it. +type Keyspace_ServedFrom struct { + // the tablet type (key for the map) + TabletType TabletType `protobuf:"varint,1,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + // the cells to limit this to + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + // the keyspace name that's serving it + Keyspace string `protobuf:"bytes,3,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Keyspace_ServedFrom) Reset() { *m = Keyspace_ServedFrom{} } +func (m *Keyspace_ServedFrom) String() string { return proto.CompactTextString(m) } +func (*Keyspace_ServedFrom) ProtoMessage() {} +func (*Keyspace_ServedFrom) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{4, 0} +} + +func (m *Keyspace_ServedFrom) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Keyspace_ServedFrom.Unmarshal(m, b) +} +func (m *Keyspace_ServedFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Keyspace_ServedFrom.Marshal(b, m, deterministic) +} +func (m *Keyspace_ServedFrom) XXX_Merge(src proto.Message) { + xxx_messageInfo_Keyspace_ServedFrom.Merge(m, src) +} +func (m *Keyspace_ServedFrom) XXX_Size() int { + return xxx_messageInfo_Keyspace_ServedFrom.Size(m) +} +func (m *Keyspace_ServedFrom) XXX_DiscardUnknown() { + xxx_messageInfo_Keyspace_ServedFrom.DiscardUnknown(m) +} + +var xxx_messageInfo_Keyspace_ServedFrom proto.InternalMessageInfo + +func (m *Keyspace_ServedFrom) GetTabletType() TabletType { + if m != nil { + return m.TabletType + } + return TabletType_UNKNOWN +} + +func (m *Keyspace_ServedFrom) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + +func (m *Keyspace_ServedFrom) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +// ShardReplication describes the MySQL replication relationships +// whithin a cell. +type ShardReplication struct { + // Note there can be only one Node in this array + // for a given tablet. + Nodes []*ShardReplication_Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShardReplication) Reset() { *m = ShardReplication{} } +func (m *ShardReplication) String() string { return proto.CompactTextString(m) } +func (*ShardReplication) ProtoMessage() {} +func (*ShardReplication) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{5} +} + +func (m *ShardReplication) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShardReplication.Unmarshal(m, b) +} +func (m *ShardReplication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShardReplication.Marshal(b, m, deterministic) +} +func (m *ShardReplication) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShardReplication.Merge(m, src) +} +func (m *ShardReplication) XXX_Size() int { + return xxx_messageInfo_ShardReplication.Size(m) +} +func (m *ShardReplication) XXX_DiscardUnknown() { + xxx_messageInfo_ShardReplication.DiscardUnknown(m) +} + +var xxx_messageInfo_ShardReplication proto.InternalMessageInfo + +func (m *ShardReplication) GetNodes() []*ShardReplication_Node { + if m != nil { + return m.Nodes + } + return nil +} + +// Node describes a tablet instance within the cell +type ShardReplication_Node struct { + TabletAlias *TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShardReplication_Node) Reset() { *m = ShardReplication_Node{} } +func (m *ShardReplication_Node) String() string { return proto.CompactTextString(m) } +func (*ShardReplication_Node) ProtoMessage() {} +func (*ShardReplication_Node) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{5, 0} +} + +func (m *ShardReplication_Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShardReplication_Node.Unmarshal(m, b) +} +func (m *ShardReplication_Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShardReplication_Node.Marshal(b, m, deterministic) +} +func (m *ShardReplication_Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShardReplication_Node.Merge(m, src) +} +func (m *ShardReplication_Node) XXX_Size() int { + return xxx_messageInfo_ShardReplication_Node.Size(m) +} +func (m *ShardReplication_Node) XXX_DiscardUnknown() { + xxx_messageInfo_ShardReplication_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_ShardReplication_Node proto.InternalMessageInfo + +func (m *ShardReplication_Node) GetTabletAlias() *TabletAlias { + if m != nil { + return m.TabletAlias + } + return nil +} + +// ShardReference is used as a pointer from a SrvKeyspace to a Shard +type ShardReference struct { + // Copied from Shard. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + KeyRange *KeyRange `protobuf:"bytes,2,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShardReference) Reset() { *m = ShardReference{} } +func (m *ShardReference) String() string { return proto.CompactTextString(m) } +func (*ShardReference) ProtoMessage() {} +func (*ShardReference) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{6} +} + +func (m *ShardReference) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShardReference.Unmarshal(m, b) +} +func (m *ShardReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShardReference.Marshal(b, m, deterministic) +} +func (m *ShardReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShardReference.Merge(m, src) +} +func (m *ShardReference) XXX_Size() int { + return xxx_messageInfo_ShardReference.Size(m) +} +func (m *ShardReference) XXX_DiscardUnknown() { + xxx_messageInfo_ShardReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ShardReference proto.InternalMessageInfo + +func (m *ShardReference) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ShardReference) GetKeyRange() *KeyRange { + if m != nil { + return m.KeyRange + } + return nil +} + +// ShardTabletControl is used as a pointer from a SrvKeyspace to a Shard +type ShardTabletControl struct { + // Copied from Shard. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + KeyRange *KeyRange `protobuf:"bytes,2,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` + // Disable query serving in this shard + QueryServiceDisabled bool `protobuf:"varint,3,opt,name=query_service_disabled,json=queryServiceDisabled,proto3" json:"query_service_disabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShardTabletControl) Reset() { *m = ShardTabletControl{} } +func (m *ShardTabletControl) String() string { return proto.CompactTextString(m) } +func (*ShardTabletControl) ProtoMessage() {} +func (*ShardTabletControl) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{7} +} + +func (m *ShardTabletControl) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShardTabletControl.Unmarshal(m, b) +} +func (m *ShardTabletControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShardTabletControl.Marshal(b, m, deterministic) +} +func (m *ShardTabletControl) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShardTabletControl.Merge(m, src) +} +func (m *ShardTabletControl) XXX_Size() int { + return xxx_messageInfo_ShardTabletControl.Size(m) +} +func (m *ShardTabletControl) XXX_DiscardUnknown() { + xxx_messageInfo_ShardTabletControl.DiscardUnknown(m) +} + +var xxx_messageInfo_ShardTabletControl proto.InternalMessageInfo + +func (m *ShardTabletControl) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ShardTabletControl) GetKeyRange() *KeyRange { + if m != nil { + return m.KeyRange + } + return nil +} + +func (m *ShardTabletControl) GetQueryServiceDisabled() bool { + if m != nil { + return m.QueryServiceDisabled + } + return false +} + +// SrvKeyspace is a rollup node for the keyspace itself. +type SrvKeyspace struct { + // The partitions this keyspace is serving, per tablet type. + Partitions []*SrvKeyspace_KeyspacePartition `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions,omitempty"` + // copied from Keyspace + ShardingColumnName string `protobuf:"bytes,2,opt,name=sharding_column_name,json=shardingColumnName,proto3" json:"sharding_column_name,omitempty"` + ShardingColumnType KeyspaceIdType `protobuf:"varint,3,opt,name=sharding_column_type,json=shardingColumnType,proto3,enum=topodata.KeyspaceIdType" json:"sharding_column_type,omitempty"` + ServedFrom []*SrvKeyspace_ServedFrom `protobuf:"bytes,4,rep,name=served_from,json=servedFrom,proto3" json:"served_from,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SrvKeyspace) Reset() { *m = SrvKeyspace{} } +func (m *SrvKeyspace) String() string { return proto.CompactTextString(m) } +func (*SrvKeyspace) ProtoMessage() {} +func (*SrvKeyspace) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{8} +} + +func (m *SrvKeyspace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SrvKeyspace.Unmarshal(m, b) +} +func (m *SrvKeyspace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SrvKeyspace.Marshal(b, m, deterministic) +} +func (m *SrvKeyspace) XXX_Merge(src proto.Message) { + xxx_messageInfo_SrvKeyspace.Merge(m, src) +} +func (m *SrvKeyspace) XXX_Size() int { + return xxx_messageInfo_SrvKeyspace.Size(m) +} +func (m *SrvKeyspace) XXX_DiscardUnknown() { + xxx_messageInfo_SrvKeyspace.DiscardUnknown(m) +} + +var xxx_messageInfo_SrvKeyspace proto.InternalMessageInfo + +func (m *SrvKeyspace) GetPartitions() []*SrvKeyspace_KeyspacePartition { + if m != nil { + return m.Partitions + } + return nil +} + +func (m *SrvKeyspace) GetShardingColumnName() string { + if m != nil { + return m.ShardingColumnName + } + return "" +} + +func (m *SrvKeyspace) GetShardingColumnType() KeyspaceIdType { + if m != nil { + return m.ShardingColumnType + } + return KeyspaceIdType_UNSET +} + +func (m *SrvKeyspace) GetServedFrom() []*SrvKeyspace_ServedFrom { + if m != nil { + return m.ServedFrom + } + return nil +} + +type SrvKeyspace_KeyspacePartition struct { + // The type this partition applies to. + ServedType TabletType `protobuf:"varint,1,opt,name=served_type,json=servedType,proto3,enum=topodata.TabletType" json:"served_type,omitempty"` + // List of non-overlapping continuous shards sorted by range. + ShardReferences []*ShardReference `protobuf:"bytes,2,rep,name=shard_references,json=shardReferences,proto3" json:"shard_references,omitempty"` + // List of shard tablet controls + ShardTabletControls []*ShardTabletControl `protobuf:"bytes,3,rep,name=shard_tablet_controls,json=shardTabletControls,proto3" json:"shard_tablet_controls,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SrvKeyspace_KeyspacePartition) Reset() { *m = SrvKeyspace_KeyspacePartition{} } +func (m *SrvKeyspace_KeyspacePartition) String() string { return proto.CompactTextString(m) } +func (*SrvKeyspace_KeyspacePartition) ProtoMessage() {} +func (*SrvKeyspace_KeyspacePartition) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{8, 0} +} + +func (m *SrvKeyspace_KeyspacePartition) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SrvKeyspace_KeyspacePartition.Unmarshal(m, b) +} +func (m *SrvKeyspace_KeyspacePartition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SrvKeyspace_KeyspacePartition.Marshal(b, m, deterministic) +} +func (m *SrvKeyspace_KeyspacePartition) XXX_Merge(src proto.Message) { + xxx_messageInfo_SrvKeyspace_KeyspacePartition.Merge(m, src) +} +func (m *SrvKeyspace_KeyspacePartition) XXX_Size() int { + return xxx_messageInfo_SrvKeyspace_KeyspacePartition.Size(m) +} +func (m *SrvKeyspace_KeyspacePartition) XXX_DiscardUnknown() { + xxx_messageInfo_SrvKeyspace_KeyspacePartition.DiscardUnknown(m) +} + +var xxx_messageInfo_SrvKeyspace_KeyspacePartition proto.InternalMessageInfo + +func (m *SrvKeyspace_KeyspacePartition) GetServedType() TabletType { + if m != nil { + return m.ServedType + } + return TabletType_UNKNOWN +} + +func (m *SrvKeyspace_KeyspacePartition) GetShardReferences() []*ShardReference { + if m != nil { + return m.ShardReferences + } + return nil +} + +func (m *SrvKeyspace_KeyspacePartition) GetShardTabletControls() []*ShardTabletControl { + if m != nil { + return m.ShardTabletControls + } + return nil +} + +// ServedFrom indicates a relationship between a TabletType and the +// keyspace name that's serving it. +type SrvKeyspace_ServedFrom struct { + // the tablet type + TabletType TabletType `protobuf:"varint,1,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + // the keyspace name that's serving it + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SrvKeyspace_ServedFrom) Reset() { *m = SrvKeyspace_ServedFrom{} } +func (m *SrvKeyspace_ServedFrom) String() string { return proto.CompactTextString(m) } +func (*SrvKeyspace_ServedFrom) ProtoMessage() {} +func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{8, 1} +} + +func (m *SrvKeyspace_ServedFrom) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SrvKeyspace_ServedFrom.Unmarshal(m, b) +} +func (m *SrvKeyspace_ServedFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SrvKeyspace_ServedFrom.Marshal(b, m, deterministic) +} +func (m *SrvKeyspace_ServedFrom) XXX_Merge(src proto.Message) { + xxx_messageInfo_SrvKeyspace_ServedFrom.Merge(m, src) +} +func (m *SrvKeyspace_ServedFrom) XXX_Size() int { + return xxx_messageInfo_SrvKeyspace_ServedFrom.Size(m) +} +func (m *SrvKeyspace_ServedFrom) XXX_DiscardUnknown() { + xxx_messageInfo_SrvKeyspace_ServedFrom.DiscardUnknown(m) +} + +var xxx_messageInfo_SrvKeyspace_ServedFrom proto.InternalMessageInfo + +func (m *SrvKeyspace_ServedFrom) GetTabletType() TabletType { + if m != nil { + return m.TabletType + } + return TabletType_UNKNOWN +} + +func (m *SrvKeyspace_ServedFrom) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +// CellInfo contains information about a cell. CellInfo objects are +// stored in the global topology server, and describe how to reach +// local topology servers. +type CellInfo struct { + // ServerAddress contains the address of the server for the cell. + // The syntax of this field is topology implementation specific. + // For instance, for Zookeeper, it is a comma-separated list of + // server addresses. + ServerAddress string `protobuf:"bytes,1,opt,name=server_address,json=serverAddress,proto3" json:"server_address,omitempty"` + // Root is the path to store data in. It is only used when talking + // to server_address. + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CellInfo) Reset() { *m = CellInfo{} } +func (m *CellInfo) String() string { return proto.CompactTextString(m) } +func (*CellInfo) ProtoMessage() {} +func (*CellInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{9} +} + +func (m *CellInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CellInfo.Unmarshal(m, b) +} +func (m *CellInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CellInfo.Marshal(b, m, deterministic) +} +func (m *CellInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_CellInfo.Merge(m, src) +} +func (m *CellInfo) XXX_Size() int { + return xxx_messageInfo_CellInfo.Size(m) +} +func (m *CellInfo) XXX_DiscardUnknown() { + xxx_messageInfo_CellInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_CellInfo proto.InternalMessageInfo + +func (m *CellInfo) GetServerAddress() string { + if m != nil { + return m.ServerAddress + } + return "" +} + +func (m *CellInfo) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +// CellsAlias +type CellsAlias struct { + // Cells that map to this alias + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CellsAlias) Reset() { *m = CellsAlias{} } +func (m *CellsAlias) String() string { return proto.CompactTextString(m) } +func (*CellsAlias) ProtoMessage() {} +func (*CellsAlias) Descriptor() ([]byte, []int) { + return fileDescriptor_52c350cb619f972e, []int{10} +} + +func (m *CellsAlias) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CellsAlias.Unmarshal(m, b) +} +func (m *CellsAlias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CellsAlias.Marshal(b, m, deterministic) +} +func (m *CellsAlias) XXX_Merge(src proto.Message) { + xxx_messageInfo_CellsAlias.Merge(m, src) +} +func (m *CellsAlias) XXX_Size() int { + return xxx_messageInfo_CellsAlias.Size(m) +} +func (m *CellsAlias) XXX_DiscardUnknown() { + xxx_messageInfo_CellsAlias.DiscardUnknown(m) +} + +var xxx_messageInfo_CellsAlias proto.InternalMessageInfo + +func (m *CellsAlias) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + +func init() { + proto.RegisterEnum("topodata.KeyspaceType", KeyspaceType_name, KeyspaceType_value) + proto.RegisterEnum("topodata.KeyspaceIdType", KeyspaceIdType_name, KeyspaceIdType_value) + proto.RegisterEnum("topodata.TabletType", TabletType_name, TabletType_value) + proto.RegisterType((*KeyRange)(nil), "topodata.KeyRange") + proto.RegisterType((*TabletAlias)(nil), "topodata.TabletAlias") + proto.RegisterType((*Tablet)(nil), "topodata.Tablet") + proto.RegisterMapType((map[string]int32)(nil), "topodata.Tablet.PortMapEntry") + proto.RegisterMapType((map[string]string)(nil), "topodata.Tablet.TagsEntry") + proto.RegisterType((*Shard)(nil), "topodata.Shard") + proto.RegisterType((*Shard_ServedType)(nil), "topodata.Shard.ServedType") + proto.RegisterType((*Shard_SourceShard)(nil), "topodata.Shard.SourceShard") + proto.RegisterType((*Shard_TabletControl)(nil), "topodata.Shard.TabletControl") + proto.RegisterType((*Keyspace)(nil), "topodata.Keyspace") + proto.RegisterType((*Keyspace_ServedFrom)(nil), "topodata.Keyspace.ServedFrom") + proto.RegisterType((*ShardReplication)(nil), "topodata.ShardReplication") + proto.RegisterType((*ShardReplication_Node)(nil), "topodata.ShardReplication.Node") + proto.RegisterType((*ShardReference)(nil), "topodata.ShardReference") + proto.RegisterType((*ShardTabletControl)(nil), "topodata.ShardTabletControl") + proto.RegisterType((*SrvKeyspace)(nil), "topodata.SrvKeyspace") + proto.RegisterType((*SrvKeyspace_KeyspacePartition)(nil), "topodata.SrvKeyspace.KeyspacePartition") + proto.RegisterType((*SrvKeyspace_ServedFrom)(nil), "topodata.SrvKeyspace.ServedFrom") + proto.RegisterType((*CellInfo)(nil), "topodata.CellInfo") + proto.RegisterType((*CellsAlias)(nil), "topodata.CellsAlias") +} + +func init() { proto.RegisterFile("topodata.proto", fileDescriptor_52c350cb619f972e) } + +var fileDescriptor_52c350cb619f972e = []byte{ + // 1349 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcf, 0x6e, 0xdb, 0x46, + 0x13, 0x0f, 0xf5, 0xcf, 0xd4, 0x88, 0x92, 0x99, 0x8d, 0x63, 0x10, 0xfa, 0xbe, 0xa0, 0x86, 0x8a, + 0xa0, 0x82, 0x8b, 0xca, 0xad, 0x93, 0xb4, 0x46, 0x8a, 0x02, 0x51, 0x64, 0xa5, 0x71, 0x6c, 0xcb, + 0xc2, 0x4a, 0x46, 0x9b, 0x5e, 0x08, 0x5a, 0x5a, 0x3b, 0x84, 0x25, 0x52, 0xd9, 0x5d, 0x0b, 0x50, + 0x5f, 0xa1, 0x87, 0xf6, 0xdc, 0x37, 0xe8, 0xfb, 0xf4, 0xd8, 0x4b, 0xfb, 0x1c, 0x3d, 0x14, 0x3b, + 0x4b, 0x52, 0x94, 0x14, 0xa7, 0x4e, 0xe1, 0xdb, 0xcc, 0xec, 0xcc, 0x70, 0xe6, 0xb7, 0xbf, 0x99, + 0x95, 0xa0, 0x22, 0xc3, 0x49, 0x38, 0xf4, 0xa4, 0xd7, 0x98, 0xf0, 0x50, 0x86, 0xc4, 0x8c, 0xf5, + 0xaa, 0x35, 0x95, 0xd2, 0x1f, 0x33, 0x6d, 0xaf, 0xed, 0x82, 0x79, 0xc8, 0x66, 0xd4, 0x0b, 0x2e, + 0x18, 0xd9, 0x80, 0xbc, 0x90, 0x1e, 0x97, 0x8e, 0xb1, 0x65, 0xd4, 0x2d, 0xaa, 0x15, 0x62, 0x43, + 0x96, 0x05, 0x43, 0x27, 0x83, 0x36, 0x25, 0xd6, 0x1e, 0x41, 0xa9, 0xef, 0x9d, 0x8d, 0x98, 0x6c, + 0x8e, 0x7c, 0x4f, 0x10, 0x02, 0xb9, 0x01, 0x1b, 0x8d, 0x30, 0xaa, 0x48, 0x51, 0x56, 0x41, 0x57, + 0xbe, 0x0e, 0x2a, 0x53, 0x25, 0xd6, 0xfe, 0xce, 0x41, 0x41, 0x47, 0x91, 0x4f, 0x21, 0xef, 0xa9, + 0x48, 0x8c, 0x28, 0xed, 0xde, 0x6f, 0x24, 0xb5, 0xa6, 0xd2, 0x52, 0xed, 0x43, 0xaa, 0x60, 0xbe, + 0x09, 0x85, 0x0c, 0xbc, 0x31, 0xc3, 0x74, 0x45, 0x9a, 0xe8, 0x64, 0x0f, 0xcc, 0x49, 0xc8, 0xa5, + 0x3b, 0xf6, 0x26, 0x4e, 0x6e, 0x2b, 0x5b, 0x2f, 0xed, 0x3e, 0x58, 0xce, 0xd5, 0xe8, 0x86, 0x5c, + 0x1e, 0x7b, 0x93, 0x76, 0x20, 0xf9, 0x8c, 0xae, 0x4d, 0xb4, 0xa6, 0xb2, 0x5e, 0xb2, 0x99, 0x98, + 0x78, 0x03, 0xe6, 0xe4, 0x75, 0xd6, 0x58, 0x47, 0x18, 0xde, 0x78, 0x7c, 0xe8, 0x14, 0xf0, 0x40, + 0x2b, 0x64, 0x07, 0x8a, 0x97, 0x6c, 0xe6, 0x72, 0x85, 0x94, 0xb3, 0x86, 0x85, 0x93, 0xf9, 0xc7, + 0x62, 0x0c, 0x31, 0x8d, 0x46, 0xb3, 0x0e, 0x39, 0x39, 0x9b, 0x30, 0xc7, 0xdc, 0x32, 0xea, 0x95, + 0xdd, 0x8d, 0xe5, 0xc2, 0xfa, 0xb3, 0x09, 0xa3, 0xe8, 0x41, 0xea, 0x60, 0x0f, 0xcf, 0x5c, 0xd5, + 0x91, 0x1b, 0x4e, 0x19, 0xe7, 0xfe, 0x90, 0x39, 0x45, 0xfc, 0x76, 0x65, 0x78, 0xd6, 0xf1, 0xc6, + 0xec, 0x24, 0xb2, 0x92, 0x06, 0xe4, 0xa4, 0x77, 0x21, 0x1c, 0xc0, 0x66, 0xab, 0x2b, 0xcd, 0xf6, + 0xbd, 0x0b, 0xa1, 0x3b, 0x45, 0x3f, 0xf2, 0x10, 0x2a, 0xe3, 0x99, 0x78, 0x3b, 0x72, 0x13, 0x08, + 0x2d, 0xcc, 0x5b, 0x46, 0xeb, 0xcb, 0x18, 0xc7, 0x07, 0x00, 0xda, 0x4d, 0xc1, 0xe3, 0x94, 0xb7, + 0x8c, 0x7a, 0x9e, 0x16, 0xd1, 0xa2, 0xd0, 0x23, 0x4d, 0xd8, 0x1c, 0x7b, 0x42, 0x32, 0xee, 0x4a, + 0xc6, 0xc7, 0x2e, 0xd2, 0xc2, 0x55, 0x1c, 0x72, 0x2a, 0x88, 0x83, 0xd5, 0x88, 0x28, 0xd5, 0xf7, + 0xc7, 0x8c, 0xde, 0xd3, 0xbe, 0x7d, 0xc6, 0xc7, 0x3d, 0xe5, 0xa9, 0x8c, 0xd5, 0xa7, 0x60, 0xa5, + 0x2f, 0x42, 0xf1, 0xe3, 0x92, 0xcd, 0x22, 0xca, 0x28, 0x51, 0xa1, 0x3e, 0xf5, 0x46, 0x57, 0xfa, + 0x92, 0xf3, 0x54, 0x2b, 0x4f, 0x33, 0x7b, 0x46, 0xf5, 0x2b, 0x28, 0x26, 0x7d, 0xfd, 0x5b, 0x60, + 0x31, 0x15, 0xf8, 0x2a, 0x67, 0x66, 0xed, 0xdc, 0xab, 0x9c, 0x59, 0xb2, 0xad, 0xda, 0xef, 0x05, + 0xc8, 0xf7, 0xf0, 0x22, 0xf7, 0xc0, 0x8a, 0xba, 0xb9, 0x01, 0x09, 0x4b, 0xda, 0x55, 0x13, 0xfd, + 0x7a, 0x1c, 0xcc, 0x1b, 0xe2, 0xb0, 0xc8, 0xa2, 0xcc, 0x0d, 0x58, 0xf4, 0x0d, 0x58, 0x82, 0xf1, + 0x29, 0x1b, 0xba, 0x8a, 0x2a, 0xc2, 0xc9, 0x2e, 0xdf, 0x3c, 0x36, 0xd5, 0xe8, 0xa1, 0x0f, 0x72, + 0xaa, 0x24, 0x12, 0x59, 0x90, 0x67, 0x50, 0x16, 0xe1, 0x15, 0x1f, 0x30, 0x17, 0x59, 0x2c, 0xa2, + 0x31, 0xf9, 0xdf, 0x4a, 0x3c, 0x3a, 0xa1, 0x4c, 0x2d, 0x31, 0x57, 0x04, 0x79, 0x01, 0xeb, 0x12, + 0x01, 0x71, 0x07, 0x61, 0x20, 0x79, 0x38, 0x12, 0x4e, 0x61, 0x79, 0xd4, 0x74, 0x0e, 0x8d, 0x5b, + 0x4b, 0x7b, 0xd1, 0x8a, 0x4c, 0xab, 0x82, 0x6c, 0xc3, 0x5d, 0x5f, 0xb8, 0x11, 0x7e, 0xaa, 0x44, + 0x3f, 0xb8, 0xc0, 0x39, 0x32, 0xe9, 0xba, 0x2f, 0x8e, 0xd1, 0xde, 0xd3, 0xe6, 0xea, 0x6b, 0x80, + 0x79, 0x43, 0xe4, 0x09, 0x94, 0xa2, 0x0a, 0x70, 0x9e, 0x8c, 0xf7, 0xcc, 0x13, 0xc8, 0x44, 0x56, + 0xbc, 0x50, 0xab, 0x48, 0x38, 0x99, 0xad, 0xac, 0xe2, 0x05, 0x2a, 0xd5, 0x5f, 0x0d, 0x28, 0xa5, + 0x9a, 0x8d, 0x17, 0x95, 0x91, 0x2c, 0xaa, 0x85, 0xd5, 0x90, 0xb9, 0x6e, 0x35, 0x64, 0xaf, 0x5d, + 0x0d, 0xb9, 0x1b, 0x5c, 0xea, 0x26, 0x14, 0xb0, 0x50, 0xe1, 0xe4, 0xb1, 0xb6, 0x48, 0xab, 0xfe, + 0x66, 0x40, 0x79, 0x01, 0xc5, 0x5b, 0xed, 0x9d, 0x7c, 0x06, 0xe4, 0x6c, 0xe4, 0x0d, 0x2e, 0x47, + 0xbe, 0x90, 0x8a, 0x50, 0xba, 0x84, 0x1c, 0xba, 0xdc, 0x4d, 0x9d, 0x60, 0x52, 0xa1, 0xaa, 0x3c, + 0xe7, 0xe1, 0x8f, 0x2c, 0xc0, 0x0d, 0x69, 0xd2, 0x48, 0x4b, 0xc6, 0x2a, 0x6f, 0x17, 0x6a, 0x7f, + 0x64, 0xf1, 0xfd, 0xd0, 0xe8, 0x7c, 0x0e, 0x1b, 0x08, 0x88, 0x1f, 0x5c, 0xb8, 0x83, 0x70, 0x74, + 0x35, 0x0e, 0x70, 0xa9, 0x45, 0xc3, 0x4a, 0xe2, 0xb3, 0x16, 0x1e, 0xa9, 0xbd, 0x46, 0x5e, 0xad, + 0x46, 0x60, 0x9f, 0x19, 0xec, 0xd3, 0x59, 0x00, 0x11, 0xbf, 0x71, 0xa0, 0x39, 0xbe, 0x94, 0x0b, + 0x7b, 0x7e, 0x96, 0x4c, 0xca, 0x39, 0x0f, 0xc7, 0x62, 0xf5, 0x41, 0x88, 0x73, 0x44, 0xc3, 0xf2, + 0x82, 0x87, 0xe3, 0x78, 0x58, 0x94, 0x2c, 0xc8, 0xd7, 0x50, 0x8e, 0x6f, 0x5a, 0x97, 0x91, 0xc7, + 0x32, 0x36, 0x57, 0x53, 0x60, 0x11, 0xd6, 0x65, 0x4a, 0x23, 0x1f, 0x43, 0xf9, 0xcc, 0x13, 0xcc, + 0x4d, 0xb8, 0xa3, 0x5f, 0x0f, 0x4b, 0x19, 0x13, 0x84, 0xbe, 0x80, 0xb2, 0x08, 0xbc, 0x89, 0x78, + 0x13, 0x46, 0x8b, 0x63, 0xed, 0x1d, 0x8b, 0xc3, 0x8a, 0x5d, 0x70, 0x73, 0x5e, 0xc5, 0xb3, 0xa0, + 0x6a, 0xbc, 0x5d, 0x3e, 0xa4, 0x99, 0x9e, 0x5d, 0x64, 0xba, 0xbe, 0xe4, 0xda, 0x4f, 0x06, 0xd8, + 0x7a, 0x29, 0xb0, 0xc9, 0xc8, 0x1f, 0x78, 0xd2, 0x0f, 0x03, 0xf2, 0x04, 0xf2, 0x41, 0x38, 0x64, + 0x6a, 0x73, 0x2a, 0x84, 0x3f, 0x5a, 0xda, 0x03, 0x29, 0xd7, 0x46, 0x27, 0x1c, 0x32, 0xaa, 0xbd, + 0xab, 0xcf, 0x20, 0xa7, 0x54, 0xb5, 0x7f, 0xa3, 0x16, 0x6e, 0xb2, 0x7f, 0xe5, 0x5c, 0xa9, 0x9d, + 0x42, 0x25, 0xfa, 0xc2, 0x39, 0xe3, 0x2c, 0x18, 0x30, 0xf5, 0xd3, 0x23, 0xc5, 0x30, 0x94, 0x3f, + 0x78, 0xc5, 0xd6, 0x7e, 0x36, 0x80, 0x60, 0xde, 0xc5, 0xd1, 0xbb, 0x8d, 0xdc, 0xe4, 0x31, 0x6c, + 0xbe, 0xbd, 0x62, 0x7c, 0xa6, 0x37, 0xde, 0x80, 0xb9, 0x43, 0x5f, 0xa8, 0xaf, 0xe8, 0x0d, 0x62, + 0xd2, 0x0d, 0x3c, 0xed, 0xe9, 0xc3, 0xfd, 0xe8, 0xac, 0xf6, 0x57, 0x0e, 0x4a, 0x3d, 0x3e, 0x4d, + 0x68, 0xf3, 0x2d, 0xc0, 0xc4, 0xe3, 0xd2, 0x57, 0x98, 0xc6, 0xb0, 0x7f, 0x92, 0x82, 0x7d, 0xee, + 0x9a, 0x30, 0xb4, 0x1b, 0xfb, 0xd3, 0x54, 0xe8, 0xb5, 0x13, 0x9a, 0xf9, 0xe0, 0x09, 0xcd, 0xfe, + 0x87, 0x09, 0x6d, 0x42, 0x29, 0x35, 0xa1, 0xd1, 0x80, 0x6e, 0xbd, 0xbb, 0x8f, 0xd4, 0x8c, 0xc2, + 0x7c, 0x46, 0xab, 0x7f, 0x1a, 0x70, 0x77, 0xa5, 0x45, 0x35, 0x15, 0xa9, 0x47, 0xf2, 0xfd, 0x53, + 0x31, 0x7f, 0x1d, 0x49, 0x0b, 0x6c, 0xac, 0xd2, 0xe5, 0x31, 0xa1, 0xf4, 0x80, 0x94, 0xd2, 0x7d, + 0x2d, 0x32, 0x8e, 0xae, 0x8b, 0x05, 0x5d, 0x90, 0x2e, 0xdc, 0xd7, 0x49, 0x96, 0x5f, 0x49, 0xfd, + 0x52, 0xff, 0x7f, 0x29, 0xd3, 0xe2, 0x23, 0x79, 0x4f, 0xac, 0xd8, 0x44, 0xd5, 0xbd, 0x8d, 0x89, + 0x7f, 0xcf, 0x2b, 0x16, 0xad, 0xee, 0x43, 0x30, 0x5b, 0x6c, 0x34, 0x3a, 0x08, 0xce, 0x43, 0xf5, + 0x3b, 0x11, 0x71, 0xe1, 0xae, 0x37, 0x1c, 0x72, 0x26, 0x44, 0xc4, 0xfa, 0xb2, 0xb6, 0x36, 0xb5, + 0x51, 0x8d, 0x04, 0x0f, 0x43, 0x19, 0x25, 0x44, 0x39, 0x5a, 0x14, 0x35, 0x00, 0x95, 0x4c, 0xe8, + 0x1f, 0x4a, 0xef, 0x5c, 0x37, 0xdb, 0x75, 0xb0, 0xd2, 0xfb, 0x93, 0x00, 0x14, 0x3a, 0x27, 0xf4, + 0xb8, 0x79, 0x64, 0xdf, 0x21, 0x16, 0x98, 0xbd, 0x4e, 0xb3, 0xdb, 0x7b, 0x79, 0xd2, 0xb7, 0x8d, + 0xed, 0x5d, 0xa8, 0x2c, 0xd2, 0x89, 0x14, 0x21, 0x7f, 0xda, 0xe9, 0xb5, 0xfb, 0xf6, 0x1d, 0x15, + 0x76, 0x7a, 0xd0, 0xe9, 0x7f, 0xf9, 0xd8, 0x36, 0x94, 0xf9, 0xf9, 0xeb, 0x7e, 0xbb, 0x67, 0x67, + 0xb6, 0x7f, 0x31, 0x00, 0xe6, 0x58, 0x90, 0x12, 0xac, 0x9d, 0x76, 0x0e, 0x3b, 0x27, 0xdf, 0x75, + 0x74, 0xc8, 0x71, 0xb3, 0xd7, 0x6f, 0x53, 0xdb, 0x50, 0x07, 0xb4, 0xdd, 0x3d, 0x3a, 0x68, 0x35, + 0xed, 0x8c, 0x3a, 0xa0, 0xfb, 0x27, 0x9d, 0xa3, 0xd7, 0x76, 0x16, 0x73, 0x35, 0xfb, 0xad, 0x97, + 0x5a, 0xec, 0x75, 0x9b, 0xb4, 0x6d, 0xe7, 0x88, 0x0d, 0x56, 0xfb, 0xfb, 0x6e, 0x9b, 0x1e, 0x1c, + 0xb7, 0x3b, 0xfd, 0xe6, 0x91, 0x9d, 0x57, 0x31, 0xcf, 0x9b, 0xad, 0xc3, 0xd3, 0xae, 0x5d, 0xd0, + 0xc9, 0x7a, 0xfd, 0x13, 0xda, 0xb6, 0xd7, 0x94, 0xb2, 0x4f, 0x9b, 0x07, 0x9d, 0xf6, 0xbe, 0x6d, + 0x56, 0x33, 0xb6, 0xf1, 0x7c, 0x0f, 0xd6, 0xfd, 0xb0, 0x31, 0xf5, 0x25, 0x13, 0x42, 0xff, 0xdd, + 0xfa, 0xe1, 0x61, 0xa4, 0xf9, 0xe1, 0x8e, 0x96, 0x76, 0x2e, 0xc2, 0x9d, 0xa9, 0xdc, 0xc1, 0xd3, + 0x9d, 0xf8, 0x52, 0xcf, 0x0a, 0xa8, 0x3f, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x51, 0xac, 0x2b, + 0xc1, 0xc6, 0x0d, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/vschema/vschema.pb.go b/internal/stackql-parser-fork/go/vt/proto/vschema/vschema.pb.go new file mode 100644 index 00000000..9bc51fd0 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/vschema/vschema.pb.go @@ -0,0 +1,609 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: vschema.proto + +package vschema + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + query "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// RoutingRules specify the high level routing rules for the VSchema. +type RoutingRules struct { + // rules should ideally be a map. However protos dont't allow + // repeated fields as elements of a map. So, we use a list + // instead. + Rules []*RoutingRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RoutingRules) Reset() { *m = RoutingRules{} } +func (m *RoutingRules) String() string { return proto.CompactTextString(m) } +func (*RoutingRules) ProtoMessage() {} +func (*RoutingRules) Descriptor() ([]byte, []int) { + return fileDescriptor_3f6849254fea3e77, []int{0} +} + +func (m *RoutingRules) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RoutingRules.Unmarshal(m, b) +} +func (m *RoutingRules) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RoutingRules.Marshal(b, m, deterministic) +} +func (m *RoutingRules) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoutingRules.Merge(m, src) +} +func (m *RoutingRules) XXX_Size() int { + return xxx_messageInfo_RoutingRules.Size(m) +} +func (m *RoutingRules) XXX_DiscardUnknown() { + xxx_messageInfo_RoutingRules.DiscardUnknown(m) +} + +var xxx_messageInfo_RoutingRules proto.InternalMessageInfo + +func (m *RoutingRules) GetRules() []*RoutingRule { + if m != nil { + return m.Rules + } + return nil +} + +// RoutingRule specifies a routing rule. +type RoutingRule struct { + FromTable string `protobuf:"bytes,1,opt,name=from_table,json=fromTable,proto3" json:"from_table,omitempty"` + ToTables []string `protobuf:"bytes,2,rep,name=to_tables,json=toTables,proto3" json:"to_tables,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RoutingRule) Reset() { *m = RoutingRule{} } +func (m *RoutingRule) String() string { return proto.CompactTextString(m) } +func (*RoutingRule) ProtoMessage() {} +func (*RoutingRule) Descriptor() ([]byte, []int) { + return fileDescriptor_3f6849254fea3e77, []int{1} +} + +func (m *RoutingRule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RoutingRule.Unmarshal(m, b) +} +func (m *RoutingRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RoutingRule.Marshal(b, m, deterministic) +} +func (m *RoutingRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoutingRule.Merge(m, src) +} +func (m *RoutingRule) XXX_Size() int { + return xxx_messageInfo_RoutingRule.Size(m) +} +func (m *RoutingRule) XXX_DiscardUnknown() { + xxx_messageInfo_RoutingRule.DiscardUnknown(m) +} + +var xxx_messageInfo_RoutingRule proto.InternalMessageInfo + +func (m *RoutingRule) GetFromTable() string { + if m != nil { + return m.FromTable + } + return "" +} + +func (m *RoutingRule) GetToTables() []string { + if m != nil { + return m.ToTables + } + return nil +} + +// Keyspace is the vschema for a keyspace. +type Keyspace struct { + // If sharded is false, vindexes and tables are ignored. + Sharded bool `protobuf:"varint,1,opt,name=sharded,proto3" json:"sharded,omitempty"` + Vindexes map[string]*Vindex `protobuf:"bytes,2,rep,name=vindexes,proto3" json:"vindexes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Tables map[string]*Table `protobuf:"bytes,3,rep,name=tables,proto3" json:"tables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If require_explicit_routing is true, vindexes and tables are not added to global routing + RequireExplicitRouting bool `protobuf:"varint,4,opt,name=require_explicit_routing,json=requireExplicitRouting,proto3" json:"require_explicit_routing,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Keyspace) Reset() { *m = Keyspace{} } +func (m *Keyspace) String() string { return proto.CompactTextString(m) } +func (*Keyspace) ProtoMessage() {} +func (*Keyspace) Descriptor() ([]byte, []int) { + return fileDescriptor_3f6849254fea3e77, []int{2} +} + +func (m *Keyspace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Keyspace.Unmarshal(m, b) +} +func (m *Keyspace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) +} +func (m *Keyspace) XXX_Merge(src proto.Message) { + xxx_messageInfo_Keyspace.Merge(m, src) +} +func (m *Keyspace) XXX_Size() int { + return xxx_messageInfo_Keyspace.Size(m) +} +func (m *Keyspace) XXX_DiscardUnknown() { + xxx_messageInfo_Keyspace.DiscardUnknown(m) +} + +var xxx_messageInfo_Keyspace proto.InternalMessageInfo + +func (m *Keyspace) GetSharded() bool { + if m != nil { + return m.Sharded + } + return false +} + +func (m *Keyspace) GetVindexes() map[string]*Vindex { + if m != nil { + return m.Vindexes + } + return nil +} + +func (m *Keyspace) GetTables() map[string]*Table { + if m != nil { + return m.Tables + } + return nil +} + +func (m *Keyspace) GetRequireExplicitRouting() bool { + if m != nil { + return m.RequireExplicitRouting + } + return false +} + +// Vindex is the vindex info for a Keyspace. +type Vindex struct { + // The type must match one of the predefined + // (or plugged in) vindex names. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // params is a map of attribute value pairs + // that must be defined as required by the + // vindex constructors. The values can only + // be strings. + Params map[string]string `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A lookup vindex can have an owner table defined. + // If so, rows in the lookup table are created or + // deleted in sync with corresponding rows in the + // owner table. + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Vindex) Reset() { *m = Vindex{} } +func (m *Vindex) String() string { return proto.CompactTextString(m) } +func (*Vindex) ProtoMessage() {} +func (*Vindex) Descriptor() ([]byte, []int) { + return fileDescriptor_3f6849254fea3e77, []int{3} +} + +func (m *Vindex) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Vindex.Unmarshal(m, b) +} +func (m *Vindex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Vindex.Marshal(b, m, deterministic) +} +func (m *Vindex) XXX_Merge(src proto.Message) { + xxx_messageInfo_Vindex.Merge(m, src) +} +func (m *Vindex) XXX_Size() int { + return xxx_messageInfo_Vindex.Size(m) +} +func (m *Vindex) XXX_DiscardUnknown() { + xxx_messageInfo_Vindex.DiscardUnknown(m) +} + +var xxx_messageInfo_Vindex proto.InternalMessageInfo + +func (m *Vindex) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Vindex) GetParams() map[string]string { + if m != nil { + return m.Params + } + return nil +} + +func (m *Vindex) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// Table is the table info for a Keyspace. +type Table struct { + // If the table is a sequence, type must be + // "sequence". Otherwise, it should be empty. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // column_vindexes associates columns to vindexes. + ColumnVindexes []*ColumnVindex `protobuf:"bytes,2,rep,name=column_vindexes,json=columnVindexes,proto3" json:"column_vindexes,omitempty"` + // auto_increment is specified if a column needs + // to be associated with a sequence. + AutoIncrement *AutoIncrement `protobuf:"bytes,3,opt,name=auto_increment,json=autoIncrement,proto3" json:"auto_increment,omitempty"` + // columns lists the columns for the table. + Columns []*Column `protobuf:"bytes,4,rep,name=columns,proto3" json:"columns,omitempty"` + // pinned pins an unsharded table to a specific + // shard, as dictated by the keyspace id. + // The keyspace id is represented in hex form + // like in keyranges. + Pinned string `protobuf:"bytes,5,opt,name=pinned,proto3" json:"pinned,omitempty"` + // column_list_authoritative is set to true if columns is + // an authoritative list for the table. This allows + // us to expand 'select *' expressions. + ColumnListAuthoritative bool `protobuf:"varint,6,opt,name=column_list_authoritative,json=columnListAuthoritative,proto3" json:"column_list_authoritative,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Table) Reset() { *m = Table{} } +func (m *Table) String() string { return proto.CompactTextString(m) } +func (*Table) ProtoMessage() {} +func (*Table) Descriptor() ([]byte, []int) { + return fileDescriptor_3f6849254fea3e77, []int{4} +} + +func (m *Table) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Table.Unmarshal(m, b) +} +func (m *Table) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Table.Marshal(b, m, deterministic) +} +func (m *Table) XXX_Merge(src proto.Message) { + xxx_messageInfo_Table.Merge(m, src) +} +func (m *Table) XXX_Size() int { + return xxx_messageInfo_Table.Size(m) +} +func (m *Table) XXX_DiscardUnknown() { + xxx_messageInfo_Table.DiscardUnknown(m) +} + +var xxx_messageInfo_Table proto.InternalMessageInfo + +func (m *Table) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Table) GetColumnVindexes() []*ColumnVindex { + if m != nil { + return m.ColumnVindexes + } + return nil +} + +func (m *Table) GetAutoIncrement() *AutoIncrement { + if m != nil { + return m.AutoIncrement + } + return nil +} + +func (m *Table) GetColumns() []*Column { + if m != nil { + return m.Columns + } + return nil +} + +func (m *Table) GetPinned() string { + if m != nil { + return m.Pinned + } + return "" +} + +func (m *Table) GetColumnListAuthoritative() bool { + if m != nil { + return m.ColumnListAuthoritative + } + return false +} + +// ColumnVindex is used to associate a column to a vindex. +type ColumnVindex struct { + // Legacy implementation, moving forward all vindexes should define a list of columns. + Column string `protobuf:"bytes,1,opt,name=column,proto3" json:"column,omitempty"` + // The name must match a vindex defined in Keyspace. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // List of columns that define this Vindex + Columns []string `protobuf:"bytes,3,rep,name=columns,proto3" json:"columns,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ColumnVindex) Reset() { *m = ColumnVindex{} } +func (m *ColumnVindex) String() string { return proto.CompactTextString(m) } +func (*ColumnVindex) ProtoMessage() {} +func (*ColumnVindex) Descriptor() ([]byte, []int) { + return fileDescriptor_3f6849254fea3e77, []int{5} +} + +func (m *ColumnVindex) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ColumnVindex.Unmarshal(m, b) +} +func (m *ColumnVindex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ColumnVindex.Marshal(b, m, deterministic) +} +func (m *ColumnVindex) XXX_Merge(src proto.Message) { + xxx_messageInfo_ColumnVindex.Merge(m, src) +} +func (m *ColumnVindex) XXX_Size() int { + return xxx_messageInfo_ColumnVindex.Size(m) +} +func (m *ColumnVindex) XXX_DiscardUnknown() { + xxx_messageInfo_ColumnVindex.DiscardUnknown(m) +} + +var xxx_messageInfo_ColumnVindex proto.InternalMessageInfo + +func (m *ColumnVindex) GetColumn() string { + if m != nil { + return m.Column + } + return "" +} + +func (m *ColumnVindex) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ColumnVindex) GetColumns() []string { + if m != nil { + return m.Columns + } + return nil +} + +// Autoincrement is used to designate a column as auto-inc. +type AutoIncrement struct { + Column string `protobuf:"bytes,1,opt,name=column,proto3" json:"column,omitempty"` + // The sequence must match a table of type SEQUENCE. + Sequence string `protobuf:"bytes,2,opt,name=sequence,proto3" json:"sequence,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AutoIncrement) Reset() { *m = AutoIncrement{} } +func (m *AutoIncrement) String() string { return proto.CompactTextString(m) } +func (*AutoIncrement) ProtoMessage() {} +func (*AutoIncrement) Descriptor() ([]byte, []int) { + return fileDescriptor_3f6849254fea3e77, []int{6} +} + +func (m *AutoIncrement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AutoIncrement.Unmarshal(m, b) +} +func (m *AutoIncrement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AutoIncrement.Marshal(b, m, deterministic) +} +func (m *AutoIncrement) XXX_Merge(src proto.Message) { + xxx_messageInfo_AutoIncrement.Merge(m, src) +} +func (m *AutoIncrement) XXX_Size() int { + return xxx_messageInfo_AutoIncrement.Size(m) +} +func (m *AutoIncrement) XXX_DiscardUnknown() { + xxx_messageInfo_AutoIncrement.DiscardUnknown(m) +} + +var xxx_messageInfo_AutoIncrement proto.InternalMessageInfo + +func (m *AutoIncrement) GetColumn() string { + if m != nil { + return m.Column + } + return "" +} + +func (m *AutoIncrement) GetSequence() string { + if m != nil { + return m.Sequence + } + return "" +} + +// Column describes a column. +type Column struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type query.Type `protobuf:"varint,2,opt,name=type,proto3,enum=query.Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Column) Reset() { *m = Column{} } +func (m *Column) String() string { return proto.CompactTextString(m) } +func (*Column) ProtoMessage() {} +func (*Column) Descriptor() ([]byte, []int) { + return fileDescriptor_3f6849254fea3e77, []int{7} +} + +func (m *Column) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Column.Unmarshal(m, b) +} +func (m *Column) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Column.Marshal(b, m, deterministic) +} +func (m *Column) XXX_Merge(src proto.Message) { + xxx_messageInfo_Column.Merge(m, src) +} +func (m *Column) XXX_Size() int { + return xxx_messageInfo_Column.Size(m) +} +func (m *Column) XXX_DiscardUnknown() { + xxx_messageInfo_Column.DiscardUnknown(m) +} + +var xxx_messageInfo_Column proto.InternalMessageInfo + +func (m *Column) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Column) GetType() query.Type { + if m != nil { + return m.Type + } + return query.Type_NULL_TYPE +} + +// SrvVSchema is the roll-up of all the Keyspace schema for a cell. +type SrvVSchema struct { + // keyspaces is a map of keyspace name -> Keyspace object. + Keyspaces map[string]*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + RoutingRules *RoutingRules `protobuf:"bytes,2,opt,name=routing_rules,json=routingRules,proto3" json:"routing_rules,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SrvVSchema) Reset() { *m = SrvVSchema{} } +func (m *SrvVSchema) String() string { return proto.CompactTextString(m) } +func (*SrvVSchema) ProtoMessage() {} +func (*SrvVSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_3f6849254fea3e77, []int{8} +} + +func (m *SrvVSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SrvVSchema.Unmarshal(m, b) +} +func (m *SrvVSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SrvVSchema.Marshal(b, m, deterministic) +} +func (m *SrvVSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_SrvVSchema.Merge(m, src) +} +func (m *SrvVSchema) XXX_Size() int { + return xxx_messageInfo_SrvVSchema.Size(m) +} +func (m *SrvVSchema) XXX_DiscardUnknown() { + xxx_messageInfo_SrvVSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_SrvVSchema proto.InternalMessageInfo + +func (m *SrvVSchema) GetKeyspaces() map[string]*Keyspace { + if m != nil { + return m.Keyspaces + } + return nil +} + +func (m *SrvVSchema) GetRoutingRules() *RoutingRules { + if m != nil { + return m.RoutingRules + } + return nil +} + +func init() { + proto.RegisterType((*RoutingRules)(nil), "vschema.RoutingRules") + proto.RegisterType((*RoutingRule)(nil), "vschema.RoutingRule") + proto.RegisterType((*Keyspace)(nil), "vschema.Keyspace") + proto.RegisterMapType((map[string]*Table)(nil), "vschema.Keyspace.TablesEntry") + proto.RegisterMapType((map[string]*Vindex)(nil), "vschema.Keyspace.VindexesEntry") + proto.RegisterType((*Vindex)(nil), "vschema.Vindex") + proto.RegisterMapType((map[string]string)(nil), "vschema.Vindex.ParamsEntry") + proto.RegisterType((*Table)(nil), "vschema.Table") + proto.RegisterType((*ColumnVindex)(nil), "vschema.ColumnVindex") + proto.RegisterType((*AutoIncrement)(nil), "vschema.AutoIncrement") + proto.RegisterType((*Column)(nil), "vschema.Column") + proto.RegisterType((*SrvVSchema)(nil), "vschema.SrvVSchema") + proto.RegisterMapType((map[string]*Keyspace)(nil), "vschema.SrvVSchema.KeyspacesEntry") +} + +func init() { proto.RegisterFile("vschema.proto", fileDescriptor_3f6849254fea3e77) } + +var fileDescriptor_3f6849254fea3e77 = []byte{ + // 673 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0xcf, 0x4e, 0xdb, 0x4e, + 0x10, 0x96, 0x13, 0x62, 0x92, 0x31, 0x09, 0xbf, 0xdf, 0x0a, 0xa8, 0x1b, 0x84, 0x88, 0x2c, 0xda, + 0xa6, 0x3d, 0x24, 0x52, 0x50, 0x25, 0x9a, 0x8a, 0xaa, 0x14, 0x71, 0x40, 0x45, 0x6a, 0x65, 0x10, + 0x87, 0x5e, 0x2c, 0xe3, 0x6c, 0x61, 0x45, 0xe2, 0x35, 0xbb, 0x6b, 0x97, 0x3c, 0x4a, 0xaf, 0x7d, + 0xad, 0x3e, 0x42, 0x5f, 0xa2, 0xf2, 0xfe, 0x31, 0x1b, 0x48, 0x6f, 0x3b, 0x3b, 0xf3, 0x7d, 0xf3, + 0xed, 0xec, 0xcc, 0x40, 0xbb, 0xe0, 0xc9, 0x0d, 0x9e, 0xc5, 0x83, 0x8c, 0x51, 0x41, 0xd1, 0xaa, + 0x36, 0xbb, 0xde, 0x5d, 0x8e, 0xd9, 0x5c, 0xdd, 0x06, 0x63, 0x58, 0x0b, 0x69, 0x2e, 0x48, 0x7a, + 0x1d, 0xe6, 0x53, 0xcc, 0xd1, 0x1b, 0x68, 0xb0, 0xf2, 0xe0, 0x3b, 0xbd, 0x7a, 0xdf, 0x1b, 0x6d, + 0x0c, 0x0c, 0x89, 0x15, 0x15, 0xaa, 0x90, 0xe0, 0x14, 0x3c, 0xeb, 0x16, 0xed, 0x00, 0x7c, 0x67, + 0x74, 0x16, 0x89, 0xf8, 0x6a, 0x8a, 0x7d, 0xa7, 0xe7, 0xf4, 0x5b, 0x61, 0xab, 0xbc, 0xb9, 0x28, + 0x2f, 0xd0, 0x36, 0xb4, 0x04, 0x55, 0x4e, 0xee, 0xd7, 0x7a, 0xf5, 0x7e, 0x2b, 0x6c, 0x0a, 0x2a, + 0x7d, 0x3c, 0xf8, 0x53, 0x83, 0xe6, 0x67, 0x3c, 0xe7, 0x59, 0x9c, 0x60, 0xe4, 0xc3, 0x2a, 0xbf, + 0x89, 0xd9, 0x04, 0x4f, 0x24, 0x4b, 0x33, 0x34, 0x26, 0x7a, 0x0f, 0xcd, 0x82, 0xa4, 0x13, 0x7c, + 0xaf, 0x29, 0xbc, 0xd1, 0x6e, 0x25, 0xd0, 0xc0, 0x07, 0x97, 0x3a, 0xe2, 0x24, 0x15, 0x6c, 0x1e, + 0x56, 0x00, 0xf4, 0x16, 0x5c, 0x9d, 0xbd, 0x2e, 0xa1, 0x3b, 0x4f, 0xa1, 0x4a, 0x8d, 0x02, 0xea, + 0x60, 0x74, 0x00, 0x3e, 0xc3, 0x77, 0x39, 0x61, 0x38, 0xc2, 0xf7, 0xd9, 0x94, 0x24, 0x44, 0x44, + 0x4c, 0x3d, 0xdb, 0x5f, 0x91, 0xf2, 0xb6, 0xb4, 0xff, 0x44, 0xbb, 0x75, 0x51, 0xba, 0x67, 0xd0, + 0x5e, 0xd0, 0x82, 0xfe, 0x83, 0xfa, 0x2d, 0x9e, 0xeb, 0xd2, 0x94, 0x47, 0xf4, 0x02, 0x1a, 0x45, + 0x3c, 0xcd, 0xb1, 0x5f, 0xeb, 0x39, 0x7d, 0x6f, 0xb4, 0x5e, 0x49, 0x52, 0xc0, 0x50, 0x79, 0xc7, + 0xb5, 0x03, 0xa7, 0x7b, 0x0a, 0x9e, 0x25, 0x6f, 0x09, 0xd7, 0xde, 0x22, 0x57, 0xa7, 0xe2, 0x92, + 0x30, 0x8b, 0x2a, 0xf8, 0xe5, 0x80, 0xab, 0x12, 0x20, 0x04, 0x2b, 0x62, 0x9e, 0x99, 0xef, 0x92, + 0x67, 0xb4, 0x0f, 0x6e, 0x16, 0xb3, 0x78, 0x66, 0x6a, 0xbc, 0xfd, 0x48, 0xd5, 0xe0, 0xab, 0xf4, + 0xea, 0x32, 0xa9, 0x50, 0xb4, 0x01, 0x0d, 0xfa, 0x23, 0xc5, 0xcc, 0xaf, 0x4b, 0x26, 0x65, 0x74, + 0xdf, 0x81, 0x67, 0x05, 0x2f, 0x11, 0xbd, 0x61, 0x8b, 0x6e, 0xd9, 0x22, 0x7f, 0xd6, 0xa0, 0xa1, + 0x3a, 0x67, 0x99, 0xc6, 0x0f, 0xb0, 0x9e, 0xd0, 0x69, 0x3e, 0x4b, 0xa3, 0x47, 0x0d, 0xb1, 0x59, + 0x89, 0x3d, 0x96, 0x7e, 0x5d, 0xc8, 0x4e, 0x62, 0x59, 0x98, 0xa3, 0x43, 0xe8, 0xc4, 0xb9, 0xa0, + 0x11, 0x49, 0x13, 0x86, 0x67, 0x38, 0x15, 0x52, 0xb7, 0x37, 0xda, 0xaa, 0xe0, 0x47, 0xb9, 0xa0, + 0xa7, 0xc6, 0x1b, 0xb6, 0x63, 0xdb, 0x44, 0xaf, 0x61, 0x55, 0x11, 0x72, 0x7f, 0x45, 0xa6, 0x5d, + 0x7f, 0x94, 0x36, 0x34, 0x7e, 0xb4, 0x05, 0x6e, 0x46, 0xd2, 0x14, 0x4f, 0xfc, 0x86, 0xd4, 0xaf, + 0x2d, 0x34, 0x86, 0xe7, 0xfa, 0x05, 0x53, 0xc2, 0x45, 0x14, 0xe7, 0xe2, 0x86, 0x32, 0x22, 0x62, + 0x41, 0x0a, 0xec, 0xbb, 0xb2, 0xb1, 0x9e, 0xa9, 0x80, 0x33, 0xc2, 0xc5, 0x91, 0xed, 0x0e, 0x2e, + 0x60, 0xcd, 0x7e, 0x5d, 0x99, 0x43, 0x85, 0xea, 0x1a, 0x69, 0xab, 0xac, 0x5c, 0x1a, 0xcf, 0x4c, + 0x71, 0xe5, 0xb9, 0x9c, 0x2e, 0x23, 0xbd, 0x2e, 0xa7, 0xd0, 0x98, 0xc1, 0x31, 0xb4, 0x17, 0x1e, + 0xfd, 0x4f, 0xda, 0x2e, 0x34, 0x39, 0xbe, 0xcb, 0x71, 0x9a, 0x18, 0xea, 0xca, 0x0e, 0x0e, 0xc1, + 0x3d, 0x5e, 0x4c, 0xee, 0x58, 0xc9, 0x77, 0xf5, 0x57, 0x96, 0xa8, 0xce, 0xc8, 0x1b, 0xa8, 0x55, + 0x74, 0x31, 0xcf, 0xb0, 0xfa, 0xd7, 0xe0, 0xb7, 0x03, 0x70, 0xce, 0x8a, 0xcb, 0x73, 0x59, 0x4c, + 0xf4, 0x11, 0x5a, 0xb7, 0x7a, 0x38, 0xcd, 0x4a, 0x0a, 0xaa, 0x4a, 0x3f, 0xc4, 0x55, 0x13, 0xac, + 0x9b, 0xf2, 0x01, 0x84, 0xc6, 0xd0, 0xd6, 0xd3, 0x1a, 0xa9, 0xc5, 0xa6, 0xa6, 0x63, 0x73, 0xd9, + 0x62, 0xe3, 0xe1, 0x1a, 0xb3, 0xac, 0xee, 0x17, 0xe8, 0x2c, 0x12, 0x2f, 0x69, 0xe0, 0x57, 0x8b, + 0x53, 0xf7, 0xff, 0x93, 0xa5, 0x62, 0xf5, 0xf4, 0xa7, 0x97, 0xdf, 0xf6, 0x0a, 0x22, 0x30, 0xe7, + 0x03, 0x42, 0x87, 0xea, 0x34, 0xbc, 0xa6, 0xc3, 0x42, 0x0c, 0xe5, 0x36, 0x1e, 0x6a, 0xec, 0x95, + 0x2b, 0xcd, 0xfd, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xa7, 0x99, 0x19, 0xc3, 0x05, 0x00, + 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/vtctldata/vtctldata.pb.go b/internal/stackql-parser-fork/go/vt/proto/vtctldata/vtctldata.pb.go new file mode 100644 index 00000000..022ee040 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/vtctldata/vtctldata.pb.go @@ -0,0 +1,303 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: vtctldata.proto + +package vtctldata + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + logutil "github.com/stackql/stackql-parser/go/vt/proto/logutil" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// ExecuteVtctlCommandRequest is the payload for ExecuteVtctlCommand. +// timeouts are in nanoseconds. +type ExecuteVtctlCommandRequest struct { + Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` + ActionTimeout int64 `protobuf:"varint,2,opt,name=action_timeout,json=actionTimeout,proto3" json:"action_timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteVtctlCommandRequest) Reset() { *m = ExecuteVtctlCommandRequest{} } +func (m *ExecuteVtctlCommandRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteVtctlCommandRequest) ProtoMessage() {} +func (*ExecuteVtctlCommandRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{0} +} + +func (m *ExecuteVtctlCommandRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteVtctlCommandRequest.Unmarshal(m, b) +} +func (m *ExecuteVtctlCommandRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteVtctlCommandRequest.Marshal(b, m, deterministic) +} +func (m *ExecuteVtctlCommandRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteVtctlCommandRequest.Merge(m, src) +} +func (m *ExecuteVtctlCommandRequest) XXX_Size() int { + return xxx_messageInfo_ExecuteVtctlCommandRequest.Size(m) +} +func (m *ExecuteVtctlCommandRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteVtctlCommandRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteVtctlCommandRequest proto.InternalMessageInfo + +func (m *ExecuteVtctlCommandRequest) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *ExecuteVtctlCommandRequest) GetActionTimeout() int64 { + if m != nil { + return m.ActionTimeout + } + return 0 +} + +// ExecuteVtctlCommandResponse is streamed back by ExecuteVtctlCommand. +type ExecuteVtctlCommandResponse struct { + Event *logutil.Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteVtctlCommandResponse) Reset() { *m = ExecuteVtctlCommandResponse{} } +func (m *ExecuteVtctlCommandResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteVtctlCommandResponse) ProtoMessage() {} +func (*ExecuteVtctlCommandResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{1} +} + +func (m *ExecuteVtctlCommandResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteVtctlCommandResponse.Unmarshal(m, b) +} +func (m *ExecuteVtctlCommandResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteVtctlCommandResponse.Marshal(b, m, deterministic) +} +func (m *ExecuteVtctlCommandResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteVtctlCommandResponse.Merge(m, src) +} +func (m *ExecuteVtctlCommandResponse) XXX_Size() int { + return xxx_messageInfo_ExecuteVtctlCommandResponse.Size(m) +} +func (m *ExecuteVtctlCommandResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteVtctlCommandResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteVtctlCommandResponse proto.InternalMessageInfo + +func (m *ExecuteVtctlCommandResponse) GetEvent() *logutil.Event { + if m != nil { + return m.Event + } + return nil +} + +// TableMaterializeSttings contains the settings for one table. +type TableMaterializeSettings struct { + TargetTable string `protobuf:"bytes,1,opt,name=target_table,json=targetTable,proto3" json:"target_table,omitempty"` + // source_expression is a select statement. + SourceExpression string `protobuf:"bytes,2,opt,name=source_expression,json=sourceExpression,proto3" json:"source_expression,omitempty"` + // create_ddl contains the DDL to create the target table. + // If empty, the target table must already exist. + // if "copy", the target table DDL is the same as the source table. + CreateDdl string `protobuf:"bytes,3,opt,name=create_ddl,json=createDdl,proto3" json:"create_ddl,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TableMaterializeSettings) Reset() { *m = TableMaterializeSettings{} } +func (m *TableMaterializeSettings) String() string { return proto.CompactTextString(m) } +func (*TableMaterializeSettings) ProtoMessage() {} +func (*TableMaterializeSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{2} +} + +func (m *TableMaterializeSettings) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TableMaterializeSettings.Unmarshal(m, b) +} +func (m *TableMaterializeSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TableMaterializeSettings.Marshal(b, m, deterministic) +} +func (m *TableMaterializeSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_TableMaterializeSettings.Merge(m, src) +} +func (m *TableMaterializeSettings) XXX_Size() int { + return xxx_messageInfo_TableMaterializeSettings.Size(m) +} +func (m *TableMaterializeSettings) XXX_DiscardUnknown() { + xxx_messageInfo_TableMaterializeSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_TableMaterializeSettings proto.InternalMessageInfo + +func (m *TableMaterializeSettings) GetTargetTable() string { + if m != nil { + return m.TargetTable + } + return "" +} + +func (m *TableMaterializeSettings) GetSourceExpression() string { + if m != nil { + return m.SourceExpression + } + return "" +} + +func (m *TableMaterializeSettings) GetCreateDdl() string { + if m != nil { + return m.CreateDdl + } + return "" +} + +// MaterializeSettings contains the settings for the Materialize command. +type MaterializeSettings struct { + // workflow is the name of the workflow. + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + SourceKeyspace string `protobuf:"bytes,2,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` + TargetKeyspace string `protobuf:"bytes,3,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + // stop_after_copy specifies if vreplication should be stopped after copying. + StopAfterCopy bool `protobuf:"varint,4,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + TableSettings []*TableMaterializeSettings `protobuf:"bytes,5,rep,name=table_settings,json=tableSettings,proto3" json:"table_settings,omitempty"` + // optional parameters. + Cell string `protobuf:"bytes,6,opt,name=cell,proto3" json:"cell,omitempty"` + TabletTypes string `protobuf:"bytes,7,opt,name=tablet_types,json=tabletTypes,proto3" json:"tablet_types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MaterializeSettings) Reset() { *m = MaterializeSettings{} } +func (m *MaterializeSettings) String() string { return proto.CompactTextString(m) } +func (*MaterializeSettings) ProtoMessage() {} +func (*MaterializeSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_f41247b323a1ab2e, []int{3} +} + +func (m *MaterializeSettings) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MaterializeSettings.Unmarshal(m, b) +} +func (m *MaterializeSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MaterializeSettings.Marshal(b, m, deterministic) +} +func (m *MaterializeSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaterializeSettings.Merge(m, src) +} +func (m *MaterializeSettings) XXX_Size() int { + return xxx_messageInfo_MaterializeSettings.Size(m) +} +func (m *MaterializeSettings) XXX_DiscardUnknown() { + xxx_messageInfo_MaterializeSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_MaterializeSettings proto.InternalMessageInfo + +func (m *MaterializeSettings) GetWorkflow() string { + if m != nil { + return m.Workflow + } + return "" +} + +func (m *MaterializeSettings) GetSourceKeyspace() string { + if m != nil { + return m.SourceKeyspace + } + return "" +} + +func (m *MaterializeSettings) GetTargetKeyspace() string { + if m != nil { + return m.TargetKeyspace + } + return "" +} + +func (m *MaterializeSettings) GetStopAfterCopy() bool { + if m != nil { + return m.StopAfterCopy + } + return false +} + +func (m *MaterializeSettings) GetTableSettings() []*TableMaterializeSettings { + if m != nil { + return m.TableSettings + } + return nil +} + +func (m *MaterializeSettings) GetCell() string { + if m != nil { + return m.Cell + } + return "" +} + +func (m *MaterializeSettings) GetTabletTypes() string { + if m != nil { + return m.TabletTypes + } + return "" +} + +func init() { + proto.RegisterType((*ExecuteVtctlCommandRequest)(nil), "vtctldata.ExecuteVtctlCommandRequest") + proto.RegisterType((*ExecuteVtctlCommandResponse)(nil), "vtctldata.ExecuteVtctlCommandResponse") + proto.RegisterType((*TableMaterializeSettings)(nil), "vtctldata.TableMaterializeSettings") + proto.RegisterType((*MaterializeSettings)(nil), "vtctldata.MaterializeSettings") +} + +func init() { proto.RegisterFile("vtctldata.proto", fileDescriptor_f41247b323a1ab2e) } + +var fileDescriptor_f41247b323a1ab2e = []byte{ + // 422 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xd1, 0x6e, 0xd3, 0x30, + 0x14, 0x86, 0x95, 0xb5, 0x1b, 0xeb, 0x29, 0x4d, 0xc1, 0xdc, 0x58, 0x45, 0x48, 0xa1, 0xc0, 0x88, + 0x84, 0xd4, 0x48, 0xe3, 0x09, 0xa0, 0xf4, 0x06, 0xc4, 0x4d, 0xa8, 0x40, 0xe2, 0x26, 0x72, 0x93, + 0xb3, 0xc8, 0x9a, 0x1b, 0x07, 0xfb, 0xa4, 0x5b, 0x79, 0x03, 0x5e, 0x86, 0x67, 0x44, 0xb6, 0xb3, + 0x70, 0xb3, 0xdd, 0x1d, 0x7f, 0xe7, 0xb7, 0xfd, 0x9f, 0x5f, 0x07, 0xe6, 0x07, 0x2a, 0x49, 0x55, + 0x82, 0xc4, 0xaa, 0x35, 0x9a, 0x34, 0x9b, 0x0c, 0x60, 0x31, 0x53, 0xba, 0xee, 0x48, 0xaa, 0xd0, + 0x59, 0xfe, 0x80, 0xc5, 0xe6, 0x16, 0xcb, 0x8e, 0xf0, 0xbb, 0x93, 0xac, 0xf5, 0x7e, 0x2f, 0x9a, + 0x2a, 0xc7, 0x5f, 0x1d, 0x5a, 0x62, 0x0c, 0xc6, 0xc2, 0xd4, 0x96, 0x47, 0xc9, 0x28, 0x9d, 0xe4, + 0xbe, 0x66, 0x6f, 0x20, 0x16, 0x25, 0x49, 0xdd, 0x14, 0x24, 0xf7, 0xa8, 0x3b, 0xe2, 0x27, 0x49, + 0x94, 0x8e, 0xf2, 0x59, 0xa0, 0xdb, 0x00, 0x97, 0x6b, 0x78, 0x7e, 0xef, 0xc3, 0xb6, 0xd5, 0x8d, + 0x45, 0xf6, 0x1a, 0x4e, 0xf1, 0x80, 0x0d, 0xf1, 0x28, 0x89, 0xd2, 0xe9, 0x65, 0xbc, 0xba, 0xb3, + 0xb5, 0x71, 0x34, 0x0f, 0xcd, 0xe5, 0x9f, 0x08, 0xf8, 0x56, 0xec, 0x14, 0x7e, 0x15, 0x84, 0x46, + 0x0a, 0x25, 0x7f, 0xe3, 0x37, 0x24, 0x92, 0x4d, 0x6d, 0xd9, 0x4b, 0x78, 0x4c, 0xc2, 0xd4, 0x48, + 0x05, 0x39, 0x89, 0x7f, 0x69, 0x92, 0x4f, 0x03, 0xf3, 0xb7, 0xd8, 0x3b, 0x78, 0x6a, 0x75, 0x67, + 0x4a, 0x2c, 0xf0, 0xb6, 0x35, 0x68, 0xad, 0xd4, 0x8d, 0xb7, 0x3b, 0xc9, 0x9f, 0x84, 0xc6, 0x66, + 0xe0, 0xec, 0x05, 0x40, 0x69, 0x50, 0x10, 0x16, 0x55, 0xa5, 0xf8, 0xc8, 0xab, 0x26, 0x81, 0x7c, + 0xaa, 0xd4, 0xf2, 0xef, 0x09, 0x3c, 0xbb, 0xcf, 0xc6, 0x02, 0xce, 0x6f, 0xb4, 0xb9, 0xbe, 0x52, + 0xfa, 0xa6, 0xb7, 0x30, 0x9c, 0xd9, 0x5b, 0x98, 0xf7, 0xff, 0x5f, 0xe3, 0xd1, 0xb6, 0xa2, 0xc4, + 0xfe, 0xf7, 0x38, 0xe0, 0x2f, 0x3d, 0x75, 0xc2, 0x7e, 0x96, 0x41, 0x18, 0x0c, 0xc4, 0x01, 0x0f, + 0xc2, 0x0b, 0x98, 0x5b, 0xd2, 0x6d, 0x21, 0xae, 0x08, 0x4d, 0x51, 0xea, 0xf6, 0xc8, 0xc7, 0x49, + 0x94, 0x9e, 0xe7, 0x33, 0x87, 0x3f, 0x38, 0xba, 0xd6, 0xed, 0x91, 0x7d, 0x86, 0xd8, 0xa7, 0x52, + 0xd8, 0xde, 0x27, 0x3f, 0x4d, 0x46, 0xe9, 0xf4, 0xf2, 0xd5, 0xea, 0xff, 0x6e, 0x3c, 0x94, 0x6c, + 0x3e, 0xf3, 0x57, 0x87, 0x09, 0x19, 0x8c, 0x4b, 0x54, 0x8a, 0x9f, 0x79, 0x47, 0xbe, 0x0e, 0xe1, + 0xef, 0x94, 0x0b, 0xff, 0xd8, 0xa2, 0xe5, 0x8f, 0xee, 0xc2, 0x77, 0x6c, 0xeb, 0xd0, 0xc7, 0xf4, + 0xe7, 0xc5, 0x41, 0x12, 0x5a, 0xbb, 0x92, 0x3a, 0x0b, 0x55, 0x56, 0xeb, 0xec, 0x40, 0x99, 0x5f, + 0xbd, 0x6c, 0x30, 0xb2, 0x3b, 0xf3, 0xe0, 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x46, 0x37, + 0xd0, 0x53, 0xb8, 0x02, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/vtctlservice/vtctlservice.pb.go b/internal/stackql-parser-fork/go/vt/proto/vtctlservice/vtctlservice.pb.go new file mode 100644 index 00000000..8a19c85b --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/vtctlservice/vtctlservice.pb.go @@ -0,0 +1,150 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: vtctlservice.proto + +package vtctlservice + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + vtctldata "github.com/stackql/stackql-parser/go/vt/proto/vtctldata" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("vtctlservice.proto", fileDescriptor_27055cdbb1148d2b) } + +var fileDescriptor_27055cdbb1148d2b = []byte{ + // 146 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x49, 0x2e, + 0xc9, 0x29, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, + 0x41, 0x16, 0x93, 0xe2, 0x07, 0xf3, 0x52, 0x12, 0x4b, 0x12, 0x21, 0xd2, 0x46, 0x85, 0x5c, 0xac, + 0x61, 0x20, 0x21, 0xa1, 0x0c, 0x2e, 0x61, 0xd7, 0x8a, 0xd4, 0xe4, 0xd2, 0x92, 0x54, 0x30, 0xdf, + 0x39, 0x3f, 0x37, 0x37, 0x31, 0x2f, 0x45, 0x48, 0x55, 0x0f, 0xa1, 0x03, 0x8b, 0x7c, 0x50, 0x6a, + 0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x1a, 0x21, 0x65, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x4a, + 0x0c, 0x06, 0x8c, 0x4e, 0xda, 0x51, 0x9a, 0x65, 0x99, 0x25, 0xa9, 0xc5, 0xc5, 0x7a, 0x99, 0xf9, + 0xfa, 0x10, 0x96, 0x7e, 0x7a, 0xbe, 0x7e, 0x59, 0x89, 0x3e, 0xd8, 0x49, 0xfa, 0xc8, 0x0e, 0x4e, + 0x62, 0x03, 0x8b, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x9d, 0xb5, 0x06, 0x92, 0xdb, 0x00, + 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// VtctlClient is the client API for Vtctl service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type VtctlClient interface { + ExecuteVtctlCommand(ctx context.Context, in *vtctldata.ExecuteVtctlCommandRequest, opts ...grpc.CallOption) (Vtctl_ExecuteVtctlCommandClient, error) +} + +type vtctlClient struct { + cc *grpc.ClientConn +} + +func NewVtctlClient(cc *grpc.ClientConn) VtctlClient { + return &vtctlClient{cc} +} + +func (c *vtctlClient) ExecuteVtctlCommand(ctx context.Context, in *vtctldata.ExecuteVtctlCommandRequest, opts ...grpc.CallOption) (Vtctl_ExecuteVtctlCommandClient, error) { + stream, err := c.cc.NewStream(ctx, &_Vtctl_serviceDesc.Streams[0], "/vtctlservice.Vtctl/ExecuteVtctlCommand", opts...) + if err != nil { + return nil, err + } + x := &vtctlExecuteVtctlCommandClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Vtctl_ExecuteVtctlCommandClient interface { + Recv() (*vtctldata.ExecuteVtctlCommandResponse, error) + grpc.ClientStream +} + +type vtctlExecuteVtctlCommandClient struct { + grpc.ClientStream +} + +func (x *vtctlExecuteVtctlCommandClient) Recv() (*vtctldata.ExecuteVtctlCommandResponse, error) { + m := new(vtctldata.ExecuteVtctlCommandResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// VtctlServer is the server API for Vtctl service. +type VtctlServer interface { + ExecuteVtctlCommand(*vtctldata.ExecuteVtctlCommandRequest, Vtctl_ExecuteVtctlCommandServer) error +} + +// UnimplementedVtctlServer can be embedded to have forward compatible implementations. +type UnimplementedVtctlServer struct { +} + +func (*UnimplementedVtctlServer) ExecuteVtctlCommand(req *vtctldata.ExecuteVtctlCommandRequest, srv Vtctl_ExecuteVtctlCommandServer) error { + return status.Errorf(codes.Unimplemented, "method ExecuteVtctlCommand not implemented") +} + +func RegisterVtctlServer(s *grpc.Server, srv VtctlServer) { + s.RegisterService(&_Vtctl_serviceDesc, srv) +} + +func _Vtctl_ExecuteVtctlCommand_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(vtctldata.ExecuteVtctlCommandRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VtctlServer).ExecuteVtctlCommand(m, &vtctlExecuteVtctlCommandServer{stream}) +} + +type Vtctl_ExecuteVtctlCommandServer interface { + Send(*vtctldata.ExecuteVtctlCommandResponse) error + grpc.ServerStream +} + +type vtctlExecuteVtctlCommandServer struct { + grpc.ServerStream +} + +func (x *vtctlExecuteVtctlCommandServer) Send(m *vtctldata.ExecuteVtctlCommandResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Vtctl_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vtctlservice.Vtctl", + HandlerType: (*VtctlServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ExecuteVtctlCommand", + Handler: _Vtctl_ExecuteVtctlCommand_Handler, + ServerStreams: true, + }, + }, + Metadata: "vtctlservice.proto", +} diff --git a/internal/stackql-parser-fork/go/vt/proto/vtgate/vtgate.pb.go b/internal/stackql-parser-fork/go/vt/proto/vtgate/vtgate.pb.go new file mode 100644 index 00000000..ebeada2c --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/vtgate/vtgate.pb.go @@ -0,0 +1,1055 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: vtgate.proto + +package vtgate + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + binlogdata "github.com/stackql/stackql-parser/go/vt/proto/binlogdata" + query "github.com/stackql/stackql-parser/go/vt/proto/query" + topodata "github.com/stackql/stackql-parser/go/vt/proto/topodata" + vtrpc "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// TransactionMode controls the execution of distributed transaction +// across multiple shards. +type TransactionMode int32 + +const ( + // UNSPECIFIED uses the transaction mode set by the VTGate flag 'transaction_mode'. + TransactionMode_UNSPECIFIED TransactionMode = 0 + // SINGLE disallows distributed transactions. + TransactionMode_SINGLE TransactionMode = 1 + // MULTI allows distributed transactions with best effort commit. + TransactionMode_MULTI TransactionMode = 2 + // TWOPC is for distributed transactions with atomic commits. + TransactionMode_TWOPC TransactionMode = 3 +) + +var TransactionMode_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SINGLE", + 2: "MULTI", + 3: "TWOPC", +} + +var TransactionMode_value = map[string]int32{ + "UNSPECIFIED": 0, + "SINGLE": 1, + "MULTI": 2, + "TWOPC": 3, +} + +func (x TransactionMode) String() string { + return proto.EnumName(TransactionMode_name, int32(x)) +} + +func (TransactionMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{0} +} + +// CommitOrder is used to designate which of the ShardSessions +// get used for transactions. +type CommitOrder int32 + +const ( + // NORMAL is the default commit order. + CommitOrder_NORMAL CommitOrder = 0 + // PRE is used to designate pre_sessions. + CommitOrder_PRE CommitOrder = 1 + // POST is used to designate post_sessions. + CommitOrder_POST CommitOrder = 2 + // AUTOCOMMIT is used to run the statement as autocommitted transaction. + CommitOrder_AUTOCOMMIT CommitOrder = 3 +) + +var CommitOrder_name = map[int32]string{ + 0: "NORMAL", + 1: "PRE", + 2: "POST", + 3: "AUTOCOMMIT", +} + +var CommitOrder_value = map[string]int32{ + "NORMAL": 0, + "PRE": 1, + "POST": 2, + "AUTOCOMMIT": 3, +} + +func (x CommitOrder) String() string { + return proto.EnumName(CommitOrder_name, int32(x)) +} + +func (CommitOrder) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{1} +} + +// Session objects are exchanged like cookies through various +// calls to VTGate. The behavior differs between V2 & V3 APIs. +// V3 APIs are Execute, ExecuteBatch and StreamExecute. All +// other APIs are V2. For the V3 APIs, the session +// must be sent with every call to Execute or ExecuteBatch. +// For the V2 APIs, Begin does not accept a session. It instead +// returns a brand new one with in_transaction set to true. +// After a call to Commit or Rollback, the session can be +// discarded. If you're not in a transaction, Session is +// an optional parameter for the V2 APIs. +type Session struct { + // in_transaction is set to true if the session is in a transaction. + InTransaction bool `protobuf:"varint,1,opt,name=in_transaction,json=inTransaction,proto3" json:"in_transaction,omitempty"` + // shard_sessions keep track of per-shard transaction info. + ShardSessions []*Session_ShardSession `protobuf:"bytes,2,rep,name=shard_sessions,json=shardSessions,proto3" json:"shard_sessions,omitempty"` + // autocommit specifies if the session is in autocommit mode. + // This is used only for V3. + Autocommit bool `protobuf:"varint,4,opt,name=autocommit,proto3" json:"autocommit,omitempty"` + // target_string is the target expressed as a string. Valid + // names are: keyspace:shard@target, keyspace@target or @target. + // This is used only for V3. + TargetString string `protobuf:"bytes,5,opt,name=target_string,json=targetString,proto3" json:"target_string,omitempty"` + // options is used only for V3. + Options *query.ExecuteOptions `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` + // transaction_mode specifies the current transaction mode. + TransactionMode TransactionMode `protobuf:"varint,7,opt,name=transaction_mode,json=transactionMode,proto3,enum=vtgate.TransactionMode" json:"transaction_mode,omitempty"` + // warnings contains non-fatal warnings from the previous query + Warnings []*query.QueryWarning `protobuf:"bytes,8,rep,name=warnings,proto3" json:"warnings,omitempty"` + // pre_sessions contains sessions that have to be committed first. + PreSessions []*Session_ShardSession `protobuf:"bytes,9,rep,name=pre_sessions,json=preSessions,proto3" json:"pre_sessions,omitempty"` + // post_sessions contains sessions that have to be committed last. + PostSessions []*Session_ShardSession `protobuf:"bytes,10,rep,name=post_sessions,json=postSessions,proto3" json:"post_sessions,omitempty"` + // last_insert_id keeps track of the last seen insert_id for this session + LastInsertId uint64 `protobuf:"varint,11,opt,name=last_insert_id,json=lastInsertId,proto3" json:"last_insert_id,omitempty"` + // found_rows keeps track of how many rows the last query returned + FoundRows uint64 `protobuf:"varint,12,opt,name=found_rows,json=foundRows,proto3" json:"found_rows,omitempty"` + // user_defined_variables contains all the @variables defined for this session + UserDefinedVariables map[string]*query.BindVariable `protobuf:"bytes,13,rep,name=user_defined_variables,json=userDefinedVariables,proto3" json:"user_defined_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // system_variables keeps track of all session variables set for this connection + // TODO: systay should we keep this so we can apply it ordered? + SystemVariables map[string]string `protobuf:"bytes,14,rep,name=system_variables,json=systemVariables,proto3" json:"system_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // row_count keeps track of the last seen rows affected for this session + RowCount int64 `protobuf:"varint,15,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Session) Reset() { *m = Session{} } +func (m *Session) String() string { return proto.CompactTextString(m) } +func (*Session) ProtoMessage() {} +func (*Session) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{0} +} + +func (m *Session) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Session.Unmarshal(m, b) +} +func (m *Session) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Session.Marshal(b, m, deterministic) +} +func (m *Session) XXX_Merge(src proto.Message) { + xxx_messageInfo_Session.Merge(m, src) +} +func (m *Session) XXX_Size() int { + return xxx_messageInfo_Session.Size(m) +} +func (m *Session) XXX_DiscardUnknown() { + xxx_messageInfo_Session.DiscardUnknown(m) +} + +var xxx_messageInfo_Session proto.InternalMessageInfo + +func (m *Session) GetInTransaction() bool { + if m != nil { + return m.InTransaction + } + return false +} + +func (m *Session) GetShardSessions() []*Session_ShardSession { + if m != nil { + return m.ShardSessions + } + return nil +} + +func (m *Session) GetAutocommit() bool { + if m != nil { + return m.Autocommit + } + return false +} + +func (m *Session) GetTargetString() string { + if m != nil { + return m.TargetString + } + return "" +} + +func (m *Session) GetOptions() *query.ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *Session) GetTransactionMode() TransactionMode { + if m != nil { + return m.TransactionMode + } + return TransactionMode_UNSPECIFIED +} + +func (m *Session) GetWarnings() []*query.QueryWarning { + if m != nil { + return m.Warnings + } + return nil +} + +func (m *Session) GetPreSessions() []*Session_ShardSession { + if m != nil { + return m.PreSessions + } + return nil +} + +func (m *Session) GetPostSessions() []*Session_ShardSession { + if m != nil { + return m.PostSessions + } + return nil +} + +func (m *Session) GetLastInsertId() uint64 { + if m != nil { + return m.LastInsertId + } + return 0 +} + +func (m *Session) GetFoundRows() uint64 { + if m != nil { + return m.FoundRows + } + return 0 +} + +func (m *Session) GetUserDefinedVariables() map[string]*query.BindVariable { + if m != nil { + return m.UserDefinedVariables + } + return nil +} + +func (m *Session) GetSystemVariables() map[string]string { + if m != nil { + return m.SystemVariables + } + return nil +} + +func (m *Session) GetRowCount() int64 { + if m != nil { + return m.RowCount + } + return 0 +} + +type Session_ShardSession struct { + Target *query.Target `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,2,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + // reserved connection if a dedicated connection is needed + ReservedId int64 `protobuf:"varint,4,opt,name=reserved_id,json=reservedId,proto3" json:"reserved_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Session_ShardSession) Reset() { *m = Session_ShardSession{} } +func (m *Session_ShardSession) String() string { return proto.CompactTextString(m) } +func (*Session_ShardSession) ProtoMessage() {} +func (*Session_ShardSession) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{0, 0} +} + +func (m *Session_ShardSession) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Session_ShardSession.Unmarshal(m, b) +} +func (m *Session_ShardSession) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Session_ShardSession.Marshal(b, m, deterministic) +} +func (m *Session_ShardSession) XXX_Merge(src proto.Message) { + xxx_messageInfo_Session_ShardSession.Merge(m, src) +} +func (m *Session_ShardSession) XXX_Size() int { + return xxx_messageInfo_Session_ShardSession.Size(m) +} +func (m *Session_ShardSession) XXX_DiscardUnknown() { + xxx_messageInfo_Session_ShardSession.DiscardUnknown(m) +} + +var xxx_messageInfo_Session_ShardSession proto.InternalMessageInfo + +func (m *Session_ShardSession) GetTarget() *query.Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *Session_ShardSession) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *Session_ShardSession) GetTabletAlias() *topodata.TabletAlias { + if m != nil { + return m.TabletAlias + } + return nil +} + +func (m *Session_ShardSession) GetReservedId() int64 { + if m != nil { + return m.ReservedId + } + return 0 +} + +// ExecuteRequest is the payload to Execute. +type ExecuteRequest struct { + // caller_id identifies the caller. This is the effective caller ID, + // set by the application to further identify the caller. + CallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=caller_id,json=callerId,proto3" json:"caller_id,omitempty"` + // session carries the session state. + Session *Session `protobuf:"bytes,2,opt,name=session,proto3" json:"session,omitempty"` + // query is the query and bind variables to execute. + Query *query.BoundQuery `protobuf:"bytes,3,opt,name=query,proto3" json:"query,omitempty"` + // These values are deprecated. Use session instead. + // TODO(sougou): remove in 3.1 + TabletType topodata.TabletType `protobuf:"varint,4,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + KeyspaceShard string `protobuf:"bytes,6,opt,name=keyspace_shard,json=keyspaceShard,proto3" json:"keyspace_shard,omitempty"` + Options *query.ExecuteOptions `protobuf:"bytes,7,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} } +func (m *ExecuteRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteRequest) ProtoMessage() {} +func (*ExecuteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{1} +} + +func (m *ExecuteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteRequest.Unmarshal(m, b) +} +func (m *ExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteRequest.Marshal(b, m, deterministic) +} +func (m *ExecuteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteRequest.Merge(m, src) +} +func (m *ExecuteRequest) XXX_Size() int { + return xxx_messageInfo_ExecuteRequest.Size(m) +} +func (m *ExecuteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteRequest proto.InternalMessageInfo + +func (m *ExecuteRequest) GetCallerId() *vtrpc.CallerID { + if m != nil { + return m.CallerId + } + return nil +} + +func (m *ExecuteRequest) GetSession() *Session { + if m != nil { + return m.Session + } + return nil +} + +func (m *ExecuteRequest) GetQuery() *query.BoundQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *ExecuteRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *ExecuteRequest) GetKeyspaceShard() string { + if m != nil { + return m.KeyspaceShard + } + return "" +} + +func (m *ExecuteRequest) GetOptions() *query.ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +// ExecuteResponse is the returned value from Execute. +type ExecuteResponse struct { + // error contains an application level error if necessary. Note the + // session may have changed, even when an error is returned (for + // instance if a database integrity error happened). + Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + // session is the updated session information. + Session *Session `protobuf:"bytes,2,opt,name=session,proto3" json:"session,omitempty"` + // result contains the query result, only set if error is unset. + Result *query.QueryResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} } +func (m *ExecuteResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteResponse) ProtoMessage() {} +func (*ExecuteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{2} +} + +func (m *ExecuteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteResponse.Unmarshal(m, b) +} +func (m *ExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteResponse.Marshal(b, m, deterministic) +} +func (m *ExecuteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteResponse.Merge(m, src) +} +func (m *ExecuteResponse) XXX_Size() int { + return xxx_messageInfo_ExecuteResponse.Size(m) +} +func (m *ExecuteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteResponse proto.InternalMessageInfo + +func (m *ExecuteResponse) GetError() *vtrpc.RPCError { + if m != nil { + return m.Error + } + return nil +} + +func (m *ExecuteResponse) GetSession() *Session { + if m != nil { + return m.Session + } + return nil +} + +func (m *ExecuteResponse) GetResult() *query.QueryResult { + if m != nil { + return m.Result + } + return nil +} + +// ExecuteBatchRequest is the payload to ExecuteBatch. +type ExecuteBatchRequest struct { + // caller_id identifies the caller. This is the effective caller ID, + // set by the application to further identify the caller. + CallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=caller_id,json=callerId,proto3" json:"caller_id,omitempty"` + // session carries the session state. + Session *Session `protobuf:"bytes,2,opt,name=session,proto3" json:"session,omitempty"` + // queries is a list of query and bind variables to execute. + Queries []*query.BoundQuery `protobuf:"bytes,3,rep,name=queries,proto3" json:"queries,omitempty"` + // These values are deprecated. Use session instead. + // TODO(sougou): remove in 3.1 + TabletType topodata.TabletType `protobuf:"varint,4,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + AsTransaction bool `protobuf:"varint,5,opt,name=as_transaction,json=asTransaction,proto3" json:"as_transaction,omitempty"` + KeyspaceShard string `protobuf:"bytes,6,opt,name=keyspace_shard,json=keyspaceShard,proto3" json:"keyspace_shard,omitempty"` + Options *query.ExecuteOptions `protobuf:"bytes,7,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteBatchRequest) Reset() { *m = ExecuteBatchRequest{} } +func (m *ExecuteBatchRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteBatchRequest) ProtoMessage() {} +func (*ExecuteBatchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{3} +} + +func (m *ExecuteBatchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteBatchRequest.Unmarshal(m, b) +} +func (m *ExecuteBatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteBatchRequest.Marshal(b, m, deterministic) +} +func (m *ExecuteBatchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteBatchRequest.Merge(m, src) +} +func (m *ExecuteBatchRequest) XXX_Size() int { + return xxx_messageInfo_ExecuteBatchRequest.Size(m) +} +func (m *ExecuteBatchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteBatchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteBatchRequest proto.InternalMessageInfo + +func (m *ExecuteBatchRequest) GetCallerId() *vtrpc.CallerID { + if m != nil { + return m.CallerId + } + return nil +} + +func (m *ExecuteBatchRequest) GetSession() *Session { + if m != nil { + return m.Session + } + return nil +} + +func (m *ExecuteBatchRequest) GetQueries() []*query.BoundQuery { + if m != nil { + return m.Queries + } + return nil +} + +func (m *ExecuteBatchRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *ExecuteBatchRequest) GetAsTransaction() bool { + if m != nil { + return m.AsTransaction + } + return false +} + +func (m *ExecuteBatchRequest) GetKeyspaceShard() string { + if m != nil { + return m.KeyspaceShard + } + return "" +} + +func (m *ExecuteBatchRequest) GetOptions() *query.ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +// ExecuteBatchResponse is the returned value from ExecuteBatch. +type ExecuteBatchResponse struct { + // error contains an application level error if necessary. Note the + // session may have changed, even when an error is returned (for + // instance if a database integrity error happened). + Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + // session is the updated session information. + Session *Session `protobuf:"bytes,2,opt,name=session,proto3" json:"session,omitempty"` + // results contains the query results, only set if application level error is unset. + Results []*query.ResultWithError `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteBatchResponse) Reset() { *m = ExecuteBatchResponse{} } +func (m *ExecuteBatchResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteBatchResponse) ProtoMessage() {} +func (*ExecuteBatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{4} +} + +func (m *ExecuteBatchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteBatchResponse.Unmarshal(m, b) +} +func (m *ExecuteBatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteBatchResponse.Marshal(b, m, deterministic) +} +func (m *ExecuteBatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteBatchResponse.Merge(m, src) +} +func (m *ExecuteBatchResponse) XXX_Size() int { + return xxx_messageInfo_ExecuteBatchResponse.Size(m) +} +func (m *ExecuteBatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteBatchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteBatchResponse proto.InternalMessageInfo + +func (m *ExecuteBatchResponse) GetError() *vtrpc.RPCError { + if m != nil { + return m.Error + } + return nil +} + +func (m *ExecuteBatchResponse) GetSession() *Session { + if m != nil { + return m.Session + } + return nil +} + +func (m *ExecuteBatchResponse) GetResults() []*query.ResultWithError { + if m != nil { + return m.Results + } + return nil +} + +// StreamExecuteRequest is the payload to StreamExecute. +type StreamExecuteRequest struct { + // caller_id identifies the caller. This is the effective caller ID, + // set by the application to further identify the caller. + CallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=caller_id,json=callerId,proto3" json:"caller_id,omitempty"` + // query is the query and bind variables to execute. + Query *query.BoundQuery `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` + // These values are deprecated. Use session instead. + // TODO(sougou): remove in 3.1 + TabletType topodata.TabletType `protobuf:"varint,3,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + KeyspaceShard string `protobuf:"bytes,4,opt,name=keyspace_shard,json=keyspaceShard,proto3" json:"keyspace_shard,omitempty"` + Options *query.ExecuteOptions `protobuf:"bytes,5,opt,name=options,proto3" json:"options,omitempty"` + // session carries the session state. + Session *Session `protobuf:"bytes,6,opt,name=session,proto3" json:"session,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamExecuteRequest) Reset() { *m = StreamExecuteRequest{} } +func (m *StreamExecuteRequest) String() string { return proto.CompactTextString(m) } +func (*StreamExecuteRequest) ProtoMessage() {} +func (*StreamExecuteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{5} +} + +func (m *StreamExecuteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamExecuteRequest.Unmarshal(m, b) +} +func (m *StreamExecuteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamExecuteRequest.Marshal(b, m, deterministic) +} +func (m *StreamExecuteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamExecuteRequest.Merge(m, src) +} +func (m *StreamExecuteRequest) XXX_Size() int { + return xxx_messageInfo_StreamExecuteRequest.Size(m) +} +func (m *StreamExecuteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StreamExecuteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamExecuteRequest proto.InternalMessageInfo + +func (m *StreamExecuteRequest) GetCallerId() *vtrpc.CallerID { + if m != nil { + return m.CallerId + } + return nil +} + +func (m *StreamExecuteRequest) GetQuery() *query.BoundQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *StreamExecuteRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *StreamExecuteRequest) GetKeyspaceShard() string { + if m != nil { + return m.KeyspaceShard + } + return "" +} + +func (m *StreamExecuteRequest) GetOptions() *query.ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *StreamExecuteRequest) GetSession() *Session { + if m != nil { + return m.Session + } + return nil +} + +// StreamExecuteResponse is the returned value from StreamExecute. +// The session is currently not returned because StreamExecute is +// not expected to modify it. +type StreamExecuteResponse struct { + // result contains the result data. + // The first value contains only Fields information. + // The next values contain the actual rows, a few values per result. + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamExecuteResponse) Reset() { *m = StreamExecuteResponse{} } +func (m *StreamExecuteResponse) String() string { return proto.CompactTextString(m) } +func (*StreamExecuteResponse) ProtoMessage() {} +func (*StreamExecuteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{6} +} + +func (m *StreamExecuteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamExecuteResponse.Unmarshal(m, b) +} +func (m *StreamExecuteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamExecuteResponse.Marshal(b, m, deterministic) +} +func (m *StreamExecuteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamExecuteResponse.Merge(m, src) +} +func (m *StreamExecuteResponse) XXX_Size() int { + return xxx_messageInfo_StreamExecuteResponse.Size(m) +} +func (m *StreamExecuteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StreamExecuteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamExecuteResponse proto.InternalMessageInfo + +func (m *StreamExecuteResponse) GetResult() *query.QueryResult { + if m != nil { + return m.Result + } + return nil +} + +// ResolveTransactionRequest is the payload to ResolveTransaction. +type ResolveTransactionRequest struct { + // caller_id identifies the caller. This is the effective caller ID, + // set by the application to further identify the caller. + CallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=caller_id,json=callerId,proto3" json:"caller_id,omitempty"` + // dtid is the dtid of the transaction to be resolved. + Dtid string `protobuf:"bytes,2,opt,name=dtid,proto3" json:"dtid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveTransactionRequest) Reset() { *m = ResolveTransactionRequest{} } +func (m *ResolveTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*ResolveTransactionRequest) ProtoMessage() {} +func (*ResolveTransactionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{7} +} + +func (m *ResolveTransactionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResolveTransactionRequest.Unmarshal(m, b) +} +func (m *ResolveTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResolveTransactionRequest.Marshal(b, m, deterministic) +} +func (m *ResolveTransactionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveTransactionRequest.Merge(m, src) +} +func (m *ResolveTransactionRequest) XXX_Size() int { + return xxx_messageInfo_ResolveTransactionRequest.Size(m) +} +func (m *ResolveTransactionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveTransactionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveTransactionRequest proto.InternalMessageInfo + +func (m *ResolveTransactionRequest) GetCallerId() *vtrpc.CallerID { + if m != nil { + return m.CallerId + } + return nil +} + +func (m *ResolveTransactionRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// ResolveTransactionResponse is the returned value from Rollback. +type ResolveTransactionResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveTransactionResponse) Reset() { *m = ResolveTransactionResponse{} } +func (m *ResolveTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*ResolveTransactionResponse) ProtoMessage() {} +func (*ResolveTransactionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{8} +} + +func (m *ResolveTransactionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResolveTransactionResponse.Unmarshal(m, b) +} +func (m *ResolveTransactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResolveTransactionResponse.Marshal(b, m, deterministic) +} +func (m *ResolveTransactionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveTransactionResponse.Merge(m, src) +} +func (m *ResolveTransactionResponse) XXX_Size() int { + return xxx_messageInfo_ResolveTransactionResponse.Size(m) +} +func (m *ResolveTransactionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveTransactionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveTransactionResponse proto.InternalMessageInfo + +// VStreamRequest is the payload for VStream. +type VStreamRequest struct { + CallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=caller_id,json=callerId,proto3" json:"caller_id,omitempty"` + TabletType topodata.TabletType `protobuf:"varint,2,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + // position specifies the starting point of the bin log positions + // as well as the keyspace-shards to pull events from. + // position is of the form 'ks1:0@MySQL56/|ks2:-80@MySQL56/'. + Vgtid *binlogdata.VGtid `protobuf:"bytes,3,opt,name=vgtid,proto3" json:"vgtid,omitempty"` + Filter *binlogdata.Filter `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamRequest) Reset() { *m = VStreamRequest{} } +func (m *VStreamRequest) String() string { return proto.CompactTextString(m) } +func (*VStreamRequest) ProtoMessage() {} +func (*VStreamRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{9} +} + +func (m *VStreamRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamRequest.Unmarshal(m, b) +} +func (m *VStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VStreamRequest.Marshal(b, m, deterministic) +} +func (m *VStreamRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamRequest.Merge(m, src) +} +func (m *VStreamRequest) XXX_Size() int { + return xxx_messageInfo_VStreamRequest.Size(m) +} +func (m *VStreamRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamRequest proto.InternalMessageInfo + +func (m *VStreamRequest) GetCallerId() *vtrpc.CallerID { + if m != nil { + return m.CallerId + } + return nil +} + +func (m *VStreamRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *VStreamRequest) GetVgtid() *binlogdata.VGtid { + if m != nil { + return m.Vgtid + } + return nil +} + +func (m *VStreamRequest) GetFilter() *binlogdata.Filter { + if m != nil { + return m.Filter + } + return nil +} + +// VStreamResponse is streamed by VStream. +type VStreamResponse struct { + Events []*binlogdata.VEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamResponse) Reset() { *m = VStreamResponse{} } +func (m *VStreamResponse) String() string { return proto.CompactTextString(m) } +func (*VStreamResponse) ProtoMessage() {} +func (*VStreamResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_aab96496ceaf1ebb, []int{10} +} + +func (m *VStreamResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamResponse.Unmarshal(m, b) +} +func (m *VStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VStreamResponse.Marshal(b, m, deterministic) +} +func (m *VStreamResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamResponse.Merge(m, src) +} +func (m *VStreamResponse) XXX_Size() int { + return xxx_messageInfo_VStreamResponse.Size(m) +} +func (m *VStreamResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamResponse proto.InternalMessageInfo + +func (m *VStreamResponse) GetEvents() []*binlogdata.VEvent { + if m != nil { + return m.Events + } + return nil +} + +func init() { + proto.RegisterEnum("vtgate.TransactionMode", TransactionMode_name, TransactionMode_value) + proto.RegisterEnum("vtgate.CommitOrder", CommitOrder_name, CommitOrder_value) + proto.RegisterType((*Session)(nil), "vtgate.Session") + proto.RegisterMapType((map[string]string)(nil), "vtgate.Session.SystemVariablesEntry") + proto.RegisterMapType((map[string]*query.BindVariable)(nil), "vtgate.Session.UserDefinedVariablesEntry") + proto.RegisterType((*Session_ShardSession)(nil), "vtgate.Session.ShardSession") + proto.RegisterType((*ExecuteRequest)(nil), "vtgate.ExecuteRequest") + proto.RegisterType((*ExecuteResponse)(nil), "vtgate.ExecuteResponse") + proto.RegisterType((*ExecuteBatchRequest)(nil), "vtgate.ExecuteBatchRequest") + proto.RegisterType((*ExecuteBatchResponse)(nil), "vtgate.ExecuteBatchResponse") + proto.RegisterType((*StreamExecuteRequest)(nil), "vtgate.StreamExecuteRequest") + proto.RegisterType((*StreamExecuteResponse)(nil), "vtgate.StreamExecuteResponse") + proto.RegisterType((*ResolveTransactionRequest)(nil), "vtgate.ResolveTransactionRequest") + proto.RegisterType((*ResolveTransactionResponse)(nil), "vtgate.ResolveTransactionResponse") + proto.RegisterType((*VStreamRequest)(nil), "vtgate.VStreamRequest") + proto.RegisterType((*VStreamResponse)(nil), "vtgate.VStreamResponse") +} + +func init() { proto.RegisterFile("vtgate.proto", fileDescriptor_aab96496ceaf1ebb) } + +var fileDescriptor_aab96496ceaf1ebb = []byte{ + // 1140 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0xee, 0xfa, 0xdf, 0xc7, 0x7f, 0xcb, 0xd4, 0x2d, 0x5b, 0x53, 0xc0, 0x72, 0x5b, 0xd5, 0x0d, + 0xc8, 0x46, 0x41, 0xa0, 0x0a, 0x81, 0x50, 0xe2, 0xb8, 0x95, 0xab, 0x24, 0x0e, 0x63, 0x27, 0x91, + 0x10, 0x68, 0xb5, 0xf1, 0x4e, 0x9c, 0x55, 0x9d, 0x1d, 0x77, 0x66, 0xec, 0xe0, 0xa7, 0xe0, 0x9e, + 0x17, 0xe0, 0x11, 0x78, 0x07, 0xee, 0xb8, 0xe5, 0x69, 0xd0, 0xfc, 0xac, 0xbd, 0x31, 0x81, 0xa6, + 0xa9, 0x72, 0xb3, 0x9a, 0xf3, 0x33, 0x67, 0xcf, 0xf9, 0xbe, 0x73, 0x66, 0x06, 0x8a, 0x73, 0x31, + 0xf6, 0x04, 0x69, 0x4d, 0x19, 0x15, 0x14, 0x65, 0xb4, 0x54, 0xb3, 0x4f, 0x82, 0x70, 0x42, 0xc7, + 0xbe, 0x27, 0x3c, 0x6d, 0xa9, 0x15, 0xde, 0xcc, 0x08, 0x5b, 0x18, 0xa1, 0x2c, 0xe8, 0x94, 0xc6, + 0x8d, 0x73, 0xc1, 0xa6, 0x23, 0x2d, 0x34, 0xfe, 0xce, 0x41, 0x76, 0x40, 0x38, 0x0f, 0x68, 0x88, + 0x9e, 0x40, 0x39, 0x08, 0x5d, 0xc1, 0xbc, 0x90, 0x7b, 0x23, 0x11, 0xd0, 0xd0, 0xb1, 0xea, 0x56, + 0x33, 0x87, 0x4b, 0x41, 0x38, 0x5c, 0x29, 0x51, 0x07, 0xca, 0xfc, 0xcc, 0x63, 0xbe, 0xcb, 0xf5, + 0x3e, 0xee, 0x24, 0xea, 0xc9, 0x66, 0x61, 0xf3, 0x61, 0xcb, 0x64, 0x67, 0xe2, 0xb5, 0x06, 0xd2, + 0xcb, 0x08, 0xb8, 0xc4, 0x63, 0x12, 0x47, 0x9f, 0x00, 0x78, 0x33, 0x41, 0x47, 0xf4, 0xfc, 0x3c, + 0x10, 0x4e, 0x4a, 0xfd, 0x27, 0xa6, 0x41, 0x8f, 0xa0, 0x24, 0x3c, 0x36, 0x26, 0xc2, 0xe5, 0x82, + 0x05, 0xe1, 0xd8, 0x49, 0xd7, 0xad, 0x66, 0x1e, 0x17, 0xb5, 0x72, 0xa0, 0x74, 0xa8, 0x0d, 0x59, + 0x3a, 0x15, 0x2a, 0x85, 0x4c, 0xdd, 0x6a, 0x16, 0x36, 0xef, 0xb5, 0x74, 0xe1, 0xdd, 0x5f, 0xc8, + 0x68, 0x26, 0x48, 0x5f, 0x1b, 0x71, 0xe4, 0x85, 0xb6, 0xc1, 0x8e, 0x95, 0xe7, 0x9e, 0x53, 0x9f, + 0x38, 0xd9, 0xba, 0xd5, 0x2c, 0x6f, 0x7e, 0x18, 0x25, 0x1f, 0xab, 0x74, 0x8f, 0xfa, 0x04, 0x57, + 0xc4, 0x65, 0x05, 0x6a, 0x43, 0xee, 0xc2, 0x63, 0x61, 0x10, 0x8e, 0xb9, 0x93, 0x53, 0x85, 0xdf, + 0x35, 0x7f, 0xfd, 0x41, 0x7e, 0x8f, 0xb5, 0x0d, 0x2f, 0x9d, 0xd0, 0xf7, 0x50, 0x9c, 0x32, 0xb2, + 0x42, 0x2b, 0x7f, 0x0d, 0xb4, 0x0a, 0x53, 0x46, 0x96, 0x58, 0x6d, 0x41, 0x69, 0x4a, 0xb9, 0x58, + 0x45, 0x80, 0x6b, 0x44, 0x28, 0xca, 0x2d, 0xcb, 0x10, 0x8f, 0xa1, 0x3c, 0xf1, 0xb8, 0x70, 0x83, + 0x90, 0x13, 0x26, 0xdc, 0xc0, 0x77, 0x0a, 0x75, 0xab, 0x99, 0xc2, 0x45, 0xa9, 0xed, 0x29, 0x65, + 0xcf, 0x47, 0x1f, 0x03, 0x9c, 0xd2, 0x59, 0xe8, 0xbb, 0x8c, 0x5e, 0x70, 0xa7, 0xa8, 0x3c, 0xf2, + 0x4a, 0x83, 0xe9, 0x05, 0x47, 0x2e, 0xdc, 0x9f, 0x71, 0xc2, 0x5c, 0x9f, 0x9c, 0x06, 0x21, 0xf1, + 0xdd, 0xb9, 0xc7, 0x02, 0xef, 0x64, 0x42, 0xb8, 0x53, 0x52, 0x09, 0x3d, 0x5b, 0x4f, 0xe8, 0x90, + 0x13, 0xb6, 0xa3, 0x9d, 0x8f, 0x22, 0xdf, 0x6e, 0x28, 0xd8, 0x02, 0x57, 0x67, 0x57, 0x98, 0x50, + 0x1f, 0x6c, 0xbe, 0xe0, 0x82, 0x9c, 0xc7, 0x42, 0x97, 0x55, 0xe8, 0xc7, 0xff, 0xaa, 0x55, 0xf9, + 0xad, 0x45, 0xad, 0xf0, 0xcb, 0x5a, 0xf4, 0x11, 0xe4, 0x19, 0xbd, 0x70, 0x47, 0x74, 0x16, 0x0a, + 0xa7, 0x52, 0xb7, 0x9a, 0x49, 0x9c, 0x63, 0xf4, 0xa2, 0x23, 0xe5, 0xda, 0x1f, 0x16, 0x14, 0xe3, + 0x90, 0xa1, 0x27, 0x90, 0xd1, 0xed, 0xa5, 0xfa, 0xbe, 0xb0, 0x59, 0x32, 0xbc, 0x0e, 0x95, 0x12, + 0x1b, 0xa3, 0x1c, 0x93, 0x78, 0x13, 0x05, 0xbe, 0x93, 0x50, 0x91, 0x4b, 0x31, 0x6d, 0xcf, 0x47, + 0xcf, 0xa1, 0x28, 0x64, 0x16, 0xc2, 0xf5, 0x26, 0x81, 0xc7, 0x9d, 0xa4, 0xe9, 0xd0, 0xe5, 0x34, + 0x0e, 0x95, 0x75, 0x4b, 0x1a, 0x71, 0x41, 0xac, 0x04, 0xf4, 0x29, 0x14, 0x18, 0xe1, 0x84, 0xcd, + 0x89, 0x2f, 0xa3, 0xa7, 0x54, 0x74, 0x88, 0x54, 0x3d, 0xbf, 0xf6, 0x13, 0x3c, 0xf8, 0x4f, 0x68, + 0x91, 0x0d, 0xc9, 0xd7, 0x64, 0xa1, 0x4a, 0xc8, 0x63, 0xb9, 0x44, 0xcf, 0x20, 0x3d, 0xf7, 0x26, + 0x33, 0xa2, 0xf2, 0x5c, 0xb5, 0xeb, 0x76, 0x10, 0x2e, 0xf7, 0x62, 0xed, 0xf1, 0x4d, 0xe2, 0xb9, + 0x55, 0xdb, 0x86, 0xea, 0x55, 0xe8, 0x5e, 0x11, 0xb8, 0x1a, 0x0f, 0x9c, 0x8f, 0xc5, 0x78, 0x95, + 0xca, 0x25, 0xed, 0x54, 0xe3, 0xf7, 0x04, 0x94, 0xcd, 0x28, 0x62, 0xf2, 0x66, 0x46, 0xb8, 0x40, + 0x9f, 0x43, 0x7e, 0xe4, 0x4d, 0x26, 0x84, 0xc9, 0xca, 0x34, 0xcc, 0x95, 0x96, 0x3e, 0x90, 0x3a, + 0x4a, 0xdf, 0xdb, 0xc1, 0x39, 0xed, 0xd1, 0xf3, 0xd1, 0x33, 0xc8, 0x9a, 0xa6, 0x37, 0xb9, 0x57, + 0xd6, 0xfa, 0x00, 0x47, 0x76, 0xf4, 0x14, 0xd2, 0xaa, 0x2c, 0x83, 0xf3, 0x07, 0x51, 0x91, 0xb2, + 0x7b, 0xd5, 0x60, 0x62, 0x6d, 0x47, 0x5f, 0x81, 0x01, 0xdb, 0x15, 0x8b, 0x29, 0x51, 0xe8, 0x96, + 0x37, 0xab, 0xeb, 0xb4, 0x0c, 0x17, 0x53, 0x82, 0x41, 0x2c, 0xd7, 0x92, 0xf5, 0xd7, 0x64, 0xc1, + 0xa7, 0xde, 0x88, 0xb8, 0xea, 0x28, 0x53, 0x47, 0x4e, 0x1e, 0x97, 0x22, 0xad, 0x6a, 0xa5, 0xf8, + 0x91, 0x94, 0xbd, 0xce, 0x91, 0xf4, 0x2a, 0x95, 0x4b, 0xdb, 0x99, 0xc6, 0xaf, 0x16, 0x54, 0x96, + 0x48, 0xf1, 0x29, 0x0d, 0xb9, 0xfc, 0x63, 0x9a, 0x30, 0x46, 0xd9, 0x1a, 0x4c, 0xf8, 0xa0, 0xd3, + 0x95, 0x6a, 0xac, 0xad, 0xef, 0x82, 0xd1, 0x06, 0x64, 0x18, 0xe1, 0xb3, 0x89, 0x30, 0x20, 0xa1, + 0xf8, 0xc1, 0x85, 0x95, 0x05, 0x1b, 0x8f, 0xc6, 0x5f, 0x09, 0xb8, 0x6b, 0x32, 0xda, 0xf6, 0xc4, + 0xe8, 0xec, 0xd6, 0x09, 0xfc, 0x0c, 0xb2, 0x32, 0x9b, 0x80, 0xc8, 0x51, 0x49, 0x5e, 0x4d, 0x61, + 0xe4, 0xf1, 0x1e, 0x24, 0x7a, 0xfc, 0xd2, 0x0d, 0x97, 0xd6, 0x37, 0x9c, 0xc7, 0xe3, 0x37, 0xdc, + 0x2d, 0x71, 0xdd, 0xf8, 0xcd, 0x82, 0xea, 0x65, 0x4c, 0x6f, 0x8d, 0xea, 0x2f, 0x20, 0xab, 0x89, + 0x8c, 0xd0, 0xbc, 0x6f, 0x72, 0xd3, 0x34, 0x1f, 0x07, 0xe2, 0x4c, 0x87, 0x8e, 0xdc, 0xe4, 0xb0, + 0x56, 0x07, 0x82, 0x11, 0xef, 0xfc, 0xbd, 0x46, 0x76, 0x39, 0x87, 0x89, 0x77, 0x9b, 0xc3, 0xe4, + 0x8d, 0xe7, 0x30, 0xf5, 0x16, 0x6e, 0xd2, 0xd7, 0x7a, 0x1a, 0xc4, 0xb0, 0xcd, 0xfc, 0x3f, 0xb6, + 0x8d, 0x0e, 0xdc, 0x5b, 0x03, 0xca, 0xd0, 0xb8, 0x9a, 0x2f, 0xeb, 0xad, 0xf3, 0xf5, 0x33, 0x3c, + 0xc0, 0x84, 0xd3, 0xc9, 0x9c, 0xc4, 0x3a, 0xef, 0x66, 0x90, 0x23, 0x48, 0xf9, 0xc2, 0x5c, 0x43, + 0x79, 0xac, 0xd6, 0x8d, 0x87, 0x50, 0xbb, 0x2a, 0xbc, 0x4e, 0xb4, 0xf1, 0xa7, 0x05, 0xe5, 0x23, + 0x5d, 0xc3, 0xcd, 0x7e, 0xb9, 0x46, 0x5e, 0xe2, 0x9a, 0xe4, 0x3d, 0x85, 0xf4, 0x7c, 0x2c, 0x53, + 0x8d, 0x0e, 0xe9, 0xd8, 0xcb, 0xf5, 0xe8, 0xa5, 0x08, 0x7c, 0xac, 0xed, 0x12, 0xc9, 0xd3, 0x60, + 0x22, 0x08, 0x53, 0xec, 0x4a, 0x24, 0x63, 0x9e, 0x2f, 0x94, 0x05, 0x1b, 0x8f, 0xc6, 0x77, 0x50, + 0x59, 0xd6, 0xb2, 0x22, 0x82, 0xcc, 0x49, 0x28, 0xb8, 0x63, 0xa9, 0xe6, 0xbf, 0xb4, 0xfd, 0xa8, + 0x2b, 0x4d, 0xd8, 0x78, 0x6c, 0xec, 0x40, 0x65, 0xed, 0xcd, 0x87, 0x2a, 0x50, 0x38, 0xdc, 0x1f, + 0x1c, 0x74, 0x3b, 0xbd, 0x17, 0xbd, 0xee, 0x8e, 0x7d, 0x07, 0x01, 0x64, 0x06, 0xbd, 0xfd, 0x97, + 0xbb, 0x5d, 0xdb, 0x42, 0x79, 0x48, 0xef, 0x1d, 0xee, 0x0e, 0x7b, 0x76, 0x42, 0x2e, 0x87, 0xc7, + 0xfd, 0x83, 0x8e, 0x9d, 0xdc, 0xf8, 0x16, 0x0a, 0x1d, 0xf5, 0x72, 0xed, 0x33, 0x9f, 0x30, 0xb9, + 0x61, 0xbf, 0x8f, 0xf7, 0xb6, 0x76, 0xed, 0x3b, 0x28, 0x0b, 0xc9, 0x03, 0x2c, 0x77, 0xe6, 0x20, + 0x75, 0xd0, 0x1f, 0x0c, 0xed, 0x04, 0x2a, 0x03, 0x6c, 0x1d, 0x0e, 0xfb, 0x9d, 0xfe, 0xde, 0x5e, + 0x6f, 0x68, 0x27, 0xb7, 0xbf, 0x86, 0x4a, 0x40, 0x5b, 0xf3, 0x40, 0x10, 0xce, 0xf5, 0xc3, 0xfc, + 0xc7, 0x47, 0x46, 0x0a, 0x68, 0x5b, 0xaf, 0xda, 0x63, 0xda, 0x9e, 0x8b, 0xb6, 0xb2, 0xb6, 0x75, + 0x6b, 0x9e, 0x64, 0x94, 0xf4, 0xe5, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x57, 0xac, 0xe8, 0xa9, + 0x18, 0x0c, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/vtgateservice/vtgateservice.pb.go b/internal/stackql-parser-fork/go/vt/proto/vtgateservice/vtgateservice.pb.go new file mode 100644 index 00000000..2f4ccfb6 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/vtgateservice/vtgateservice.pb.go @@ -0,0 +1,360 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: vtgateservice.proto + +package vtgateservice + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + vtgate "github.com/stackql/stackql-parser/go/vt/proto/vtgate" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("vtgateservice.proto", fileDescriptor_601ae27c95081e0f) } + +var fileDescriptor_601ae27c95081e0f = []byte{ + // 247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0x3f, 0x4b, 0x03, 0x41, + 0x10, 0xc5, 0x15, 0x21, 0x81, 0x25, 0x69, 0x46, 0x51, 0x88, 0x5a, 0x98, 0xd2, 0xe2, 0x56, 0xb4, + 0x15, 0x8b, 0x03, 0x2b, 0x1b, 0x89, 0x92, 0x42, 0xb0, 0x58, 0x97, 0xe1, 0x5c, 0xd0, 0x9b, 0x73, + 0x67, 0xb2, 0xf8, 0x01, 0xfc, 0xe0, 0xc2, 0xed, 0x9f, 0x70, 0x9e, 0xda, 0xdd, 0xfd, 0xde, 0x9b, + 0xb7, 0xc3, 0x3c, 0xb5, 0x1f, 0xa4, 0x31, 0x82, 0x8c, 0x3e, 0x38, 0x8b, 0x55, 0xe7, 0x49, 0x08, + 0xe6, 0x03, 0xb8, 0x98, 0xc5, 0xdf, 0x28, 0x5e, 0x7e, 0xed, 0xa9, 0xc9, 0xda, 0x09, 0x32, 0xc3, + 0xb5, 0x9a, 0xde, 0x7e, 0xa2, 0xdd, 0x08, 0xc2, 0x61, 0x95, 0x4c, 0x09, 0xac, 0xf0, 0x63, 0x83, + 0x2c, 0x8b, 0xa3, 0x11, 0xe7, 0x8e, 0x5a, 0xc6, 0xe5, 0x0e, 0xdc, 0xa9, 0x59, 0x82, 0xb5, 0x11, + 0xfb, 0x0a, 0xc7, 0x3f, 0xac, 0x3d, 0xcd, 0x39, 0x27, 0xbf, 0x8b, 0x25, 0xec, 0x5e, 0xcd, 0x1f, + 0xc4, 0xa3, 0x79, 0xcf, 0x0b, 0x95, 0x81, 0x01, 0xce, 0x71, 0xa7, 0x7f, 0xa8, 0x39, 0xef, 0x62, + 0x17, 0x9e, 0x15, 0xac, 0x90, 0xe9, 0x2d, 0xe0, 0xa3, 0x37, 0x2d, 0x1b, 0x2b, 0x8e, 0x5a, 0x38, + 0xcb, 0x83, 0x63, 0x2d, 0x67, 0x2f, 0xff, 0xb3, 0x94, 0x85, 0x6f, 0xd4, 0x74, 0x1d, 0x1f, 0xdf, + 0xde, 0x2e, 0x81, 0xd1, 0xed, 0x0a, 0xdf, 0xae, 0x57, 0xd7, 0xea, 0xc0, 0x51, 0x15, 0xfa, 0x22, + 0x62, 0x33, 0x55, 0xe3, 0x3b, 0xfb, 0x74, 0x9e, 0x90, 0x23, 0x1d, 0xbf, 0x74, 0x43, 0x3a, 0x88, + 0xee, 0x2d, 0x7a, 0x50, 0xec, 0xcb, 0xa4, 0x87, 0x57, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd5, + 0x14, 0x87, 0x30, 0x05, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// VitessClient is the client API for Vitess service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type VitessClient interface { + // Execute tries to route the query to the right shard. + // It depends on the query and bind variables to provide enough + // information in conjunction with the vindexes to route the query. + // API group: v3 + Execute(ctx context.Context, in *vtgate.ExecuteRequest, opts ...grpc.CallOption) (*vtgate.ExecuteResponse, error) + // ExecuteBatch tries to route the list of queries on the right shards. + // It depends on the query and bind variables to provide enough + // information in conjunction with the vindexes to route the query. + // API group: v3 + ExecuteBatch(ctx context.Context, in *vtgate.ExecuteBatchRequest, opts ...grpc.CallOption) (*vtgate.ExecuteBatchResponse, error) + // StreamExecute executes a streaming query based on shards. + // It depends on the query and bind variables to provide enough + // information in conjunction with the vindexes to route the query. + // Use this method if the query returns a large number of rows. + // API group: v3 + StreamExecute(ctx context.Context, in *vtgate.StreamExecuteRequest, opts ...grpc.CallOption) (Vitess_StreamExecuteClient, error) + // ResolveTransaction resolves a transaction. + // API group: Transactions + ResolveTransaction(ctx context.Context, in *vtgate.ResolveTransactionRequest, opts ...grpc.CallOption) (*vtgate.ResolveTransactionResponse, error) + // VStream streams binlog events from the requested sources. + VStream(ctx context.Context, in *vtgate.VStreamRequest, opts ...grpc.CallOption) (Vitess_VStreamClient, error) +} + +type vitessClient struct { + cc *grpc.ClientConn +} + +func NewVitessClient(cc *grpc.ClientConn) VitessClient { + return &vitessClient{cc} +} + +func (c *vitessClient) Execute(ctx context.Context, in *vtgate.ExecuteRequest, opts ...grpc.CallOption) (*vtgate.ExecuteResponse, error) { + out := new(vtgate.ExecuteResponse) + err := c.cc.Invoke(ctx, "/vtgateservice.Vitess/Execute", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vitessClient) ExecuteBatch(ctx context.Context, in *vtgate.ExecuteBatchRequest, opts ...grpc.CallOption) (*vtgate.ExecuteBatchResponse, error) { + out := new(vtgate.ExecuteBatchResponse) + err := c.cc.Invoke(ctx, "/vtgateservice.Vitess/ExecuteBatch", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vitessClient) StreamExecute(ctx context.Context, in *vtgate.StreamExecuteRequest, opts ...grpc.CallOption) (Vitess_StreamExecuteClient, error) { + stream, err := c.cc.NewStream(ctx, &_Vitess_serviceDesc.Streams[0], "/vtgateservice.Vitess/StreamExecute", opts...) + if err != nil { + return nil, err + } + x := &vitessStreamExecuteClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Vitess_StreamExecuteClient interface { + Recv() (*vtgate.StreamExecuteResponse, error) + grpc.ClientStream +} + +type vitessStreamExecuteClient struct { + grpc.ClientStream +} + +func (x *vitessStreamExecuteClient) Recv() (*vtgate.StreamExecuteResponse, error) { + m := new(vtgate.StreamExecuteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *vitessClient) ResolveTransaction(ctx context.Context, in *vtgate.ResolveTransactionRequest, opts ...grpc.CallOption) (*vtgate.ResolveTransactionResponse, error) { + out := new(vtgate.ResolveTransactionResponse) + err := c.cc.Invoke(ctx, "/vtgateservice.Vitess/ResolveTransaction", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vitessClient) VStream(ctx context.Context, in *vtgate.VStreamRequest, opts ...grpc.CallOption) (Vitess_VStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Vitess_serviceDesc.Streams[1], "/vtgateservice.Vitess/VStream", opts...) + if err != nil { + return nil, err + } + x := &vitessVStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Vitess_VStreamClient interface { + Recv() (*vtgate.VStreamResponse, error) + grpc.ClientStream +} + +type vitessVStreamClient struct { + grpc.ClientStream +} + +func (x *vitessVStreamClient) Recv() (*vtgate.VStreamResponse, error) { + m := new(vtgate.VStreamResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// VitessServer is the server API for Vitess service. +type VitessServer interface { + // Execute tries to route the query to the right shard. + // It depends on the query and bind variables to provide enough + // information in conjunction with the vindexes to route the query. + // API group: v3 + Execute(context.Context, *vtgate.ExecuteRequest) (*vtgate.ExecuteResponse, error) + // ExecuteBatch tries to route the list of queries on the right shards. + // It depends on the query and bind variables to provide enough + // information in conjunction with the vindexes to route the query. + // API group: v3 + ExecuteBatch(context.Context, *vtgate.ExecuteBatchRequest) (*vtgate.ExecuteBatchResponse, error) + // StreamExecute executes a streaming query based on shards. + // It depends on the query and bind variables to provide enough + // information in conjunction with the vindexes to route the query. + // Use this method if the query returns a large number of rows. + // API group: v3 + StreamExecute(*vtgate.StreamExecuteRequest, Vitess_StreamExecuteServer) error + // ResolveTransaction resolves a transaction. + // API group: Transactions + ResolveTransaction(context.Context, *vtgate.ResolveTransactionRequest) (*vtgate.ResolveTransactionResponse, error) + // VStream streams binlog events from the requested sources. + VStream(*vtgate.VStreamRequest, Vitess_VStreamServer) error +} + +// UnimplementedVitessServer can be embedded to have forward compatible implementations. +type UnimplementedVitessServer struct { +} + +func (*UnimplementedVitessServer) Execute(ctx context.Context, req *vtgate.ExecuteRequest) (*vtgate.ExecuteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Execute not implemented") +} +func (*UnimplementedVitessServer) ExecuteBatch(ctx context.Context, req *vtgate.ExecuteBatchRequest) (*vtgate.ExecuteBatchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExecuteBatch not implemented") +} +func (*UnimplementedVitessServer) StreamExecute(req *vtgate.StreamExecuteRequest, srv Vitess_StreamExecuteServer) error { + return status.Errorf(codes.Unimplemented, "method StreamExecute not implemented") +} +func (*UnimplementedVitessServer) ResolveTransaction(ctx context.Context, req *vtgate.ResolveTransactionRequest) (*vtgate.ResolveTransactionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResolveTransaction not implemented") +} +func (*UnimplementedVitessServer) VStream(req *vtgate.VStreamRequest, srv Vitess_VStreamServer) error { + return status.Errorf(codes.Unimplemented, "method VStream not implemented") +} + +func RegisterVitessServer(s *grpc.Server, srv VitessServer) { + s.RegisterService(&_Vitess_serviceDesc, srv) +} + +func _Vitess_Execute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtgate.ExecuteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VitessServer).Execute(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtgateservice.Vitess/Execute", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VitessServer).Execute(ctx, req.(*vtgate.ExecuteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vitess_ExecuteBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtgate.ExecuteBatchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VitessServer).ExecuteBatch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtgateservice.Vitess/ExecuteBatch", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VitessServer).ExecuteBatch(ctx, req.(*vtgate.ExecuteBatchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vitess_StreamExecute_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(vtgate.StreamExecuteRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VitessServer).StreamExecute(m, &vitessStreamExecuteServer{stream}) +} + +type Vitess_StreamExecuteServer interface { + Send(*vtgate.StreamExecuteResponse) error + grpc.ServerStream +} + +type vitessStreamExecuteServer struct { + grpc.ServerStream +} + +func (x *vitessStreamExecuteServer) Send(m *vtgate.StreamExecuteResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Vitess_ResolveTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtgate.ResolveTransactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VitessServer).ResolveTransaction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtgateservice.Vitess/ResolveTransaction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VitessServer).ResolveTransaction(ctx, req.(*vtgate.ResolveTransactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vitess_VStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(vtgate.VStreamRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VitessServer).VStream(m, &vitessVStreamServer{stream}) +} + +type Vitess_VStreamServer interface { + Send(*vtgate.VStreamResponse) error + grpc.ServerStream +} + +type vitessVStreamServer struct { + grpc.ServerStream +} + +func (x *vitessVStreamServer) Send(m *vtgate.VStreamResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Vitess_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vtgateservice.Vitess", + HandlerType: (*VitessServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Execute", + Handler: _Vitess_Execute_Handler, + }, + { + MethodName: "ExecuteBatch", + Handler: _Vitess_ExecuteBatch_Handler, + }, + { + MethodName: "ResolveTransaction", + Handler: _Vitess_ResolveTransaction_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamExecute", + Handler: _Vitess_StreamExecute_Handler, + ServerStreams: true, + }, + { + StreamName: "VStream", + Handler: _Vitess_VStream_Handler, + ServerStreams: true, + }, + }, + Metadata: "vtgateservice.proto", +} diff --git a/internal/stackql-parser-fork/go/vt/proto/vtrpc/vtrpc.pb.go b/internal/stackql-parser-fork/go/vt/proto/vtrpc/vtrpc.pb.go new file mode 100644 index 00000000..8a673537 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/vtrpc/vtrpc.pb.go @@ -0,0 +1,471 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: vtrpc.proto + +package vtrpc + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Code represents canonical error codes. The names, numbers and comments +// must match the ones defined by grpc: +// https://godoc.org/google.golang.org/grpc/codes. +type Code int32 + +const ( + // OK is returned on success. + Code_OK Code = 0 + // CANCELED indicates the operation was cancelled (typically by the caller). + Code_CANCELED Code = 1 + // UNKNOWN error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Code_UNKNOWN Code = 2 + // INVALID_ARGUMENT indicates client specified an invalid argument. + // Note that this differs from FAILED_PRECONDITION. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + Code_INVALID_ARGUMENT Code = 3 + // DEADLINE_EXCEEDED means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + Code_DEADLINE_EXCEEDED Code = 4 + // NOT_FOUND means some requested entity (e.g., file or directory) was + // not found. + Code_NOT_FOUND Code = 5 + // ALREADY_EXISTS means an attempt to create an entity failed because one + // already exists. + Code_ALREADY_EXISTS Code = 6 + // PERMISSION_DENIED indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use RESOURCE_EXHAUSTED + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + Code_PERMISSION_DENIED Code = 7 + // UNAUTHENTICATED indicates the request does not have valid + // authentication credentials for the operation. + Code_UNAUTHENTICATED Code = 16 + // RESOURCE_EXHAUSTED indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + Code_RESOURCE_EXHAUSTED Code = 8 + // FAILED_PRECONDITION indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE: + // (a) Use UNAVAILABLE if the client can retry just the failing call. + // (b) Use ABORTED if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FAILED_PRECONDITION if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FAILED_PRECONDITION + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FAILED_PRECONDITION if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + Code_FAILED_PRECONDITION Code = 9 + // ABORTED indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + Code_ABORTED Code = 10 + // OUT_OF_RANGE means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike INVALID_ARGUMENT, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate INVALID_ARGUMENT if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OUT_OF_RANGE if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FAILED_PRECONDITION and + // OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OUT_OF_RANGE error to detect when + // they are done. + Code_OUT_OF_RANGE Code = 11 + // UNIMPLEMENTED indicates operation is not implemented or not + // supported/enabled in this service. + Code_UNIMPLEMENTED Code = 12 + // INTERNAL errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + Code_INTERNAL Code = 13 + // UNAVAILABLE indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + Code_UNAVAILABLE Code = 14 + // DATA_LOSS indicates unrecoverable data loss or corruption. + Code_DATA_LOSS Code = 15 +) + +var Code_name = map[int32]string{ + 0: "OK", + 1: "CANCELED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 16: "UNAUTHENTICATED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", +} + +var Code_value = map[string]int32{ + "OK": 0, + "CANCELED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "UNAUTHENTICATED": 16, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, +} + +func (x Code) String() string { + return proto.EnumName(Code_name, int32(x)) +} + +func (Code) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_750b4cf641561858, []int{0} +} + +// LegacyErrorCode is the enum values for Errors. This type is deprecated. +// Use Code instead. Background: In the initial design, we thought +// that we may end up with a different list of canonical error codes +// than the ones defined by grpc. In hindsight, we realize that +// the grpc error codes are fairly generic and mostly sufficient. +// In order to avoid confusion, this type will be deprecated in +// favor of the new Code that matches exactly what grpc defines. +// Some names below have a _LEGACY suffix. This is to prevent +// name collisions with Code. +type LegacyErrorCode int32 + +const ( + // SUCCESS_LEGACY is returned from a successful call. + LegacyErrorCode_SUCCESS_LEGACY LegacyErrorCode = 0 + // CANCELLED_LEGACY means that the context was cancelled (and noticed in the app layer, + // as opposed to the RPC layer). + LegacyErrorCode_CANCELLED_LEGACY LegacyErrorCode = 1 + // UNKNOWN_ERROR_LEGACY includes: + // 1. MySQL error codes that we don't explicitly handle. + // 2. MySQL response that wasn't as expected. For example, we might expect a MySQL + // timestamp to be returned in a particular way, but it wasn't. + // 3. Anything else that doesn't fall into a different bucket. + LegacyErrorCode_UNKNOWN_ERROR_LEGACY LegacyErrorCode = 2 + // BAD_INPUT_LEGACY is returned when an end-user either sends SQL that couldn't be parsed correctly, + // or tries a query that isn't supported by Vitess. + LegacyErrorCode_BAD_INPUT_LEGACY LegacyErrorCode = 3 + // DEADLINE_EXCEEDED_LEGACY is returned when an action is taking longer than a given timeout. + LegacyErrorCode_DEADLINE_EXCEEDED_LEGACY LegacyErrorCode = 4 + // INTEGRITY_ERROR_LEGACY is returned on integrity error from MySQL, usually due to + // duplicate primary keys. + LegacyErrorCode_INTEGRITY_ERROR_LEGACY LegacyErrorCode = 5 + // PERMISSION_DENIED_LEGACY errors are returned when a user requests access to something + // that they don't have permissions for. + LegacyErrorCode_PERMISSION_DENIED_LEGACY LegacyErrorCode = 6 + // RESOURCE_EXHAUSTED_LEGACY is returned when a query exceeds its quota in some dimension + // and can't be completed due to that. Queries that return RESOURCE_EXHAUSTED + // should not be retried, as it could be detrimental to the server's health. + // Examples of errors that will cause the RESOURCE_EXHAUSTED code: + // 1. TxPoolFull: this is retried server-side, and is only returned as an error + // if the server-side retries failed. + // 2. Query is killed due to it taking too long. + LegacyErrorCode_RESOURCE_EXHAUSTED_LEGACY LegacyErrorCode = 7 + // QUERY_NOT_SERVED_LEGACY means that a query could not be served right now. + // Client can interpret it as: "the tablet that you sent this query to cannot + // serve the query right now, try a different tablet or try again later." + // This could be due to various reasons: QueryService is not serving, should + // not be serving, wrong shard, wrong tablet type, blacklisted table, etc. + // Clients that receive this error should usually retry the query, but after taking + // the appropriate steps to make sure that the query will get sent to the correct + // tablet. + LegacyErrorCode_QUERY_NOT_SERVED_LEGACY LegacyErrorCode = 8 + // NOT_IN_TX_LEGACY means that we're not currently in a transaction, but we should be. + LegacyErrorCode_NOT_IN_TX_LEGACY LegacyErrorCode = 9 + // INTERNAL_ERROR_LEGACY means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + LegacyErrorCode_INTERNAL_ERROR_LEGACY LegacyErrorCode = 10 + // TRANSIENT_ERROR_LEGACY is used for when there is some error that we expect we can + // recover from automatically - often due to a resource limit temporarily being + // reached. Retrying this error, with an exponential backoff, should succeed. + // Clients should be able to successfully retry the query on the same backends. + // Examples of things that can trigger this error: + // 1. Query has been throttled + // 2. VtGate could have request backlog + LegacyErrorCode_TRANSIENT_ERROR_LEGACY LegacyErrorCode = 11 + // UNAUTHENTICATED_LEGACY errors are returned when a user requests access to something, + // and we're unable to verify the user's authentication. + LegacyErrorCode_UNAUTHENTICATED_LEGACY LegacyErrorCode = 12 +) + +var LegacyErrorCode_name = map[int32]string{ + 0: "SUCCESS_LEGACY", + 1: "CANCELLED_LEGACY", + 2: "UNKNOWN_ERROR_LEGACY", + 3: "BAD_INPUT_LEGACY", + 4: "DEADLINE_EXCEEDED_LEGACY", + 5: "INTEGRITY_ERROR_LEGACY", + 6: "PERMISSION_DENIED_LEGACY", + 7: "RESOURCE_EXHAUSTED_LEGACY", + 8: "QUERY_NOT_SERVED_LEGACY", + 9: "NOT_IN_TX_LEGACY", + 10: "INTERNAL_ERROR_LEGACY", + 11: "TRANSIENT_ERROR_LEGACY", + 12: "UNAUTHENTICATED_LEGACY", +} + +var LegacyErrorCode_value = map[string]int32{ + "SUCCESS_LEGACY": 0, + "CANCELLED_LEGACY": 1, + "UNKNOWN_ERROR_LEGACY": 2, + "BAD_INPUT_LEGACY": 3, + "DEADLINE_EXCEEDED_LEGACY": 4, + "INTEGRITY_ERROR_LEGACY": 5, + "PERMISSION_DENIED_LEGACY": 6, + "RESOURCE_EXHAUSTED_LEGACY": 7, + "QUERY_NOT_SERVED_LEGACY": 8, + "NOT_IN_TX_LEGACY": 9, + "INTERNAL_ERROR_LEGACY": 10, + "TRANSIENT_ERROR_LEGACY": 11, + "UNAUTHENTICATED_LEGACY": 12, +} + +func (x LegacyErrorCode) String() string { + return proto.EnumName(LegacyErrorCode_name, int32(x)) +} + +func (LegacyErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_750b4cf641561858, []int{1} +} + +// CallerID is passed along RPCs to identify the originating client +// for a request. It is not meant to be secure, but only +// informational. The client can put whatever info they want in these +// fields, and they will be trusted by the servers. The fields will +// just be used for logging purposes, and to easily find a client. +// VtGate propagates it to VtTablet, and VtTablet may use this +// information for monitoring purposes, to display on dashboards, or +// for blacklisting purposes. +type CallerID struct { + // principal is the effective user identifier. It is usually filled in + // with whoever made the request to the appserver, if the request + // came from an automated job or another system component. + // If the request comes directly from the Internet, or if the Vitess client + // takes action on its own accord, it is okay for this field to be absent. + Principal string `protobuf:"bytes,1,opt,name=principal,proto3" json:"principal,omitempty"` + // component describes the running process of the effective caller. + // It can for instance be the hostname:port of the servlet initiating the + // database call, or the container engine ID used by the servlet. + Component string `protobuf:"bytes,2,opt,name=component,proto3" json:"component,omitempty"` + // subcomponent describes a component inisde the immediate caller which + // is responsible for generating is request. Suggested values are a + // servlet name or an API endpoint name. + Subcomponent string `protobuf:"bytes,3,opt,name=subcomponent,proto3" json:"subcomponent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CallerID) Reset() { *m = CallerID{} } +func (m *CallerID) String() string { return proto.CompactTextString(m) } +func (*CallerID) ProtoMessage() {} +func (*CallerID) Descriptor() ([]byte, []int) { + return fileDescriptor_750b4cf641561858, []int{0} +} + +func (m *CallerID) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CallerID.Unmarshal(m, b) +} +func (m *CallerID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CallerID.Marshal(b, m, deterministic) +} +func (m *CallerID) XXX_Merge(src proto.Message) { + xxx_messageInfo_CallerID.Merge(m, src) +} +func (m *CallerID) XXX_Size() int { + return xxx_messageInfo_CallerID.Size(m) +} +func (m *CallerID) XXX_DiscardUnknown() { + xxx_messageInfo_CallerID.DiscardUnknown(m) +} + +var xxx_messageInfo_CallerID proto.InternalMessageInfo + +func (m *CallerID) GetPrincipal() string { + if m != nil { + return m.Principal + } + return "" +} + +func (m *CallerID) GetComponent() string { + if m != nil { + return m.Component + } + return "" +} + +func (m *CallerID) GetSubcomponent() string { + if m != nil { + return m.Subcomponent + } + return "" +} + +// RPCError is an application-level error structure returned by +// VtTablet (and passed along by VtGate if appropriate). +// We use this so the clients don't have to parse the error messages, +// but instead can depend on the value of the code. +type RPCError struct { + LegacyCode LegacyErrorCode `protobuf:"varint,1,opt,name=legacy_code,json=legacyCode,proto3,enum=vtrpc.LegacyErrorCode" json:"legacy_code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Code Code `protobuf:"varint,3,opt,name=code,proto3,enum=vtrpc.Code" json:"code,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RPCError) Reset() { *m = RPCError{} } +func (m *RPCError) String() string { return proto.CompactTextString(m) } +func (*RPCError) ProtoMessage() {} +func (*RPCError) Descriptor() ([]byte, []int) { + return fileDescriptor_750b4cf641561858, []int{1} +} + +func (m *RPCError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RPCError.Unmarshal(m, b) +} +func (m *RPCError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RPCError.Marshal(b, m, deterministic) +} +func (m *RPCError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RPCError.Merge(m, src) +} +func (m *RPCError) XXX_Size() int { + return xxx_messageInfo_RPCError.Size(m) +} +func (m *RPCError) XXX_DiscardUnknown() { + xxx_messageInfo_RPCError.DiscardUnknown(m) +} + +var xxx_messageInfo_RPCError proto.InternalMessageInfo + +func (m *RPCError) GetLegacyCode() LegacyErrorCode { + if m != nil { + return m.LegacyCode + } + return LegacyErrorCode_SUCCESS_LEGACY +} + +func (m *RPCError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *RPCError) GetCode() Code { + if m != nil { + return m.Code + } + return Code_OK +} + +func init() { + proto.RegisterEnum("vtrpc.Code", Code_name, Code_value) + proto.RegisterEnum("vtrpc.LegacyErrorCode", LegacyErrorCode_name, LegacyErrorCode_value) + proto.RegisterType((*CallerID)(nil), "vtrpc.CallerID") + proto.RegisterType((*RPCError)(nil), "vtrpc.RPCError") +} + +func init() { proto.RegisterFile("vtrpc.proto", fileDescriptor_750b4cf641561858) } + +var fileDescriptor_750b4cf641561858 = []byte{ + // 605 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0x4d, 0x4f, 0x1b, 0x3b, + 0x14, 0x86, 0xc9, 0x07, 0xf9, 0x38, 0x13, 0x88, 0x31, 0x5f, 0xe1, 0x5e, 0xae, 0xee, 0x55, 0x56, + 0x57, 0x2c, 0x88, 0xd4, 0xaa, 0xea, 0xda, 0x19, 0x1f, 0x82, 0xc5, 0xe0, 0x49, 0x3d, 0x36, 0x25, + 0xdd, 0x58, 0x21, 0x8c, 0x50, 0xaa, 0xc0, 0x44, 0x93, 0x14, 0xa9, 0x9b, 0xfe, 0xac, 0xfe, 0xa6, + 0xfe, 0x8c, 0xca, 0x4e, 0xa6, 0x28, 0xb0, 0x9b, 0xf3, 0x3e, 0xc7, 0xc7, 0xef, 0x79, 0x9d, 0x40, + 0xf0, 0xbc, 0xcc, 0xe7, 0x93, 0xf3, 0x79, 0x9e, 0x2d, 0x33, 0xba, 0xed, 0x8b, 0xee, 0x57, 0x68, + 0x84, 0xe3, 0xd9, 0x2c, 0xcd, 0x05, 0xa7, 0xa7, 0xd0, 0x9c, 0xe7, 0xd3, 0xa7, 0xc9, 0x74, 0x3e, + 0x9e, 0x75, 0x4a, 0xff, 0x95, 0xfe, 0x6f, 0xaa, 0x17, 0xc1, 0xd1, 0x49, 0xf6, 0x38, 0xcf, 0x9e, + 0xd2, 0xa7, 0x65, 0xa7, 0xbc, 0xa2, 0x7f, 0x04, 0xda, 0x85, 0xd6, 0xe2, 0xdb, 0xdd, 0x4b, 0x43, + 0xc5, 0x37, 0x6c, 0x68, 0xdd, 0x1f, 0xd0, 0x50, 0xc3, 0x10, 0xf3, 0x3c, 0xcb, 0xe9, 0x47, 0x08, + 0x66, 0xe9, 0xc3, 0x78, 0xf2, 0xdd, 0x4e, 0xb2, 0xfb, 0xd4, 0xdf, 0xb6, 0xfb, 0xee, 0xe8, 0x7c, + 0xe5, 0x30, 0xf2, 0xc4, 0x37, 0x86, 0xd9, 0x7d, 0xaa, 0x60, 0xd5, 0xea, 0xbe, 0x69, 0x07, 0xea, + 0x8f, 0xe9, 0x62, 0x31, 0x7e, 0x48, 0xd7, 0x26, 0x8a, 0x92, 0xfe, 0x0b, 0x55, 0x3f, 0xab, 0xe2, + 0x67, 0x05, 0xeb, 0x59, 0x7e, 0x80, 0x07, 0x67, 0x3f, 0xcb, 0x50, 0xf5, 0x33, 0x6a, 0x50, 0x8e, + 0xaf, 0xc8, 0x16, 0x6d, 0x41, 0x23, 0x64, 0x32, 0xc4, 0x08, 0x39, 0x29, 0xd1, 0x00, 0xea, 0x46, + 0x5e, 0xc9, 0xf8, 0xb3, 0x24, 0x65, 0x7a, 0x00, 0x44, 0xc8, 0x1b, 0x16, 0x09, 0x6e, 0x99, 0x1a, + 0x98, 0x6b, 0x94, 0x9a, 0x54, 0xe8, 0x21, 0xec, 0x71, 0x64, 0x3c, 0x12, 0x12, 0x2d, 0xde, 0x86, + 0x88, 0x1c, 0x39, 0xa9, 0xd2, 0x1d, 0x68, 0xca, 0x58, 0xdb, 0x8b, 0xd8, 0x48, 0x4e, 0xb6, 0x29, + 0x85, 0x5d, 0x16, 0x29, 0x64, 0x7c, 0x64, 0xf1, 0x56, 0x24, 0x3a, 0x21, 0x35, 0x77, 0x72, 0x88, + 0xea, 0x5a, 0x24, 0x89, 0x88, 0xa5, 0xe5, 0x28, 0x05, 0x72, 0x52, 0xa7, 0xfb, 0xd0, 0x36, 0x92, + 0x19, 0x7d, 0x89, 0x52, 0x8b, 0x90, 0x69, 0xe4, 0x84, 0xd0, 0x23, 0xa0, 0x0a, 0x93, 0xd8, 0xa8, + 0xd0, 0xdd, 0x72, 0xc9, 0x4c, 0xe2, 0xf4, 0x06, 0x3d, 0x86, 0xfd, 0x0b, 0x26, 0x22, 0xe4, 0x76, + 0xa8, 0x30, 0x8c, 0x25, 0x17, 0x5a, 0xc4, 0x92, 0x34, 0x9d, 0x73, 0xd6, 0x8f, 0x95, 0xeb, 0x02, + 0x4a, 0xa0, 0x15, 0x1b, 0x6d, 0xe3, 0x0b, 0xab, 0x98, 0x1c, 0x20, 0x09, 0xe8, 0x1e, 0xec, 0x18, + 0x29, 0xae, 0x87, 0x11, 0xba, 0x35, 0x90, 0x93, 0x96, 0xdb, 0x5c, 0x48, 0x8d, 0x4a, 0xb2, 0x88, + 0xec, 0xd0, 0x36, 0x04, 0x46, 0xb2, 0x1b, 0x26, 0x22, 0xd6, 0x8f, 0x90, 0xec, 0xba, 0x85, 0x38, + 0xd3, 0xcc, 0x46, 0x71, 0x92, 0x90, 0xf6, 0xd9, 0xaf, 0x32, 0xb4, 0x5f, 0xbd, 0x89, 0x5b, 0x32, + 0x31, 0x61, 0x88, 0x49, 0x62, 0x23, 0x1c, 0xb0, 0x70, 0x44, 0xb6, 0x5c, 0x68, 0xab, 0x3c, 0x9d, + 0xc7, 0xb5, 0x5a, 0xa2, 0x1d, 0x38, 0x58, 0xe7, 0x6a, 0x51, 0xa9, 0x58, 0x15, 0xc4, 0x87, 0xdc, + 0x67, 0xdc, 0x0a, 0x39, 0x34, 0xba, 0x50, 0x2b, 0xf4, 0x14, 0x3a, 0x6f, 0x42, 0x2e, 0x68, 0x95, + 0xfe, 0x05, 0x47, 0xce, 0xf9, 0x40, 0x09, 0x3d, 0xda, 0x9c, 0xb7, 0xed, 0x4e, 0xbe, 0x09, 0xb9, + 0xa0, 0x35, 0xfa, 0x0f, 0x9c, 0xbc, 0x8d, 0xb5, 0xc0, 0x75, 0xfa, 0x37, 0x1c, 0x7f, 0x32, 0xa8, + 0x46, 0xd6, 0x3d, 0x65, 0x82, 0xea, 0xe6, 0x05, 0x36, 0x9c, 0x53, 0x27, 0x0b, 0x69, 0xf5, 0x6d, + 0xa1, 0x36, 0xe9, 0x09, 0x1c, 0x16, 0x29, 0x6e, 0x5a, 0x01, 0x67, 0x53, 0x2b, 0x26, 0x13, 0x81, + 0x52, 0x6f, 0xb2, 0xc0, 0xb1, 0x57, 0x8f, 0x5e, 0xb0, 0x56, 0xff, 0x03, 0xb4, 0xa7, 0xd9, 0xf9, + 0xf3, 0x74, 0x99, 0x2e, 0x16, 0xab, 0x7f, 0xea, 0x97, 0xee, 0xba, 0x9a, 0x66, 0xbd, 0xd5, 0x57, + 0xef, 0x21, 0xeb, 0x3d, 0x2f, 0x7b, 0x9e, 0xf6, 0xfc, 0xaf, 0xfc, 0xae, 0xe6, 0x8b, 0xf7, 0xbf, + 0x03, 0x00, 0x00, 0xff, 0xff, 0x27, 0xae, 0x20, 0x34, 0xe3, 0x03, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/vttest/vttest.pb.go b/internal/stackql-parser-fork/go/vt/proto/vttest/vttest.pb.go new file mode 100644 index 00000000..7dad543b --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/vttest/vttest.pb.go @@ -0,0 +1,254 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: vttest.proto + +package vttest + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Shard describes a single shard in a keyspace. +type Shard struct { + // name has to be unique in a keyspace. For unsharded keyspaces, it + // should be '0'. For sharded keyspace, it should be derived from + // the keyrange, like '-80' or '40-80'. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // db_name_override is the mysql db name for this shard. Has to be + // globally unique. If not specified, we will by default use + // 'vt__'. + DbNameOverride string `protobuf:"bytes,2,opt,name=db_name_override,json=dbNameOverride,proto3" json:"db_name_override,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Shard) Reset() { *m = Shard{} } +func (m *Shard) String() string { return proto.CompactTextString(m) } +func (*Shard) ProtoMessage() {} +func (*Shard) Descriptor() ([]byte, []int) { + return fileDescriptor_b9b3dc07179a1ec9, []int{0} +} + +func (m *Shard) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Shard.Unmarshal(m, b) +} +func (m *Shard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Shard.Marshal(b, m, deterministic) +} +func (m *Shard) XXX_Merge(src proto.Message) { + xxx_messageInfo_Shard.Merge(m, src) +} +func (m *Shard) XXX_Size() int { + return xxx_messageInfo_Shard.Size(m) +} +func (m *Shard) XXX_DiscardUnknown() { + xxx_messageInfo_Shard.DiscardUnknown(m) +} + +var xxx_messageInfo_Shard proto.InternalMessageInfo + +func (m *Shard) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Shard) GetDbNameOverride() string { + if m != nil { + return m.DbNameOverride + } + return "" +} + +// Keyspace describes a single keyspace. +type Keyspace struct { + // name has to be unique in a VTTestTopology. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // shards inside this keyspace. Ignored if redirect is set. + Shards []*Shard `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` + // sharding_column_name for this keyspace. Used for v2 calls, but not for v3. + ShardingColumnName string `protobuf:"bytes,3,opt,name=sharding_column_name,json=shardingColumnName,proto3" json:"sharding_column_name,omitempty"` + // sharding_column_type for this keyspace. Used for v2 calls, but not for v3. + ShardingColumnType string `protobuf:"bytes,4,opt,name=sharding_column_type,json=shardingColumnType,proto3" json:"sharding_column_type,omitempty"` + // redirects all traffic to another keyspace. If set, shards is ignored. + ServedFrom string `protobuf:"bytes,5,opt,name=served_from,json=servedFrom,proto3" json:"served_from,omitempty"` + // number of replica tablets to instantiate. This includes the master tablet. + ReplicaCount int32 `protobuf:"varint,6,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"` + // number of rdonly tablets to instantiate. + RdonlyCount int32 `protobuf:"varint,7,opt,name=rdonly_count,json=rdonlyCount,proto3" json:"rdonly_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Keyspace) Reset() { *m = Keyspace{} } +func (m *Keyspace) String() string { return proto.CompactTextString(m) } +func (*Keyspace) ProtoMessage() {} +func (*Keyspace) Descriptor() ([]byte, []int) { + return fileDescriptor_b9b3dc07179a1ec9, []int{1} +} + +func (m *Keyspace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Keyspace.Unmarshal(m, b) +} +func (m *Keyspace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Keyspace.Marshal(b, m, deterministic) +} +func (m *Keyspace) XXX_Merge(src proto.Message) { + xxx_messageInfo_Keyspace.Merge(m, src) +} +func (m *Keyspace) XXX_Size() int { + return xxx_messageInfo_Keyspace.Size(m) +} +func (m *Keyspace) XXX_DiscardUnknown() { + xxx_messageInfo_Keyspace.DiscardUnknown(m) +} + +var xxx_messageInfo_Keyspace proto.InternalMessageInfo + +func (m *Keyspace) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Keyspace) GetShards() []*Shard { + if m != nil { + return m.Shards + } + return nil +} + +func (m *Keyspace) GetShardingColumnName() string { + if m != nil { + return m.ShardingColumnName + } + return "" +} + +func (m *Keyspace) GetShardingColumnType() string { + if m != nil { + return m.ShardingColumnType + } + return "" +} + +func (m *Keyspace) GetServedFrom() string { + if m != nil { + return m.ServedFrom + } + return "" +} + +func (m *Keyspace) GetReplicaCount() int32 { + if m != nil { + return m.ReplicaCount + } + return 0 +} + +func (m *Keyspace) GetRdonlyCount() int32 { + if m != nil { + return m.RdonlyCount + } + return 0 +} + +// VTTestTopology describes the keyspaces in the topology. +type VTTestTopology struct { + // all keyspaces in the topology. + Keyspaces []*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` + // list of cells the keyspaces reside in. Vtgate is started in only the first cell. + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VTTestTopology) Reset() { *m = VTTestTopology{} } +func (m *VTTestTopology) String() string { return proto.CompactTextString(m) } +func (*VTTestTopology) ProtoMessage() {} +func (*VTTestTopology) Descriptor() ([]byte, []int) { + return fileDescriptor_b9b3dc07179a1ec9, []int{2} +} + +func (m *VTTestTopology) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VTTestTopology.Unmarshal(m, b) +} +func (m *VTTestTopology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VTTestTopology.Marshal(b, m, deterministic) +} +func (m *VTTestTopology) XXX_Merge(src proto.Message) { + xxx_messageInfo_VTTestTopology.Merge(m, src) +} +func (m *VTTestTopology) XXX_Size() int { + return xxx_messageInfo_VTTestTopology.Size(m) +} +func (m *VTTestTopology) XXX_DiscardUnknown() { + xxx_messageInfo_VTTestTopology.DiscardUnknown(m) +} + +var xxx_messageInfo_VTTestTopology proto.InternalMessageInfo + +func (m *VTTestTopology) GetKeyspaces() []*Keyspace { + if m != nil { + return m.Keyspaces + } + return nil +} + +func (m *VTTestTopology) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + +func init() { + proto.RegisterType((*Shard)(nil), "vttest.Shard") + proto.RegisterType((*Keyspace)(nil), "vttest.Keyspace") + proto.RegisterType((*VTTestTopology)(nil), "vttest.VTTestTopology") +} + +func init() { proto.RegisterFile("vttest.proto", fileDescriptor_b9b3dc07179a1ec9) } + +var fileDescriptor_b9b3dc07179a1ec9 = []byte{ + // 322 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xcb, 0x6a, 0xe3, 0x40, + 0x10, 0x44, 0xb6, 0xa5, 0x5d, 0xb7, 0x1f, 0x98, 0xc1, 0x87, 0xb9, 0xad, 0xd7, 0xc6, 0xa0, 0x93, + 0xb4, 0x6c, 0xfe, 0x20, 0x26, 0xb9, 0x04, 0x12, 0x50, 0x84, 0x0f, 0xb9, 0x08, 0x59, 0xea, 0x38, + 0x22, 0x92, 0x5a, 0xcc, 0x8c, 0x05, 0xfa, 0x8d, 0x7c, 0x71, 0x50, 0x8f, 0x4c, 0x2e, 0xbe, 0x55, + 0x57, 0xd5, 0x74, 0x35, 0x35, 0x30, 0x6f, 0x8d, 0x41, 0x6d, 0x82, 0x46, 0x91, 0x21, 0xe1, 0xd9, + 0x69, 0xfb, 0x00, 0xee, 0xeb, 0x47, 0xaa, 0x72, 0x21, 0x60, 0x52, 0xa7, 0x15, 0x4a, 0x67, 0xe3, + 0xf8, 0xd3, 0x88, 0xb1, 0xf0, 0x61, 0x95, 0x9f, 0x92, 0x1e, 0x26, 0xd4, 0xa2, 0x52, 0x45, 0x8e, + 0x72, 0xc4, 0xfa, 0x32, 0x3f, 0x3d, 0xa7, 0x15, 0xbe, 0x0c, 0xec, 0xf6, 0x6b, 0x04, 0xbf, 0x9f, + 0xb0, 0xd3, 0x4d, 0x9a, 0xe1, 0xcd, 0x55, 0x7b, 0xf0, 0x74, 0x9f, 0xa3, 0xe5, 0x68, 0x33, 0xf6, + 0x67, 0xff, 0x17, 0xc1, 0x70, 0x0e, 0xa7, 0x47, 0x83, 0x28, 0xfe, 0xc1, 0x9a, 0x51, 0x51, 0x9f, + 0x93, 0x8c, 0xca, 0x4b, 0x55, 0x73, 0xbc, 0x1c, 0xf3, 0x2a, 0x71, 0xd5, 0x0e, 0x2c, 0xf5, 0x17, + 0xdc, 0x7a, 0x61, 0xba, 0x06, 0xe5, 0xe4, 0xd6, 0x8b, 0xb8, 0x6b, 0x50, 0xfc, 0x81, 0x99, 0x46, + 0xd5, 0x62, 0x9e, 0xbc, 0x2b, 0xaa, 0xa4, 0xcb, 0x46, 0xb0, 0xd4, 0xa3, 0xa2, 0x4a, 0xec, 0x60, + 0xa1, 0xb0, 0x29, 0x8b, 0x2c, 0x4d, 0x32, 0xba, 0xd4, 0x46, 0x7a, 0x1b, 0xc7, 0x77, 0xa3, 0xf9, + 0x40, 0x1e, 0x7a, 0x4e, 0xfc, 0x85, 0xb9, 0xca, 0xa9, 0x2e, 0xbb, 0xc1, 0xf3, 0x8b, 0x3d, 0x33, + 0xcb, 0xb1, 0x65, 0x7b, 0x84, 0xe5, 0x31, 0x8e, 0x51, 0x9b, 0x98, 0x1a, 0x2a, 0xe9, 0xdc, 0x89, + 0x00, 0xa6, 0x9f, 0x43, 0x4b, 0x5a, 0x3a, 0x5c, 0xc4, 0xea, 0x5a, 0xc4, 0xb5, 0xbe, 0xe8, 0xc7, + 0x22, 0xd6, 0xe0, 0x66, 0x58, 0x96, 0xb6, 0xb4, 0x69, 0x64, 0x87, 0xfb, 0xfd, 0xdb, 0xae, 0x2d, + 0x0c, 0x6a, 0x1d, 0x14, 0x14, 0x5a, 0x14, 0x9e, 0x29, 0x6c, 0x4d, 0xc8, 0x7f, 0x1b, 0xda, 0x85, + 0x27, 0x8f, 0xa7, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x57, 0x0f, 0xe6, 0xb4, 0xf9, 0x01, + 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/vttime/vttime.pb.go b/internal/stackql-parser-fork/go/vt/proto/vttime/vttime.pb.go new file mode 100644 index 00000000..95c6f63d --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/vttime/vttime.pb.go @@ -0,0 +1,89 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: vttime.proto + +package vttime + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Time represents a time stamp in nanoseconds. In go, use logutil library +// to convert times. +type Time struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanoseconds int32 `protobuf:"varint,2,opt,name=nanoseconds,proto3" json:"nanoseconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Time) Reset() { *m = Time{} } +func (m *Time) String() string { return proto.CompactTextString(m) } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { + return fileDescriptor_bbeb0d3434911dee, []int{0} +} + +func (m *Time) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Time.Unmarshal(m, b) +} +func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Time.Marshal(b, m, deterministic) +} +func (m *Time) XXX_Merge(src proto.Message) { + xxx_messageInfo_Time.Merge(m, src) +} +func (m *Time) XXX_Size() int { + return xxx_messageInfo_Time.Size(m) +} +func (m *Time) XXX_DiscardUnknown() { + xxx_messageInfo_Time.DiscardUnknown(m) +} + +var xxx_messageInfo_Time proto.InternalMessageInfo + +func (m *Time) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Time) GetNanoseconds() int32 { + if m != nil { + return m.Nanoseconds + } + return 0 +} + +func init() { + proto.RegisterType((*Time)(nil), "vttime.Time") +} + +func init() { proto.RegisterFile("vttime.proto", fileDescriptor_bbeb0d3434911dee) } + +var fileDescriptor_bbeb0d3434911dee = []byte{ + // 120 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x2b, 0x29, 0xc9, + 0xcc, 0x4d, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, 0xf0, 0x94, 0x9c, 0xb8, 0x58, + 0x42, 0x32, 0x73, 0x53, 0x85, 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, + 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, 0x5c, 0x21, 0x05, 0x2e, 0xee, 0xbc, 0xc4, 0xbc, 0x7c, + 0x98, 0x2c, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0xb2, 0x90, 0x93, 0x6a, 0x94, 0x72, 0x59, 0x66, + 0x49, 0x6a, 0x71, 0xb1, 0x5e, 0x66, 0xbe, 0x3e, 0x84, 0xa5, 0x9f, 0x9e, 0xaf, 0x5f, 0x56, 0xa2, + 0x0f, 0xb6, 0x4b, 0x1f, 0x62, 0x55, 0x12, 0x1b, 0x98, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, + 0x35, 0x46, 0xf4, 0x16, 0x89, 0x00, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/vtworkerdata/vtworkerdata.pb.go b/internal/stackql-parser-fork/go/vt/proto/vtworkerdata/vtworkerdata.pb.go new file mode 100644 index 00000000..ac008e75 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/vtworkerdata/vtworkerdata.pb.go @@ -0,0 +1,125 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: vtworkerdata.proto + +package vtworkerdata + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + logutil "github.com/stackql/stackql-parser/go/vt/proto/logutil" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// ExecuteVtworkerCommandRequest is the payload for ExecuteVtworkerCommand. +type ExecuteVtworkerCommandRequest struct { + Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteVtworkerCommandRequest) Reset() { *m = ExecuteVtworkerCommandRequest{} } +func (m *ExecuteVtworkerCommandRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteVtworkerCommandRequest) ProtoMessage() {} +func (*ExecuteVtworkerCommandRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_32a791ab99179e8e, []int{0} +} + +func (m *ExecuteVtworkerCommandRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteVtworkerCommandRequest.Unmarshal(m, b) +} +func (m *ExecuteVtworkerCommandRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteVtworkerCommandRequest.Marshal(b, m, deterministic) +} +func (m *ExecuteVtworkerCommandRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteVtworkerCommandRequest.Merge(m, src) +} +func (m *ExecuteVtworkerCommandRequest) XXX_Size() int { + return xxx_messageInfo_ExecuteVtworkerCommandRequest.Size(m) +} +func (m *ExecuteVtworkerCommandRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteVtworkerCommandRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteVtworkerCommandRequest proto.InternalMessageInfo + +func (m *ExecuteVtworkerCommandRequest) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +// ExecuteVtworkerCommandResponse is streamed back by ExecuteVtworkerCommand. +type ExecuteVtworkerCommandResponse struct { + Event *logutil.Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecuteVtworkerCommandResponse) Reset() { *m = ExecuteVtworkerCommandResponse{} } +func (m *ExecuteVtworkerCommandResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteVtworkerCommandResponse) ProtoMessage() {} +func (*ExecuteVtworkerCommandResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_32a791ab99179e8e, []int{1} +} + +func (m *ExecuteVtworkerCommandResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecuteVtworkerCommandResponse.Unmarshal(m, b) +} +func (m *ExecuteVtworkerCommandResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecuteVtworkerCommandResponse.Marshal(b, m, deterministic) +} +func (m *ExecuteVtworkerCommandResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecuteVtworkerCommandResponse.Merge(m, src) +} +func (m *ExecuteVtworkerCommandResponse) XXX_Size() int { + return xxx_messageInfo_ExecuteVtworkerCommandResponse.Size(m) +} +func (m *ExecuteVtworkerCommandResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecuteVtworkerCommandResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecuteVtworkerCommandResponse proto.InternalMessageInfo + +func (m *ExecuteVtworkerCommandResponse) GetEvent() *logutil.Event { + if m != nil { + return m.Event + } + return nil +} + +func init() { + proto.RegisterType((*ExecuteVtworkerCommandRequest)(nil), "vtworkerdata.ExecuteVtworkerCommandRequest") + proto.RegisterType((*ExecuteVtworkerCommandResponse)(nil), "vtworkerdata.ExecuteVtworkerCommandResponse") +} + +func init() { proto.RegisterFile("vtworkerdata.proto", fileDescriptor_32a791ab99179e8e) } + +var fileDescriptor_32a791ab99179e8e = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x29, 0xcf, + 0x2f, 0xca, 0x4e, 0x2d, 0x4a, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, + 0x41, 0x16, 0x93, 0xe2, 0xcd, 0xc9, 0x4f, 0x2f, 0x2d, 0xc9, 0xcc, 0x81, 0x48, 0x2a, 0x19, 0x73, + 0xc9, 0xba, 0x56, 0xa4, 0x26, 0x97, 0x96, 0xa4, 0x86, 0x41, 0x55, 0x39, 0xe7, 0xe7, 0xe6, 0x26, + 0xe6, 0xa5, 0x04, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x09, 0x71, 0xb1, 0x24, 0x16, 0xa5, + 0x17, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x81, 0xd9, 0x4a, 0x6e, 0x5c, 0x72, 0xb8, 0x34, + 0x15, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0xa9, 0x70, 0xb1, 0xa6, 0x96, 0xa5, 0xe6, 0x95, 0x48, + 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0xf1, 0xe9, 0xc1, 0x6c, 0x75, 0x05, 0x89, 0x06, 0x41, 0x24, + 0x9d, 0xb4, 0xa3, 0x34, 0xcb, 0x32, 0x4b, 0x52, 0x8b, 0x8b, 0xf5, 0x32, 0xf3, 0xf5, 0x21, 0x2c, + 0xfd, 0xf4, 0x7c, 0xfd, 0xb2, 0x12, 0x7d, 0xb0, 0xe3, 0xf4, 0x91, 0x1d, 0x9e, 0xc4, 0x06, 0x16, + 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x82, 0xc8, 0x11, 0xe3, 0x00, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/proto/vtworkerservice/vtworkerservice.pb.go b/internal/stackql-parser-fork/go/vt/proto/vtworkerservice/vtworkerservice.pb.go new file mode 100644 index 00000000..4fbade9b --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/vtworkerservice/vtworkerservice.pb.go @@ -0,0 +1,154 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: vtworkerservice.proto + +package vtworkerservice + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + vtworkerdata "github.com/stackql/stackql-parser/go/vt/proto/vtworkerdata" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("vtworkerservice.proto", fileDescriptor_884fe2c3e67151b3) } + +var fileDescriptor_884fe2c3e67151b3 = []byte{ + // 151 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0x2b, 0x29, 0xcf, + 0x2f, 0xca, 0x4e, 0x2d, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0xe2, 0x47, 0x13, 0x96, 0x12, 0x82, 0x09, 0xa4, 0x24, 0x96, 0x24, 0x42, 0x14, 0x19, + 0x35, 0x33, 0x72, 0x71, 0x84, 0x41, 0x85, 0x85, 0xca, 0xb9, 0xc4, 0x5c, 0x2b, 0x52, 0x93, 0x4b, + 0x4b, 0x52, 0x61, 0x42, 0xce, 0xf9, 0xb9, 0xb9, 0x89, 0x79, 0x29, 0x42, 0xda, 0x7a, 0x28, 0x7a, + 0xb1, 0xab, 0x0a, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x91, 0xd2, 0x21, 0x4e, 0x71, 0x71, 0x41, + 0x7e, 0x5e, 0x71, 0xaa, 0x12, 0x83, 0x01, 0xa3, 0x93, 0x5e, 0x94, 0x4e, 0x59, 0x66, 0x49, 0x6a, + 0x71, 0xb1, 0x5e, 0x66, 0xbe, 0x3e, 0x84, 0xa5, 0x9f, 0x9e, 0xaf, 0x5f, 0x56, 0xa2, 0x0f, 0x76, + 0xa5, 0x3e, 0x9a, 0x4f, 0x92, 0xd8, 0xc0, 0xc2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1c, + 0x01, 0x4d, 0x17, 0xfa, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// VtworkerClient is the client API for Vtworker service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type VtworkerClient interface { + // ExecuteVtworkerCommand allows to run a vtworker command by specifying the + // same arguments as on the command line. + ExecuteVtworkerCommand(ctx context.Context, in *vtworkerdata.ExecuteVtworkerCommandRequest, opts ...grpc.CallOption) (Vtworker_ExecuteVtworkerCommandClient, error) +} + +type vtworkerClient struct { + cc *grpc.ClientConn +} + +func NewVtworkerClient(cc *grpc.ClientConn) VtworkerClient { + return &vtworkerClient{cc} +} + +func (c *vtworkerClient) ExecuteVtworkerCommand(ctx context.Context, in *vtworkerdata.ExecuteVtworkerCommandRequest, opts ...grpc.CallOption) (Vtworker_ExecuteVtworkerCommandClient, error) { + stream, err := c.cc.NewStream(ctx, &_Vtworker_serviceDesc.Streams[0], "/vtworkerservice.Vtworker/ExecuteVtworkerCommand", opts...) + if err != nil { + return nil, err + } + x := &vtworkerExecuteVtworkerCommandClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Vtworker_ExecuteVtworkerCommandClient interface { + Recv() (*vtworkerdata.ExecuteVtworkerCommandResponse, error) + grpc.ClientStream +} + +type vtworkerExecuteVtworkerCommandClient struct { + grpc.ClientStream +} + +func (x *vtworkerExecuteVtworkerCommandClient) Recv() (*vtworkerdata.ExecuteVtworkerCommandResponse, error) { + m := new(vtworkerdata.ExecuteVtworkerCommandResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// VtworkerServer is the server API for Vtworker service. +type VtworkerServer interface { + // ExecuteVtworkerCommand allows to run a vtworker command by specifying the + // same arguments as on the command line. + ExecuteVtworkerCommand(*vtworkerdata.ExecuteVtworkerCommandRequest, Vtworker_ExecuteVtworkerCommandServer) error +} + +// UnimplementedVtworkerServer can be embedded to have forward compatible implementations. +type UnimplementedVtworkerServer struct { +} + +func (*UnimplementedVtworkerServer) ExecuteVtworkerCommand(req *vtworkerdata.ExecuteVtworkerCommandRequest, srv Vtworker_ExecuteVtworkerCommandServer) error { + return status.Errorf(codes.Unimplemented, "method ExecuteVtworkerCommand not implemented") +} + +func RegisterVtworkerServer(s *grpc.Server, srv VtworkerServer) { + s.RegisterService(&_Vtworker_serviceDesc, srv) +} + +func _Vtworker_ExecuteVtworkerCommand_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(vtworkerdata.ExecuteVtworkerCommandRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VtworkerServer).ExecuteVtworkerCommand(m, &vtworkerExecuteVtworkerCommandServer{stream}) +} + +type Vtworker_ExecuteVtworkerCommandServer interface { + Send(*vtworkerdata.ExecuteVtworkerCommandResponse) error + grpc.ServerStream +} + +type vtworkerExecuteVtworkerCommandServer struct { + grpc.ServerStream +} + +func (x *vtworkerExecuteVtworkerCommandServer) Send(m *vtworkerdata.ExecuteVtworkerCommandResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Vtworker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vtworkerservice.Vtworker", + HandlerType: (*VtworkerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ExecuteVtworkerCommand", + Handler: _Vtworker_ExecuteVtworkerCommand_Handler, + ServerStreams: true, + }, + }, + Metadata: "vtworkerservice.proto", +} diff --git a/internal/stackql-parser-fork/go/vt/proto/workflow/workflow.pb.go b/internal/stackql-parser-fork/go/vt/proto/workflow/workflow.pb.go new file mode 100644 index 00000000..44e4c451 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/proto/workflow/workflow.pb.go @@ -0,0 +1,388 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: workflow.proto + +package workflow + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// WorkflowState describes the state of a workflow. +// This constant should match the Node object described in +// web/vtctld2/src/app/workflows/node.ts as it is exposed as JSON to +// the Angular 2 web app. +type WorkflowState int32 + +const ( + WorkflowState_NotStarted WorkflowState = 0 + WorkflowState_Running WorkflowState = 1 + WorkflowState_Done WorkflowState = 2 +) + +var WorkflowState_name = map[int32]string{ + 0: "NotStarted", + 1: "Running", + 2: "Done", +} + +var WorkflowState_value = map[string]int32{ + "NotStarted": 0, + "Running": 1, + "Done": 2, +} + +func (x WorkflowState) String() string { + return proto.EnumName(WorkflowState_name, int32(x)) +} + +func (WorkflowState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_892c7f566756b0be, []int{0} +} + +type TaskState int32 + +const ( + TaskState_TaskNotStarted TaskState = 0 + TaskState_TaskRunning TaskState = 1 + TaskState_TaskDone TaskState = 2 +) + +var TaskState_name = map[int32]string{ + 0: "TaskNotStarted", + 1: "TaskRunning", + 2: "TaskDone", +} + +var TaskState_value = map[string]int32{ + "TaskNotStarted": 0, + "TaskRunning": 1, + "TaskDone": 2, +} + +func (x TaskState) String() string { + return proto.EnumName(TaskState_name, int32(x)) +} + +func (TaskState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_892c7f566756b0be, []int{1} +} + +// Workflow is the persisted state of a long-running workflow. +type Workflow struct { + // uuid is set when the workflow is created, and immutable after + // that. + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + // factory_name is set with the name of the factory that created the + // job (and can also restart it). It is set at creation time, and + // immutable after that. + FactoryName string `protobuf:"bytes,2,opt,name=factory_name,json=factoryName,proto3" json:"factory_name,omitempty"` + // name is the display name of the workflow. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // state describes the state of the job. A job is created as + // NotStarted, then the Workflow Manager picks it up and starts it, + // switching it to Running (and populating 'start_time'). The + // workflow can then fail over to a new Workflow Manager is + // necessary, and still be in Running state. When done, it goes to + // Done, 'end_time' is populated, and 'error' is set if there was an + // error. + State WorkflowState `protobuf:"varint,4,opt,name=state,proto3,enum=workflow.WorkflowState" json:"state,omitempty"` + // data is workflow-specific stored data. It is usually a binary + // proto-encoded data structure. It can vary throughout the + // execution of the workflow. It will not change after the workflow + // is Done. + Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` + // error is set if the job finished with an error. This field only + // makes sense if 'state' is Done. + Error string `protobuf:"bytes,6,opt,name=error,proto3" json:"error,omitempty"` + // start_time is set when the workflow manager starts a workflow for + // the first time. This field only makes sense if 'state' is Running + // or Done. + StartTime int64 `protobuf:"varint,7,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // end_time is set when the workflow is finished. + // This field only makes sense if 'state' is Done. + EndTime int64 `protobuf:"varint,8,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // create_time is set when the workflow is created. + CreateTime int64 `protobuf:"varint,9,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Workflow) Reset() { *m = Workflow{} } +func (m *Workflow) String() string { return proto.CompactTextString(m) } +func (*Workflow) ProtoMessage() {} +func (*Workflow) Descriptor() ([]byte, []int) { + return fileDescriptor_892c7f566756b0be, []int{0} +} + +func (m *Workflow) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Workflow.Unmarshal(m, b) +} +func (m *Workflow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Workflow.Marshal(b, m, deterministic) +} +func (m *Workflow) XXX_Merge(src proto.Message) { + xxx_messageInfo_Workflow.Merge(m, src) +} +func (m *Workflow) XXX_Size() int { + return xxx_messageInfo_Workflow.Size(m) +} +func (m *Workflow) XXX_DiscardUnknown() { + xxx_messageInfo_Workflow.DiscardUnknown(m) +} + +var xxx_messageInfo_Workflow proto.InternalMessageInfo + +func (m *Workflow) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +func (m *Workflow) GetFactoryName() string { + if m != nil { + return m.FactoryName + } + return "" +} + +func (m *Workflow) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Workflow) GetState() WorkflowState { + if m != nil { + return m.State + } + return WorkflowState_NotStarted +} + +func (m *Workflow) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Workflow) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *Workflow) GetStartTime() int64 { + if m != nil { + return m.StartTime + } + return 0 +} + +func (m *Workflow) GetEndTime() int64 { + if m != nil { + return m.EndTime + } + return 0 +} + +func (m *Workflow) GetCreateTime() int64 { + if m != nil { + return m.CreateTime + } + return 0 +} + +type WorkflowCheckpoint struct { + // code_version is used to detect incompabilities between the version of the + // running workflow and the one which wrote the checkpoint. If they don't + // match, the workflow must not continue. The author of workflow must update + // this variable in their implementation when incompabilities are introduced. + CodeVersion int32 `protobuf:"varint,1,opt,name=code_version,json=codeVersion,proto3" json:"code_version,omitempty"` + // Task is the data structure that stores the execution status and the + // attributes of a task. + Tasks map[string]*Task `protobuf:"bytes,2,rep,name=tasks,proto3" json:"tasks,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // settings includes workflow specific data, e.g. the resharding workflow + // would store the source shards and destination shards. + Settings map[string]string `protobuf:"bytes,3,rep,name=settings,proto3" json:"settings,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowCheckpoint) Reset() { *m = WorkflowCheckpoint{} } +func (m *WorkflowCheckpoint) String() string { return proto.CompactTextString(m) } +func (*WorkflowCheckpoint) ProtoMessage() {} +func (*WorkflowCheckpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_892c7f566756b0be, []int{1} +} + +func (m *WorkflowCheckpoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WorkflowCheckpoint.Unmarshal(m, b) +} +func (m *WorkflowCheckpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WorkflowCheckpoint.Marshal(b, m, deterministic) +} +func (m *WorkflowCheckpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowCheckpoint.Merge(m, src) +} +func (m *WorkflowCheckpoint) XXX_Size() int { + return xxx_messageInfo_WorkflowCheckpoint.Size(m) +} +func (m *WorkflowCheckpoint) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowCheckpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowCheckpoint proto.InternalMessageInfo + +func (m *WorkflowCheckpoint) GetCodeVersion() int32 { + if m != nil { + return m.CodeVersion + } + return 0 +} + +func (m *WorkflowCheckpoint) GetTasks() map[string]*Task { + if m != nil { + return m.Tasks + } + return nil +} + +func (m *WorkflowCheckpoint) GetSettings() map[string]string { + if m != nil { + return m.Settings + } + return nil +} + +type Task struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + State TaskState `protobuf:"varint,2,opt,name=state,proto3,enum=workflow.TaskState" json:"state,omitempty"` + // attributes includes the parameters the task needs. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Task) Reset() { *m = Task{} } +func (m *Task) String() string { return proto.CompactTextString(m) } +func (*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { + return fileDescriptor_892c7f566756b0be, []int{2} +} + +func (m *Task) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Task.Unmarshal(m, b) +} +func (m *Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Task.Marshal(b, m, deterministic) +} +func (m *Task) XXX_Merge(src proto.Message) { + xxx_messageInfo_Task.Merge(m, src) +} +func (m *Task) XXX_Size() int { + return xxx_messageInfo_Task.Size(m) +} +func (m *Task) XXX_DiscardUnknown() { + xxx_messageInfo_Task.DiscardUnknown(m) +} + +var xxx_messageInfo_Task proto.InternalMessageInfo + +func (m *Task) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Task) GetState() TaskState { + if m != nil { + return m.State + } + return TaskState_TaskNotStarted +} + +func (m *Task) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Task) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func init() { + proto.RegisterEnum("workflow.WorkflowState", WorkflowState_name, WorkflowState_value) + proto.RegisterEnum("workflow.TaskState", TaskState_name, TaskState_value) + proto.RegisterType((*Workflow)(nil), "workflow.Workflow") + proto.RegisterType((*WorkflowCheckpoint)(nil), "workflow.WorkflowCheckpoint") + proto.RegisterMapType((map[string]string)(nil), "workflow.WorkflowCheckpoint.SettingsEntry") + proto.RegisterMapType((map[string]*Task)(nil), "workflow.WorkflowCheckpoint.TasksEntry") + proto.RegisterType((*Task)(nil), "workflow.Task") + proto.RegisterMapType((map[string]string)(nil), "workflow.Task.AttributesEntry") +} + +func init() { proto.RegisterFile("workflow.proto", fileDescriptor_892c7f566756b0be) } + +var fileDescriptor_892c7f566756b0be = []byte{ + // 517 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x6f, 0x8b, 0xd3, 0x4e, + 0x10, 0xfe, 0x25, 0x6d, 0xae, 0xe9, 0xa4, 0x97, 0x2b, 0xf3, 0x3b, 0x30, 0x16, 0xd4, 0x5a, 0x94, + 0xab, 0x05, 0x5b, 0xa8, 0x20, 0xa2, 0xdc, 0x81, 0x7f, 0xf1, 0xd5, 0xbd, 0x48, 0x0f, 0x05, 0xdf, + 0x94, 0xbd, 0x66, 0xaf, 0x2e, 0xbd, 0xee, 0x1e, 0x9b, 0x69, 0x8f, 0x7e, 0x04, 0x3f, 0x98, 0x5f, + 0xc1, 0xcf, 0x23, 0xbb, 0xdb, 0xa4, 0x8d, 0x8a, 0xe0, 0xbb, 0x99, 0x79, 0xe6, 0x79, 0x26, 0x3b, + 0xf3, 0x04, 0xe2, 0x5b, 0xa5, 0x17, 0x57, 0xd7, 0xea, 0x76, 0x78, 0xa3, 0x15, 0x29, 0x0c, 0x8b, + 0xbc, 0xf7, 0xcd, 0x87, 0xf0, 0xf3, 0x36, 0x41, 0x84, 0xfa, 0x6a, 0x25, 0xb2, 0xc4, 0xeb, 0x7a, + 0xfd, 0x66, 0x6a, 0x63, 0x7c, 0x08, 0xad, 0x2b, 0x36, 0x23, 0xa5, 0x37, 0x53, 0xc9, 0x96, 0x3c, + 0xf1, 0x2d, 0x16, 0x6d, 0x6b, 0xe7, 0x6c, 0xc9, 0x0d, 0xcd, 0x42, 0x35, 0x47, 0x33, 0x31, 0x3e, + 0x85, 0x20, 0x27, 0x46, 0x3c, 0xa9, 0x77, 0xbd, 0x7e, 0x3c, 0xbe, 0x33, 0x2c, 0xbf, 0xa0, 0x98, + 0x36, 0x31, 0x70, 0xea, 0xba, 0x8c, 0x44, 0xc6, 0x88, 0x25, 0x41, 0xd7, 0xeb, 0xb7, 0x52, 0x1b, + 0xe3, 0x31, 0x04, 0x5c, 0x6b, 0xa5, 0x93, 0x03, 0xab, 0xeb, 0x12, 0xbc, 0x07, 0x90, 0x13, 0xd3, + 0x34, 0x25, 0xb1, 0xe4, 0x49, 0xa3, 0xeb, 0xf5, 0x6b, 0x69, 0xd3, 0x56, 0x2e, 0xc4, 0x92, 0xe3, + 0x5d, 0x08, 0xb9, 0xcc, 0x1c, 0x18, 0x5a, 0xb0, 0xc1, 0x65, 0x66, 0xa1, 0x07, 0x10, 0xcd, 0x34, + 0x67, 0xc4, 0x1d, 0xda, 0xb4, 0x28, 0xb8, 0x92, 0x69, 0xe8, 0x7d, 0xf7, 0x01, 0x8b, 0xaf, 0x7b, + 0xfb, 0x95, 0xcf, 0x16, 0x37, 0x4a, 0x48, 0x32, 0x1b, 0x98, 0xa9, 0x8c, 0x4f, 0xd7, 0x5c, 0xe7, + 0x42, 0x49, 0xbb, 0x9d, 0x20, 0x8d, 0x4c, 0xed, 0x93, 0x2b, 0xe1, 0x29, 0x04, 0xc4, 0xf2, 0x45, + 0x9e, 0xf8, 0xdd, 0x5a, 0x3f, 0x1a, 0x9f, 0xfc, 0xfe, 0xda, 0x9d, 0xde, 0xf0, 0xc2, 0x74, 0xbe, + 0x97, 0xa4, 0x37, 0xa9, 0x63, 0xe1, 0x07, 0x08, 0x73, 0x4e, 0x24, 0xe4, 0x3c, 0x4f, 0x6a, 0x56, + 0x61, 0xf0, 0x57, 0x85, 0xc9, 0xb6, 0xd9, 0x89, 0x94, 0xdc, 0xce, 0x47, 0x80, 0x9d, 0x38, 0xb6, + 0xa1, 0xb6, 0xe0, 0x9b, 0xed, 0x31, 0x4d, 0x88, 0x8f, 0x20, 0x58, 0xb3, 0xeb, 0x95, 0x3b, 0x62, + 0x34, 0x8e, 0x77, 0x43, 0x0c, 0x2d, 0x75, 0xe0, 0x4b, 0xff, 0x85, 0xd7, 0x79, 0x05, 0x87, 0x95, + 0x21, 0x7f, 0x10, 0x3b, 0xde, 0x17, 0x6b, 0xee, 0x91, 0x7b, 0x3f, 0x3c, 0xa8, 0x1b, 0x41, 0x8c, + 0xc1, 0x2f, 0xdd, 0xe4, 0x8b, 0x0c, 0x9f, 0x14, 0xa6, 0xf0, 0xad, 0x29, 0xfe, 0xaf, 0xce, 0xaf, + 0x18, 0xe2, 0x0c, 0x80, 0x11, 0x69, 0x71, 0xb9, 0x22, 0x5e, 0x2c, 0xe5, 0x7e, 0xb5, 0x7f, 0xf8, + 0xba, 0x6c, 0x70, 0x8b, 0xd8, 0x63, 0xec, 0xcc, 0x53, 0xdf, 0x33, 0x4f, 0xe7, 0x14, 0x8e, 0x7e, + 0x21, 0xfd, 0xcb, 0xc3, 0x06, 0xcf, 0xe1, 0xb0, 0xe2, 0x5e, 0x8c, 0x01, 0xce, 0x15, 0x4d, 0x8c, + 0xfb, 0x78, 0xd6, 0xfe, 0x0f, 0x23, 0x68, 0xa4, 0x2b, 0x29, 0x85, 0x9c, 0xb7, 0x3d, 0x0c, 0xa1, + 0xfe, 0x4e, 0x49, 0xde, 0xf6, 0x07, 0x67, 0xd0, 0x2c, 0x1f, 0x88, 0x08, 0xb1, 0x49, 0x2a, 0xbc, + 0x23, 0x88, 0xec, 0x05, 0x4a, 0x6e, 0x0b, 0x42, 0x53, 0x70, 0xfc, 0x37, 0x27, 0x5f, 0x1e, 0xaf, + 0x05, 0xf1, 0x3c, 0x1f, 0x0a, 0x35, 0x72, 0xd1, 0x68, 0xae, 0x46, 0x6b, 0x1a, 0xd9, 0xdf, 0x79, + 0x54, 0xac, 0xe5, 0xf2, 0xc0, 0xe6, 0xcf, 0x7e, 0x06, 0x00, 0x00, 0xff, 0xff, 0x75, 0x1d, 0xcd, + 0x85, 0xf0, 0x03, 0x00, 0x00, +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/analyzer.go b/internal/stackql-parser-fork/go/vt/sqlparser/analyzer.go new file mode 100644 index 00000000..eb123fa0 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/analyzer.go @@ -0,0 +1,387 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +// analyzer.go contains utility analysis functions. + +import ( + "fmt" + "strings" + "unicode" + + "github.com/stackql/stackql-parser/go/sqltypes" + "github.com/stackql/stackql-parser/go/vt/vterrors" + + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" +) + +// StatementType encodes the type of a SQL statement +type StatementType int + +// These constants are used to identify the SQL statement type. +// Changing this list will require reviewing all calls to Preview. +const ( + StmtSelect StatementType = iota + StmtStream + StmtInsert + StmtReplace + StmtUpdate + StmtDelete + StmtDDL + StmtBegin + StmtCommit + StmtRollback + StmtSet + StmtShow + StmtUse + StmtOther + StmtUnknown + StmtComment + StmtPriv + StmtExplain +) + +// ASTToStatementType returns a StatementType from an AST stmt +func ASTToStatementType(stmt Statement) StatementType { + switch stmt.(type) { + case *Select, *Union: + return StmtSelect + case *Insert: + return StmtInsert + case *Update: + return StmtUpdate + case *Delete: + return StmtDelete + case *Set: + return StmtSet + case *Show: + return StmtShow + case *DDL, *DBDDL: + return StmtDDL + case *Use: + return StmtUse + case *OtherRead, *OtherAdmin: + return StmtOther + case *Explain: + return StmtExplain + case *Begin: + return StmtBegin + case *Commit: + return StmtCommit + case *Rollback: + return StmtRollback + default: + return StmtUnknown + } +} + +// CanNormalize takes Statement and returns if the statement can be normalized. +func CanNormalize(stmt Statement) bool { + switch stmt.(type) { + case *Select, *Union, *Insert, *Update, *Delete, *Set: + return true + } + return false +} + +// IsSetStatement takes Statement and returns if the statement is set statement. +func IsSetStatement(stmt Statement) bool { + switch stmt.(type) { + case *Set: + return true + } + return false +} + +// Preview analyzes the beginning of the query using a simpler and faster +// textual comparison to identify the statement type. +func Preview(sql string) StatementType { + trimmed := StripLeadingComments(sql) + + if strings.Index(trimmed, "/*!") == 0 { + return StmtComment + } + + isNotLetter := func(r rune) bool { return !unicode.IsLetter(r) } + firstWord := strings.TrimLeftFunc(trimmed, isNotLetter) + + if end := strings.IndexFunc(firstWord, unicode.IsSpace); end != -1 { + firstWord = firstWord[:end] + } + // Comparison is done in order of priority. + loweredFirstWord := strings.ToLower(firstWord) + switch loweredFirstWord { + case "select": + return StmtSelect + case "stream": + return StmtStream + case "insert": + return StmtInsert + case "replace": + return StmtReplace + case "update": + return StmtUpdate + case "delete": + return StmtDelete + } + // For the following statements it is not sufficient to rely + // on loweredFirstWord. This is because they are not statements + // in the grammar and we are relying on Preview to parse them. + // For instance, we don't want: "BEGIN JUNK" to be parsed + // as StmtBegin. + trimmedNoComments, _ := SplitMarginComments(trimmed) + switch strings.ToLower(trimmedNoComments) { + case "begin", "start transaction": + return StmtBegin + case "commit": + return StmtCommit + case "rollback": + return StmtRollback + } + switch loweredFirstWord { + case "create", "alter", "rename", "drop", "truncate", "flush": + return StmtDDL + case "set": + return StmtSet + case "show": + return StmtShow + case "use": + return StmtUse + case "describe", "desc", "explain": + return StmtExplain + case "analyze", "repair", "optimize": + return StmtOther + case "grant", "revoke": + return StmtPriv + } + return StmtUnknown +} + +func (s StatementType) String() string { + switch s { + case StmtSelect: + return "SELECT" + case StmtStream: + return "STREAM" + case StmtInsert: + return "INSERT" + case StmtReplace: + return "REPLACE" + case StmtUpdate: + return "UPDATE" + case StmtDelete: + return "DELETE" + case StmtDDL: + return "DDL" + case StmtBegin: + return "BEGIN" + case StmtCommit: + return "COMMIT" + case StmtRollback: + return "ROLLBACK" + case StmtSet: + return "SET" + case StmtShow: + return "SHOW" + case StmtUse: + return "USE" + case StmtOther: + return "OTHER" + case StmtPriv: + return "PRIV" + case StmtExplain: + return "EXPLAIN" + default: + return "UNKNOWN" + } +} + +// IsDML returns true if the query is an INSERT, UPDATE or DELETE statement. +func IsDML(sql string) bool { + switch Preview(sql) { + case StmtInsert, StmtReplace, StmtUpdate, StmtDelete: + return true + } + return false +} + +// IsDMLStatement returns true if the query is an INSERT, UPDATE or DELETE statement. +func IsDMLStatement(stmt Statement) bool { + switch stmt.(type) { + case *Insert, *Update, *Delete: + return true + } + + return false +} + +// IsVschemaDDL returns true if the query is an Vschema alter ddl. +func IsVschemaDDL(ddl *DDL) bool { + switch ddl.Action { + case CreateVindexStr, DropVindexStr, AddVschemaTableStr, DropVschemaTableStr, AddColVindexStr, DropColVindexStr, AddSequenceStr, AddAutoIncStr: + return true + } + return false +} + +// SplitAndExpression breaks up the Expr into AND-separated conditions +// and appends them to filters. Outer parenthesis are removed. Precedence +// should be taken into account if expressions are recombined. +func SplitAndExpression(filters []Expr, node Expr) []Expr { + if node == nil { + return filters + } + switch node := node.(type) { + case *AndExpr: + filters = SplitAndExpression(filters, node.Left) + return SplitAndExpression(filters, node.Right) + } + return append(filters, node) +} + +// TableFromStatement returns the qualified table name for the query. +// This works only for select statements. +func TableFromStatement(sql string) (TableName, error) { + stmt, err := Parse(sql) + if err != nil { + return TableName{}, err + } + sel, ok := stmt.(*Select) + if !ok { + return TableName{}, fmt.Errorf("unrecognized statement: %s", sql) + } + if len(sel.From) != 1 { + return TableName{}, fmt.Errorf("table expression is complex") + } + aliased, ok := sel.From[0].(*AliasedTableExpr) + if !ok { + return TableName{}, fmt.Errorf("table expression is complex") + } + tableName, ok := aliased.Expr.(TableName) + if !ok { + return TableName{}, fmt.Errorf("table expression is complex") + } + return tableName, nil +} + +// GetTableName returns the table name from the SimpleTableExpr +// only if it's a simple expression. Otherwise, it returns "". +func GetTableName(node SimpleTableExpr) TableIdent { + if n, ok := node.(TableName); ok && n.Qualifier.IsEmpty() { + return n.Name + } + // sub-select or '.' expression + return NewTableIdent("") +} + +// IsColName returns true if the Expr is a *ColName. +func IsColName(node Expr) bool { + _, ok := node.(*ColName) + return ok +} + +// IsValue returns true if the Expr is a string, integral or value arg. +// NULL is not considered to be a value. +func IsValue(node Expr) bool { + switch v := node.(type) { + case *SQLVal: + switch v.Type { + case StrVal, HexVal, IntVal, ValArg: + return true + } + } + return false +} + +// IsNull returns true if the Expr is SQL NULL +func IsNull(node Expr) bool { + switch node.(type) { + case *NullVal: + return true + } + return false +} + +// IsSimpleTuple returns true if the Expr is a ValTuple that +// contains simple values or if it's a list arg. +func IsSimpleTuple(node Expr) bool { + switch vals := node.(type) { + case ValTuple: + for _, n := range vals { + if !IsValue(n) { + return false + } + } + return true + case ListArg: + return true + } + // It's a subquery + return false +} + +// NewPlanValue builds a sqltypes.PlanValue from an Expr. +func NewPlanValue(node Expr) (sqltypes.PlanValue, error) { + switch node := node.(type) { + case *SQLVal: + switch node.Type { + case ValArg: + return sqltypes.PlanValue{Key: string(node.Val[1:])}, nil + case IntVal: + n, err := sqltypes.NewIntegral(string(node.Val)) + if err != nil { + return sqltypes.PlanValue{}, err + } + return sqltypes.PlanValue{Value: n}, nil + case FloatVal: + return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.Float64, node.Val)}, nil + case StrVal: + return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, node.Val)}, nil + case HexVal: + v, err := node.HexDecode() + if err != nil { + return sqltypes.PlanValue{}, err + } + return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, v)}, nil + } + case ListArg: + return sqltypes.PlanValue{ListKey: string(node[2:])}, nil + case ValTuple: + pv := sqltypes.PlanValue{ + Values: make([]sqltypes.PlanValue, 0, len(node)), + } + for _, val := range node { + innerpv, err := NewPlanValue(val) + if err != nil { + return sqltypes.PlanValue{}, err + } + if innerpv.ListKey != "" || innerpv.Values != nil { + return sqltypes.PlanValue{}, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: nested lists") + } + pv.Values = append(pv.Values, innerpv) + } + return pv, nil + case *NullVal: + return sqltypes.PlanValue{}, nil + case *UnaryExpr: + switch node.Operator { + case UBinaryStr, Utf8mb4Str, Utf8Str, Latin1Str: // for some charset introducers, we can just ignore them + return NewPlanValue(node.Expr) + } + } + return sqltypes.PlanValue{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expression is too complex '%v'", String(node)) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/analyzer_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/analyzer_test.go new file mode 100644 index 00000000..3d1ee5ea --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/analyzer_test.go @@ -0,0 +1,501 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "testing" + + "github.com/stackql/stackql-parser/go/test/utils" + + "github.com/stretchr/testify/require" + + "github.com/stackql/stackql-parser/go/sqltypes" + + "github.com/stretchr/testify/assert" +) + +func TestPreview(t *testing.T) { + testcases := []struct { + sql string + want StatementType + }{ + {"select ...", StmtSelect}, + {" select ...", StmtSelect}, + {"(select ...", StmtSelect}, + {"( select ...", StmtSelect}, + {"insert ...", StmtInsert}, + {"replace ....", StmtReplace}, + {" update ...", StmtUpdate}, + {"Update", StmtUpdate}, + {"UPDATE ...", StmtUpdate}, + {"\n\t delete ...", StmtDelete}, + {"", StmtUnknown}, + {" ", StmtUnknown}, + {"begin", StmtBegin}, + {" begin", StmtBegin}, + {" begin ", StmtBegin}, + {"\n\t begin ", StmtBegin}, + {"... begin ", StmtUnknown}, + {"begin ...", StmtUnknown}, + {"begin /* ... */", StmtBegin}, + {"begin /* ... *//*test*/", StmtBegin}, + {"begin;", StmtBegin}, + {"begin ;", StmtBegin}, + {"begin; /*...*/", StmtBegin}, + {"start transaction", StmtBegin}, + {"commit", StmtCommit}, + {"commit /*...*/", StmtCommit}, + {"rollback", StmtRollback}, + {"rollback /*...*/", StmtRollback}, + {"create", StmtDDL}, + {"alter", StmtDDL}, + {"rename", StmtDDL}, + {"drop", StmtDDL}, + {"set", StmtSet}, + {"show", StmtShow}, + {"use", StmtUse}, + {"analyze", StmtOther}, + {"describe", StmtExplain}, + {"desc", StmtExplain}, + {"explain", StmtExplain}, + {"repair", StmtOther}, + {"optimize", StmtOther}, + {"grant", StmtPriv}, + {"revoke", StmtPriv}, + {"truncate", StmtDDL}, + {"unknown", StmtUnknown}, + + {"/* leading comment */ select ...", StmtSelect}, + {"/* leading comment */ (select ...", StmtSelect}, + {"/* leading comment */ /* leading comment 2 */ select ...", StmtSelect}, + {"/*! MySQL-specific comment */", StmtComment}, + {"/*!50708 MySQL-version comment */", StmtComment}, + {"-- leading single line comment \n select ...", StmtSelect}, + {"-- leading single line comment \n -- leading single line comment 2\n select ...", StmtSelect}, + + {"/* leading comment no end select ...", StmtUnknown}, + {"-- leading single line comment no end select ...", StmtUnknown}, + } + for _, tcase := range testcases { + if got := Preview(tcase.sql); got != tcase.want { + t.Errorf("Preview(%s): %v, want %v", tcase.sql, got, tcase.want) + } + } +} + +func TestIsDML(t *testing.T) { + testcases := []struct { + sql string + want bool + }{ + {" update ...", true}, + {"Update", true}, + {"UPDATE ...", true}, + {"\n\t delete ...", true}, + {"insert ...", true}, + {"replace ...", true}, + {"select ...", false}, + {" select ...", false}, + {"", false}, + {" ", false}, + } + for _, tcase := range testcases { + if got := IsDML(tcase.sql); got != tcase.want { + t.Errorf("IsDML(%s): %v, want %v", tcase.sql, got, tcase.want) + } + } +} + +func TestSplitAndExpression(t *testing.T) { + testcases := []struct { + sql string + out []string + }{{ + sql: "select * from t", + out: nil, + }, { + sql: "select * from t where a = 1", + out: []string{"a = 1"}, + }, { + sql: "select * from t where a = 1 and b = 1", + out: []string{"a = 1", "b = 1"}, + }, { + sql: "select * from t where a = 1 and (b = 1 and c = 1)", + out: []string{"a = 1", "b = 1", "c = 1"}, + }, { + sql: "select * from t where a = 1 and (b = 1 or c = 1)", + out: []string{"a = 1", "b = 1 or c = 1"}, + }, { + sql: "select * from t where a = 1 and b = 1 or c = 1", + out: []string{"a = 1 and b = 1 or c = 1"}, + }, { + sql: "select * from t where a = 1 and b = 1 + (c = 1)", + out: []string{"a = 1", "b = 1 + (c = 1)"}, + }, { + sql: "select * from t where (a = 1 and ((b = 1 and c = 1)))", + out: []string{"a = 1", "b = 1", "c = 1"}, + }} + for _, tcase := range testcases { + stmt, err := Parse(tcase.sql) + assert.NoError(t, err) + var expr Expr + if where := stmt.(*Select).Where; where != nil { + expr = where.Expr + } + splits := SplitAndExpression(nil, expr) + var got []string + for _, split := range splits { + got = append(got, String(split)) + } + assert.Equal(t, tcase.out, got) + } +} + +func TestTableFromStatement(t *testing.T) { + testcases := []struct { + in, out string + }{{ + in: "select * from t", + out: "t", + }, { + in: "select * from t.t", + out: "t.t", + }, { + in: "select * from t1, t2", + out: "table expression is complex", + }, { + in: "select * from (t)", + out: "table expression is complex", + }, { + in: "select * from t1 join t2", + out: "table expression is complex", + }, { + in: "select * from (select * from t) as tt", + out: "table expression is complex", + }, { + in: "update t set a=1", + out: "unrecognized statement: update t set a=1", + }, { + in: "bad query", + out: "syntax error at position 4 near 'bad'", + }} + + for _, tc := range testcases { + name, err := TableFromStatement(tc.in) + var got string + if err != nil { + got = err.Error() + } else { + got = String(name) + } + if got != tc.out { + t.Errorf("TableFromStatement('%s'): %s, want %s", tc.in, got, tc.out) + } + } +} + +func TestGetTableName(t *testing.T) { + testcases := []struct { + in, out string + }{{ + in: "select * from t", + out: "t", + }, { + in: "select * from t.t", + out: "", + }, { + in: "select * from (select * from t) as tt", + out: "", + }} + + for _, tc := range testcases { + tree, err := Parse(tc.in) + if err != nil { + t.Error(err) + continue + } + out := GetTableName(tree.(*Select).From[0].(*AliasedTableExpr).Expr) + if out.String() != tc.out { + t.Errorf("GetTableName('%s'): %s, want %s", tc.in, out, tc.out) + } + } +} + +func TestIsColName(t *testing.T) { + testcases := []struct { + in Expr + out bool + }{{ + in: &ColName{}, + out: true, + }, { + in: newHexVal(""), + }} + for _, tc := range testcases { + out := IsColName(tc.in) + if out != tc.out { + t.Errorf("IsColName(%T): %v, want %v", tc.in, out, tc.out) + } + } +} + +func TestIsValue(t *testing.T) { + testcases := []struct { + in Expr + out bool + }{{ + in: newStrVal("aa"), + out: true, + }, { + in: newHexVal("3131"), + out: true, + }, { + in: newIntVal("1"), + out: true, + }, { + in: newValArg(":a"), + out: true, + }, { + in: &NullVal{}, + out: false, + }} + for _, tc := range testcases { + out := IsValue(tc.in) + if out != tc.out { + t.Errorf("IsValue(%T): %v, want %v", tc.in, out, tc.out) + } + if tc.out { + // NewPlanValue should not fail for valid values. + if _, err := NewPlanValue(tc.in); err != nil { + t.Error(err) + } + } + } +} + +func TestIsNull(t *testing.T) { + testcases := []struct { + in Expr + out bool + }{{ + in: &NullVal{}, + out: true, + }, { + in: newStrVal(""), + }} + for _, tc := range testcases { + out := IsNull(tc.in) + if out != tc.out { + t.Errorf("IsNull(%T): %v, want %v", tc.in, out, tc.out) + } + } +} + +func TestIsSimpleTuple(t *testing.T) { + testcases := []struct { + in Expr + out bool + }{{ + in: ValTuple{newStrVal("aa")}, + out: true, + }, { + in: ValTuple{&ColName{}}, + }, { + in: ListArg("::a"), + out: true, + }, { + in: &ColName{}, + }} + for _, tc := range testcases { + out := IsSimpleTuple(tc.in) + if out != tc.out { + t.Errorf("IsSimpleTuple(%T): %v, want %v", tc.in, out, tc.out) + } + if tc.out { + // NewPlanValue should not fail for valid tuples. + if _, err := NewPlanValue(tc.in); err != nil { + t.Error(err) + } + } + } +} + +func TestNewPlanValue(t *testing.T) { + tcases := []struct { + in Expr + out sqltypes.PlanValue + err string + }{{ + in: &SQLVal{ + Type: ValArg, + Val: []byte(":valarg"), + }, + out: sqltypes.PlanValue{Key: "valarg"}, + }, { + in: &SQLVal{ + Type: IntVal, + Val: []byte("10"), + }, + out: sqltypes.PlanValue{Value: sqltypes.NewInt64(10)}, + }, { + in: &SQLVal{ + Type: IntVal, + Val: []byte("1111111111111111111111111111111111111111"), + }, + err: "value out of range", + }, { + in: &SQLVal{ + Type: StrVal, + Val: []byte("strval"), + }, + out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, + }, { + in: &SQLVal{ + Type: BitVal, + Val: []byte("01100001"), + }, + err: "expression is too complex", + }, { + in: &SQLVal{ + Type: HexVal, + Val: []byte("3131"), + }, + out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("11")}, + }, { + in: &SQLVal{ + Type: HexVal, + Val: []byte("313"), + }, + err: "odd length hex string", + }, { + in: ListArg("::list"), + out: sqltypes.PlanValue{ListKey: "list"}, + }, { + in: ValTuple{ + &SQLVal{ + Type: ValArg, + Val: []byte(":valarg"), + }, + &SQLVal{ + Type: StrVal, + Val: []byte("strval"), + }, + }, + out: sqltypes.PlanValue{ + Values: []sqltypes.PlanValue{{ + Key: "valarg", + }, { + Value: sqltypes.NewVarBinary("strval"), + }}, + }, + }, { + in: ValTuple{ + ListArg("::list"), + }, + err: "unsupported: nested lists", + }, { + in: &NullVal{}, + out: sqltypes.PlanValue{}, + }, { + in: &SQLVal{ + Type: FloatVal, + Val: []byte("2.1"), + }, + out: sqltypes.PlanValue{Value: sqltypes.NewFloat64(2.1)}, + }, { + in: &UnaryExpr{ + Operator: Latin1Str, + Expr: &SQLVal{ + Type: StrVal, + Val: []byte("strval"), + }, + }, + out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, + }, { + in: &UnaryExpr{ + Operator: UBinaryStr, + Expr: &SQLVal{ + Type: StrVal, + Val: []byte("strval"), + }, + }, + out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, + }, { + in: &UnaryExpr{ + Operator: Utf8mb4Str, + Expr: &SQLVal{ + Type: StrVal, + Val: []byte("strval"), + }, + }, + out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, + }, { + in: &UnaryExpr{ + Operator: Utf8Str, + Expr: &SQLVal{ + Type: StrVal, + Val: []byte("strval"), + }, + }, + out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, + }, { + in: &UnaryExpr{ + Operator: MinusStr, + Expr: &SQLVal{ + Type: FloatVal, + Val: []byte("2.1"), + }, + }, + err: "expression is too complex", + }} + for _, tc := range tcases { + t.Run(String(tc.in), func(t *testing.T) { + got, err := NewPlanValue(tc.in) + if tc.err != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.err) + return + } + + require.NoError(t, err) + mustMatch(t, tc.out, got, "wut!") + }) + } +} + +var mustMatch = utils.MustMatchFn( + []interface{}{ // types with unexported fields + sqltypes.Value{}, + }, + []string{".Conn"}, // ignored fields +) + +func newStrVal(in string) *SQLVal { + return NewStrVal([]byte(in)) +} + +func newIntVal(in string) *SQLVal { + return NewIntVal([]byte(in)) +} + +func newHexVal(in string) *SQLVal { + return NewHexVal([]byte(in)) +} + +func newValArg(in string) *SQLVal { + return NewValArg([]byte(in)) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/ast.go b/internal/stackql-parser-fork/go/vt/sqlparser/ast.go new file mode 100644 index 00000000..24a4a764 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/ast.go @@ -0,0 +1,2240 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "fmt" + "strings" + + "github.com/stackql/stackql-parser/go/sqltypes" +) + +/* +This is the Vitess AST. This file should only contain pure struct declarations, +or methods used to mark a struct as implementing an interface. All other methods +related to these structs live in ast_funcs.go +*/ + +// SQLNode defines the interface for all nodes +// generated by the parser. +type SQLNode interface { + Format(buf *TrackedBuffer) + Accept(SQLAstVisitor) error +} + +// Statements +type ( + // Statement represents a statement. + Statement interface { + iStatement() + SQLNode + } + + // SelectStatement any SELECT statement. + SelectStatement interface { + iSelectStatement() + iStatement() + iInsertRows() + AddOrder(*Order) + SetLimit(*Limit) + SetLock(lock string) + SQLNode + } + + // Select represents a SELECT statement. + Select struct { + With *With + Cache *bool // a reference here so it can be nil + Distinct bool + StraightJoinHint bool + SQLCalcFoundRows bool + Comments Comments + SelectExprs SelectExprs + From TableExprs + Where *Where + GroupBy GroupBy + Having *Where + OrderBy OrderBy + Limit *Limit + Lock string + } + + // With represents a WITH clause (CTE - Common Table Expression) + With struct { + Recursive bool + CTEs []*CommonTableExpr + } + + // CommonTableExpr represents a single CTE definition + CommonTableExpr struct { + Name TableIdent + Columns Columns + Subquery *Subquery + } + + // Exec represents an EXEC statement + Exec struct { + Await bool + Comments Comments + MethodName TableName + ExecVarDefs []ExecVarDef + OptExecPayload *ExecVarDef + } + + // Purge represents a PURGE statement + Purge struct { + Await bool + Comments Comments + IsGlobal bool + Target TableName + } + + // Purge represents a PURGE statement + NativeQuery struct { + Await bool + Comments Comments + QueryString string + } + + Sleep struct { + Duration *SQLVal + } + + // UnionSelect represents union type and select statement after first select statement. + UnionSelect struct { + Type string + Statement SelectStatement + } + + // Union represents a UNION statement. + Union struct { + FirstStatement SelectStatement + UnionSelects []*UnionSelect + OrderBy OrderBy + Limit *Limit + Lock string + } + + // Stream represents a SELECT statement. + Stream struct { + Comments Comments + SelectExpr SelectExpr + Table TableName + } + + // Insert represents an INSERT or REPLACE statement. + // Per the MySQL docs, http://dev.mysql.com/doc/refman/5.7/en/replace.html + // Replace is the counterpart to `INSERT IGNORE`, and works exactly like a + // normal INSERT except if the row exists. In that case it first deletes + // the row and re-inserts with new values. For that reason we keep it as an Insert struct. + // Replaces are currently disallowed in sharded schemas because + // of the implications the deletion part may have on vindexes. + // If you add fields here, consider adding them to calls to validateUnshardedRoute. + Insert struct { + Action string + Comments Comments + Ignore string + Table TableName + Partitions Partitions + Columns Columns + Rows InsertRows + OnDup OnDup + SelectExprs SelectExprs + } + + // Update represents an UPDATE statement. + // If you add fields here, consider adding them to calls to validateUnshardedRoute. + Update struct { + Action string + Comments Comments + Ignore string + TableExprs TableExprs + Exprs UpdateExprs + From TableExprs + Where *Where + OrderBy OrderBy + Limit *Limit + SelectExprs SelectExprs + } + + // Delete represents a DELETE statement. + // If you add fields here, consider adding them to calls to validateUnshardedRoute. + Delete struct { + Comments Comments + Targets TableNames + TableExprs TableExprs + Partitions Partitions + Where *Where + OrderBy OrderBy + Limit *Limit + SelectExprs SelectExprs + } + + // Set represents a SET statement. + Set struct { + Comments Comments + Exprs SetExprs + } + + // SetTransaction represents a SET TRANSACTION statement. + SetTransaction struct { + SQLNode + Comments Comments + Scope string + Characteristics []Characteristic + } + + // Characteristic is a transaction related change + Characteristic interface { + SQLNode + iChar() + } + + // IsolationLevel is self-explanatory in this context + IsolationLevel struct { + Level string + } + + // AccessMode is ReadOnly/ReadWrite + AccessMode struct { + Mode string + } + + // DBDDL represents a CREATE, DROP, or ALTER database statement. + DBDDL struct { + Action string + DBName string + IfExists bool + IfNotExists bool + Collate string + Charset string + } + + RefreshMaterializedView struct { + Concurrently bool + ViewName TableName + WithData bool + ImplicitSelect SelectStatement // This is mutated during analysis. + } + + // DDL represents a CREATE, ALTER, DROP, RENAME, TRUNCATE or ANALYZE statement. + DDL struct { + Action string + + // FromTables is set if Action is RenameStr or DropStr. + FromTables TableNames + + // ToTables is set if Action is RenameStr. + ToTables TableNames + + // Table is set if Action is other than RenameStr or DropStr. + Table TableName + + // The following fields are set if a DDL was fully analyzed. + IfExists bool + IfNotExists bool + TableSpec *TableSpec + OptLike *OptLike + OrReplace bool + PartitionSpec *PartitionSpec + + // VindexSpec is set for CreateVindexStr, DropVindexStr, AddColVindexStr, DropColVindexStr. + VindexSpec *VindexSpec + + // VindexCols is set for AddColVindexStr. + VindexCols []ColIdent + + // AutoIncSpec is set for AddAutoIncStr. + AutoIncSpec *AutoIncSpec + + // SelectStatement is set for Create View. + SelectStatement SelectStatement + + // Modifier is optional for Create View and Create Table. + Modifier string + } + + // ParenSelect is a parenthesized SELECT statement. + ParenSelect struct { + Select SelectStatement + } + + // Show represents a show statement. + Show struct { + Extended string + Type string + OnTable TableName + Table TableName + ShowTablesOpt *ShowTablesOpt + Scope string + ShowCollationFilterOpt Expr + Columns Columns + Comments Comments + } + + // Use represents a use statement. + Use struct { + DBName TableIdent + } + + // Begin represents a Begin statement. + Begin struct{} + + // Commit represents a Commit statement. + Commit struct{} + + // Rollback represents a Rollback statement. + Rollback struct{} + + // SRollback represents a rollback to savepoint statement. + SRollback struct { + Name ColIdent + } + + // Savepoint represents a savepoint statement. + Savepoint struct { + Name ColIdent + } + + // Release represents a release savepoint statement. + Release struct { + Name ColIdent + } + + // Explain represents an EXPLAIN statement + Explain struct { + Type string + Statement Statement + } + + // OtherRead represents a DESCRIBE, or EXPLAIN statement. + // It should be used only as an indicator. It does not contain + // the full AST for the statement. + OtherRead struct{} + + DescribeTable struct { + Full string + Extended string + Table TableName + } + + Auth struct { + SessionAuth BoolVal + Provider string + Type string + KeyFilePath string + KeyEnvVar string + } + + Registry struct { + ActionType string + ProviderId string + ProviderVersion string + } + + AuthRevoke struct { + SessionAuth BoolVal + Provider string + } + + // OtherAdmin represents a misc statement that relies on ADMIN privileges, + // such as REPAIR, OPTIMIZE, or TRUNCATE statement. + // It should be used only as an indicator. It does not contain + // the full AST for the statement. + OtherAdmin struct{} +) + +func (*Union) iStatement() {} +func (*Select) iStatement() {} +func (*Stream) iStatement() {} +func (*Insert) iStatement() {} +func (*Update) iStatement() {} +func (*Delete) iStatement() {} +func (*Set) iStatement() {} +func (*SetTransaction) iStatement() {} +func (*DBDDL) iStatement() {} +func (*DDL) iStatement() {} +func (*Show) iStatement() {} +func (*Use) iStatement() {} +func (*Begin) iStatement() {} +func (*Commit) iStatement() {} +func (*Rollback) iStatement() {} +func (*SRollback) iStatement() {} +func (*Savepoint) iStatement() {} +func (*Release) iStatement() {} +func (*Explain) iStatement() {} +func (*OtherRead) iStatement() {} +func (*Auth) iStatement() {} +func (*Registry) iStatement() {} +func (*AuthRevoke) iStatement() {} +func (*Exec) iStatement() {} +func (*Purge) iStatement() {} +func (*NativeQuery) iStatement() {} +func (*DescribeTable) iStatement() {} +func (*OtherAdmin) iStatement() {} +func (*Select) iSelectStatement() {} +func (*Union) iSelectStatement() {} +func (*ParenSelect) iSelectStatement() {} +func (*RefreshMaterializedView) iStatement() {} +func (*Sleep) iStatement() {} + +// ParenSelect can actually not be a top level statement, +// but we have to allow it because it's a requirement +// of SelectStatement. +func (*ParenSelect) iStatement() {} + +// InsertRows represents the rows for an INSERT statement. +type InsertRows interface { + iInsertRows() + SQLNode +} + +func (*Select) iInsertRows() {} +func (*Union) iInsertRows() {} +func (Values) iInsertRows() {} +func (*ParenSelect) iInsertRows() {} + +// OptLike works for create table xxx like xxx +type OptLike struct { + LikeTable TableName +} + +// PartitionSpec describe partition actions (for alter and create) +type PartitionSpec struct { + Action string + Name ColIdent + Definitions []*PartitionDefinition +} + +// PartitionDefinition describes a very minimal partition definition +type PartitionDefinition struct { + Name ColIdent + Limit Expr + Maxvalue bool +} + +// TableSpec describes the structure of a table from a CREATE TABLE statement +type TableSpec struct { + Columns []*ColumnDefinition + Indexes []*IndexDefinition + Constraints []*ConstraintDefinition + Options string +} + +// ColumnDefinition describes a column in a CREATE TABLE statement +type ColumnDefinition struct { + Name ColIdent + // TODO: Should this not be a reference? + Type ColumnType +} + +// ColumnType represents a sql type in a CREATE TABLE statement +// All optional fields are nil if not specified +type ColumnType struct { + // The base type string + Type string + + // Generic field options. + NotNull BoolVal + Autoincrement BoolVal + Default Expr + OnUpdate Expr + Comment *SQLVal + + // Numeric field options + Length *SQLVal + Unsigned BoolVal + Zerofill BoolVal + Scale *SQLVal + + // Text field options + Charset string + Collate string + + // Enum values + EnumValues []string + + // Key specification + KeyOpt ColumnKeyOption +} + +// IndexDefinition describes an index in a CREATE TABLE statement +type IndexDefinition struct { + Info *IndexInfo + Columns []*IndexColumn + Options []*IndexOption +} + +// IndexInfo describes the name and type of an index in a CREATE TABLE statement +type IndexInfo struct { + Type string + Name ColIdent + Primary bool + Spatial bool + Unique bool +} + +// VindexSpec defines a vindex for a CREATE VINDEX or DROP VINDEX statement +type VindexSpec struct { + Name ColIdent + Type ColIdent + Params []VindexParam +} + +// AutoIncSpec defines and autoincrement value for a ADD AUTO_INCREMENT statement +type AutoIncSpec struct { + Column ColIdent + Sequence TableName +} + +// VindexParam defines a key/value parameter for a CREATE VINDEX statement +type VindexParam struct { + Key ColIdent + Val string +} + +// ConstraintDefinition describes a constraint in a CREATE TABLE statement +type ConstraintDefinition struct { + Name string + Details ConstraintInfo +} + +type ( + // ConstraintInfo details a constraint in a CREATE TABLE statement + ConstraintInfo interface { + SQLNode + iConstraintInfo() + } + + // ForeignKeyDefinition describes a foreign key in a CREATE TABLE statement + ForeignKeyDefinition struct { + Source Columns + ReferencedTable TableName + ReferencedColumns Columns + OnDelete ReferenceAction + OnUpdate ReferenceAction + } +) + +// ShowFilter is show tables filter +type ShowFilter struct { + Like string + Filter Expr +} + +// Comments represents a list of comments. +type Comments [][]byte + +// SelectExprs represents SELECT expressions. +type SelectExprs []SelectExpr + +type ( + // SelectExpr represents a SELECT expression. + SelectExpr interface { + iSelectExpr() + SQLNode + } + + // StarExpr defines a '*' or 'table.*' expression. + StarExpr struct { + TableName TableName + } + + // AliasedExpr defines an aliased SELECT expression. + AliasedExpr struct { + Expr Expr + As ColIdent + } + + // Nextval defines the NEXT VALUE expression. + Nextval struct { + Expr Expr + } +) + +func (*StarExpr) iSelectExpr() {} +func (*AliasedExpr) iSelectExpr() {} +func (Nextval) iSelectExpr() {} + +// Columns represents an insert column list. +type Columns []ColIdent + +// Partitions is a type alias for Columns so we can handle printing efficiently +type Partitions Columns + +// TableExprs represents a list of table expressions. +type TableExprs []TableExpr + +type ( + // TableExpr represents a table expression. + TableExpr interface { + iTableExpr() + SQLNode + } + + // AliasedTableExpr represents a table expression + // coupled with an optional alias or index hint. + // If As is empty, no alias was used. + AliasedTableExpr struct { + Expr SimpleTableExpr + Partitions Partitions + As TableIdent + Hints *IndexHints + } + + // JoinTableExpr represents a TableExpr that's a JOIN operation. + JoinTableExpr struct { + LeftExpr TableExpr + Join string + RightExpr TableExpr + Condition JoinCondition + } + + // ParenTableExpr represents a parenthesized list of TableExpr. + ParenTableExpr struct { + Exprs TableExprs + } + + // TableValuedFuncTableExpr represents a call to a table-valued function. + TableValuedFuncTableExpr struct { + FuncExpr Expr + As TableIdent + } +) + +func (*AliasedTableExpr) iTableExpr() {} +func (*ParenTableExpr) iTableExpr() {} +func (*JoinTableExpr) iTableExpr() {} +func (*ExecSubquery) iTableExpr() {} +func (*Union) iTableExpr() {} +func (*TableValuedFuncTableExpr) iTableExpr() {} + +type ( + // SimpleTableExpr represents a simple table expression. + SimpleTableExpr interface { + iSimpleTableExpr() + SQLNode + } + + // TableName represents a table name. + // Qualifier, if specified, represents a database or keyspace. + // TableName is a value struct whose fields are case sensitive. + // This means two TableName vars can be compared for equality + // and a TableName can also be used as key in a map. + TableName struct { + Name, Qualifier, QualifierSecond, QualifierThird TableIdent + } + + // Subquery represents a subquery. + Subquery struct { + Select SelectStatement + } + + ExecSubquery struct { + Exec *Exec + } +) + +func (TableName) iSimpleTableExpr() {} +func (*Subquery) iSimpleTableExpr() {} +func (*ExecSubquery) iSimpleTableExpr() {} + +// TableNames is a list of TableName. +type TableNames []TableName + +type ExecVarDef struct { + ColIdent ColIdent + Val Expr +} + +// JoinCondition represents the join conditions (either a ON or USING clause) +// of a JoinTableExpr. +type JoinCondition struct { + On Expr + Using Columns +} + +// IndexHints represents a list of index hints. +type IndexHints struct { + Type string + Indexes []ColIdent +} + +// Where represents a WHERE or HAVING clause. +type Where struct { + Type string + Expr Expr +} + +// *********** Expressions +type ( + // Expr represents an expression. + Expr interface { + iExpr() + SQLNode + } + + // AndExpr represents an AND expression. + AndExpr struct { + Left, Right Expr + } + + // OrExpr represents an OR expression. + OrExpr struct { + Left, Right Expr + } + + // XorExpr represents an XOR expression. + XorExpr struct { + Left, Right Expr + } + + // NotExpr represents a NOT expression. + NotExpr struct { + Expr Expr + } + + // ComparisonExpr represents a two-value comparison expression. + ComparisonExpr struct { + Operator string + Left, Right Expr + Escape Expr + } + + // RangeCond represents a BETWEEN or a NOT BETWEEN expression. + RangeCond struct { + Operator string + Left Expr + From, To Expr + } + + // IsExpr represents an IS ... or an IS NOT ... expression. + IsExpr struct { + Operator string + Expr Expr + } + + // ExistsExpr represents an EXISTS expression. + ExistsExpr struct { + Subquery *Subquery + } + + // SQLVal represents a single value. + SQLVal struct { + Type ValType + Val []byte + } + + // NullVal represents a NULL value. + NullVal struct{} + + // BoolVal is true or false. + BoolVal bool + + // ColName represents a column name. + ColName struct { + // Metadata is not populated by the parser. + // It's a placeholder for analyzers to store + // additional data, typically info about which + // table or column this node references. + Metadata interface{} + Name ColIdent + Qualifier TableName + } + + // ColTuple represents a list of column values. + // It can be ValTuple, Subquery, ListArg. + ColTuple interface { + iColTuple() + Expr + } + + // ListArg represents a named list argument. + ListArg []byte + + // ListArgConcatamer represents a concatamer string of apparent list arguments / postgres cast operators. + ListArgConcatamer []ListArg + + // ValTuple represents a tuple of actual values. + ValTuple Exprs + + // BinaryExpr represents a binary value expression. + BinaryExpr struct { + Operator string + Left, Right Expr + } + + // UnaryExpr represents a unary value expression. + UnaryExpr struct { + Operator string + Expr Expr + } + + // UnaryCastConcatamerExpr represents an expression with a casting concatamer suffix. + UnaryCastConcatamerExpr struct { + CastConcatamer ListArgConcatamer + Expr Expr + } + + // IntervalExpr represents a date-time INTERVAL expression. + IntervalExpr struct { + Expr Expr + Unit string + } + + // TimestampFuncExpr represents the function and arguments for TIMESTAMP{ADD,DIFF} functions. + TimestampFuncExpr struct { + Name string + Expr1 Expr + Expr2 Expr + Unit string + } + + // CollateExpr represents dynamic collate operator. + CollateExpr struct { + Expr Expr + Charset string + } + + // FuncExpr represents a function call. + FuncExpr struct { + Qualifier TableIdent + Name ColIdent + Distinct bool + Exprs SelectExprs + Over *OverClause + } + + // OverClause represents an OVER clause for window functions + OverClause struct { + WindowName ColIdent + WindowSpec *WindowSpec + } + + // WindowSpec represents a window specification for window functions + WindowSpec struct { + PartitionBy Exprs + OrderBy OrderBy + Frame *FrameClause + } + + // FrameClause represents a frame clause in a window specification + FrameClause struct { + Unit string // ROWS or RANGE + Start *FramePoint + End *FramePoint + } + + // FramePoint represents a frame boundary point + FramePoint struct { + Type string // UNBOUNDED PRECEDING, CURRENT ROW, etc. + Expr Expr // for N PRECEDING or N FOLLOWING + } + + // GroupConcatExpr represents a call to GROUP_CONCAT + GroupConcatExpr struct { + Distinct string + Exprs SelectExprs + OrderBy OrderBy + Separator string + Limit *Limit + } + + // ValuesFuncExpr represents a function call. + ValuesFuncExpr struct { + Name *ColName + } + + // SubstrExpr represents a call to SubstrExpr(column, value_expression) or SubstrExpr(column, value_expression,value_expression) + // also supported syntax SubstrExpr(column from value_expression for value_expression). + // Additionally to column names, SubstrExpr is also supported for string values, e.g.: + // SubstrExpr('static string value', value_expression, value_expression) + // In this case StrVal will be set instead of Name. + SubstrExpr struct { + Name *ColName + StrVal *SQLVal + From Expr + To Expr + } + + // ConvertExpr represents a call to CONVERT(expr, type) + // or it's equivalent CAST(expr AS type). Both are rewritten to the former. + ConvertExpr struct { + Expr Expr + Type *ConvertType + } + + // ConvertUsingExpr represents a call to CONVERT(expr USING charset). + ConvertUsingExpr struct { + Expr Expr + Type string + } + + // MatchExpr represents a call to the MATCH function + MatchExpr struct { + Columns SelectExprs + Expr Expr + Option string + } + + // CaseExpr represents a CASE expression. + CaseExpr struct { + Expr Expr + Whens []*When + Else Expr + } + + // Default represents a DEFAULT expression. + Default struct { + ColName string + } + + // When represents a WHEN sub-expression. + When struct { + Cond Expr + Val Expr + } + + // CurTimeFuncExpr represents the function and arguments for CURRENT DATE/TIME functions + // supported functions are documented in the grammar + CurTimeFuncExpr struct { + Name ColIdent + Fsp Expr // fractional seconds precision, integer from 0 to 6 + } +) + +// iExpr ensures that only expressions nodes can be assigned to a Expr +func (*AndExpr) iExpr() {} +func (*OrExpr) iExpr() {} +func (*XorExpr) iExpr() {} +func (*NotExpr) iExpr() {} +func (*ComparisonExpr) iExpr() {} +func (*RangeCond) iExpr() {} +func (*IsExpr) iExpr() {} +func (*ExistsExpr) iExpr() {} +func (*SQLVal) iExpr() {} +func (*NullVal) iExpr() {} +func (BoolVal) iExpr() {} +func (*ColName) iExpr() {} +func (ValTuple) iExpr() {} +func (*Subquery) iExpr() {} +func (ListArg) iExpr() {} +func (*BinaryExpr) iExpr() {} +func (*UnaryExpr) iExpr() {} +func (*UnaryCastConcatamerExpr) iExpr() {} +func (*IntervalExpr) iExpr() {} +func (*CollateExpr) iExpr() {} +func (*FuncExpr) iExpr() {} +func (*TimestampFuncExpr) iExpr() {} +func (*CurTimeFuncExpr) iExpr() {} +func (*CaseExpr) iExpr() {} +func (*ValuesFuncExpr) iExpr() {} +func (*ConvertExpr) iExpr() {} +func (*SubstrExpr) iExpr() {} +func (*ConvertUsingExpr) iExpr() {} +func (*MatchExpr) iExpr() {} +func (*GroupConcatExpr) iExpr() {} +func (*Default) iExpr() {} + +// Exprs represents a list of value expressions. +// It's not a valid expression because it's not parenthesized. +type Exprs []Expr + +func (ValTuple) iColTuple() {} +func (*Subquery) iColTuple() {} +func (ListArg) iColTuple() {} + +// ConvertType represents the type in call to CONVERT(expr, type) +type ConvertType struct { + Type string + Length *SQLVal + Scale *SQLVal + Operator string + Charset string +} + +// GroupBy represents a GROUP BY clause. +type GroupBy []Expr + +// OrderBy represents an ORDER By clause. +type OrderBy []*Order + +// Order represents an ordering expression. +type Order struct { + Expr Expr + Direction string +} + +// Limit represents a LIMIT clause. +type Limit struct { + Offset, Rowcount Expr +} + +// Values represents a VALUES clause. +type Values []ValTuple + +// UpdateExprs represents a list of update expressions. +type UpdateExprs []*UpdateExpr + +// UpdateExpr represents an update expression. +type UpdateExpr struct { + Name *ColName + Expr Expr +} + +// SetExprs represents a list of set expressions. +type SetExprs []*SetExpr + +// SetExpr represents a set expression. +type SetExpr struct { + Scope string + Name ColIdent + Expr Expr +} + +// OnDup represents an ON DUPLICATE KEY clause. +type OnDup UpdateExprs + +// ColIdent is a case insensitive SQL identifier. It will be escaped with +// backquotes if necessary. +type ColIdent struct { + // This artifact prevents this struct from being compared + // with itself. It consumes no space as long as it's not the + // last field in the struct. + _ [0]struct{ _ []byte } + val, lowered string + at AtCount +} + +// TableIdent is a case sensitive SQL identifier. It will be escaped with +// backquotes if necessary. +type TableIdent struct { + v string +} + +// Here follow all the Format implementations for AST nodes + +// Format formats the node. +func (node *Select) Format(buf *TrackedBuffer) { + if node.With != nil { + buf.astPrintf(node, "%v ", node.With) + } + var options string + addIf := func(b bool, s string) { + if b { + options += s + } + } + addIf(node.Distinct, DistinctStr) + if node.Cache != nil { + if *node.Cache { + options += SQLCacheStr + } else { + options += SQLNoCacheStr + } + } + addIf(node.StraightJoinHint, StraightJoinHint) + addIf(node.SQLCalcFoundRows, SQLCalcFoundRowsStr) + + buf.astPrintf(node, "select %v%s%v from %v%v%v%v%v%v%s", + node.Comments, options, node.SelectExprs, + node.From, node.Where, + node.GroupBy, node.Having, node.OrderBy, + node.Limit, node.Lock) +} + +// Format formats the node. +func (node *With) Format(buf *TrackedBuffer) { + buf.WriteString("with ") + if node.Recursive { + buf.WriteString("recursive ") + } + for i, cte := range node.CTEs { + if i > 0 { + buf.WriteString(", ") + } + buf.astPrintf(node, "%v", cte) + } +} + +// Format formats the node. +func (node *CommonTableExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v", node.Name) + if len(node.Columns) > 0 { + buf.astPrintf(node, "(%v)", node.Columns) + } + buf.astPrintf(node, " as %v", node.Subquery) +} + +func (node *Exec) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "exec %v %v", node.Comments, node.MethodName) +} + +func (node *Purge) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "purge %v %v", node.Comments, node.Target) +} + +func (node *NativeQuery) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "purge %v '%v'", node.Comments, node.QueryString) +} + +func (node *ExecSubquery) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "( exec %v )", node.Exec) +} + +// Format formats the node. +func (node *ParenSelect) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "(%v)", node.Select) +} + +// Format formats the node. +func (node *TableValuedFuncTableExpr) Format(buf *TrackedBuffer) { + if node.As.IsEmpty() { + buf.astPrintf(node, "%v", node.FuncExpr) + return + } + buf.astPrintf(node, "%v as \"%v\"", node.FuncExpr, node.As) +} + +// Format formats the node. +func (node *Auth) Format(buf *TrackedBuffer) { + var infraql_opt string + if node.SessionAuth { + infraql_opt = "infraql " + } + buf.astPrintf(node, "%sAUTH %s %s %s %s", infraql_opt, node.Provider, node.Type, node.KeyFilePath, node.KeyEnvVar) +} + +// Format formats the node. +func (node *Registry) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "AUTH %s %s", node.ActionType, node.ProviderId) +} + +// Format formats the node. +func (node *AuthRevoke) Format(buf *TrackedBuffer) { + var infraql_opt string + if node.SessionAuth { + infraql_opt = "infraql " + } + buf.astPrintf(node, "%sauth revoke %s", infraql_opt, node.Provider) +} + +// Format formats the node. +func (node *Sleep) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "sleep %v", node.Duration) +} + +// Format formats the node. +func (node *Union) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v", node.FirstStatement) + for _, us := range node.UnionSelects { + buf.astPrintf(node, "%v", us) + } + buf.astPrintf(node, "%v%v%s", node.OrderBy, node.Limit, node.Lock) +} + +// Format formats the node. +func (node *UnionSelect) Format(buf *TrackedBuffer) { + buf.astPrintf(node, " %s %v", node.Type, node.Statement) +} + +// Format formats the node. +func (node *Stream) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "stream %v%v from %v", + node.Comments, node.SelectExpr, node.Table) +} + +// Format formats the node. +func (node *Insert) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", + node.Action, + node.Comments, node.Ignore, + node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup) +} + +// Format formats the node. +func (node *Update) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "update %v%s%v set %v%v%v%v", + node.Comments, node.Ignore, node.TableExprs, + node.Exprs, node.Where, node.OrderBy, node.Limit) +} + +// Format formats the node. +func (node *Delete) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "delete %v", node.Comments) + if node.Targets != nil { + buf.astPrintf(node, "%v ", node.Targets) + } + buf.astPrintf(node, "from %v%v%v%v%v", node.TableExprs, node.Partitions, node.Where, node.OrderBy, node.Limit) +} + +// Format formats the node. +func (node *Set) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "set %v%v", node.Comments, node.Exprs) +} + +// Format formats the node. +func (node *SetTransaction) Format(buf *TrackedBuffer) { + if node.Scope == "" { + buf.astPrintf(node, "set %vtransaction ", node.Comments) + } else { + buf.astPrintf(node, "set %v%s transaction ", node.Comments, node.Scope) + } + + for i, char := range node.Characteristics { + if i > 0 { + buf.WriteString(", ") + } + buf.astPrintf(node, "%v", char) + } +} + +// Format formats the node. +func (node *DBDDL) Format(buf *TrackedBuffer) { + switch node.Action { + case CreateStr, AlterStr: + buf.WriteString(fmt.Sprintf("%s database %s", node.Action, node.DBName)) + case DropStr: + exists := "" + if node.IfExists { + exists = " if exists" + } + buf.WriteString(fmt.Sprintf("%s database%s %v", node.Action, exists, node.DBName)) + } +} + +func (node *RefreshMaterializedView) Format(buf *TrackedBuffer) { + if node.Concurrently { + buf.astPrintf(node, "refresh materialized view concurrently %v", node.ViewName) + return + } + if !node.WithData { + buf.astPrintf(node, "refresh materialized view %v with no data", node.ViewName) + return + } + buf.astPrintf(node, "refresh materialized view %v", node.ViewName) +} + +// Format formats the node. +func (node *DDL) Format(buf *TrackedBuffer) { + switch node.Action { + case CreateStr: + if node.OptLike != nil { + buf.astPrintf(node, "%s table %v %v", node.Action, node.Table, node.OptLike) + } else if node.TableSpec != nil { + buf.astPrintf(node, "%s table %v %v", node.Action, node.Table, node.TableSpec) + } else { + buf.astPrintf(node, "%s table %v", node.Action, node.Table) + } + case DropStr: + exists := "" + if node.IfExists { + exists = " if exists" + } + buf.astPrintf(node, "%s table%s %v", node.Action, exists, node.FromTables) + case RenameStr: + buf.astPrintf(node, "%s table %v to %v", node.Action, node.FromTables[0], node.ToTables[0]) + for i := 1; i < len(node.FromTables); i++ { + buf.astPrintf(node, ", %v to %v", node.FromTables[i], node.ToTables[i]) + } + case AlterStr: + if node.PartitionSpec != nil { + buf.astPrintf(node, "%s table %v %v", node.Action, node.Table, node.PartitionSpec) + } else { + buf.astPrintf(node, "%s table %v", node.Action, node.Table) + } + case FlushStr: + buf.astPrintf(node, "%s", node.Action) + case CreateVindexStr: + buf.astPrintf(node, "alter vschema create vindex %v %v", node.Table, node.VindexSpec) + case DropVindexStr: + buf.astPrintf(node, "alter vschema drop vindex %v", node.Table) + case AddVschemaTableStr: + buf.astPrintf(node, "alter vschema add table %v", node.Table) + case DropVschemaTableStr: + buf.astPrintf(node, "alter vschema drop table %v", node.Table) + case AddColVindexStr: + buf.astPrintf(node, "alter vschema on %v add vindex %v (", node.Table, node.VindexSpec.Name) + for i, col := range node.VindexCols { + if i != 0 { + buf.astPrintf(node, ", %v", col) + } else { + buf.astPrintf(node, "%v", col) + } + } + buf.astPrintf(node, ")") + if node.VindexSpec.Type.String() != "" { + buf.astPrintf(node, " %v", node.VindexSpec) + } + case DropColVindexStr: + buf.astPrintf(node, "alter vschema on %v drop vindex %v", node.Table, node.VindexSpec.Name) + case AddSequenceStr: + buf.astPrintf(node, "alter vschema add sequence %v", node.Table) + case AddAutoIncStr: + buf.astPrintf(node, "alter vschema on %v add auto_increment %v", node.Table, node.AutoIncSpec) + default: + buf.astPrintf(node, "%s table %v", node.Action, node.Table) + } +} + +// Format formats the node. +func (node *OptLike) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "like %v", node.LikeTable) +} + +// Format formats the node. +func (node *PartitionSpec) Format(buf *TrackedBuffer) { + switch node.Action { + case ReorganizeStr: + buf.astPrintf(node, "%s %v into (", node.Action, node.Name) + var prefix string + for _, pd := range node.Definitions { + buf.astPrintf(node, "%s%v", prefix, pd) + prefix = ", " + } + buf.astPrintf(node, ")") + default: + panic("unimplemented") + } +} + +// Format formats the node +func (node *PartitionDefinition) Format(buf *TrackedBuffer) { + if !node.Maxvalue { + buf.astPrintf(node, "partition %v values less than (%v)", node.Name, node.Limit) + } else { + buf.astPrintf(node, "partition %v values less than (maxvalue)", node.Name) + } +} + +// Format formats the node. +func (ts *TableSpec) Format(buf *TrackedBuffer) { + buf.astPrintf(ts, "(\n") + for i, col := range ts.Columns { + if i == 0 { + buf.astPrintf(ts, "\t%v", col) + } else { + buf.astPrintf(ts, ",\n\t%v", col) + } + } + for _, idx := range ts.Indexes { + buf.astPrintf(ts, ",\n\t%v", idx) + } + for _, c := range ts.Constraints { + buf.astPrintf(ts, ",\n\t%v", c) + } + + buf.astPrintf(ts, "\n)%s", strings.Replace(ts.Options, ", ", ",\n ", -1)) +} + +// Format formats the node. +func (col *ColumnDefinition) Format(buf *TrackedBuffer) { + buf.astPrintf(col, "%v %v", col.Name, &col.Type) +} + +// Format returns a canonical string representation of the type and all relevant options +func (ct *ColumnType) Format(buf *TrackedBuffer) { + buf.astPrintf(ct, "%s", ct.Type) + + if ct.Length != nil && ct.Scale != nil { + buf.astPrintf(ct, "(%v,%v)", ct.Length, ct.Scale) + + } else if ct.Length != nil { + buf.astPrintf(ct, "(%v)", ct.Length) + } + + if ct.EnumValues != nil { + buf.astPrintf(ct, "(%s)", strings.Join(ct.EnumValues, ", ")) + } + + opts := make([]string, 0, 16) + if ct.Unsigned { + opts = append(opts, KeywordStrings[UNSIGNED]) + } + if ct.Zerofill { + opts = append(opts, KeywordStrings[ZEROFILL]) + } + if ct.Charset != "" { + opts = append(opts, KeywordStrings[CHARACTER], KeywordStrings[SET], ct.Charset) + } + if ct.Collate != "" { + opts = append(opts, KeywordStrings[COLLATE], ct.Collate) + } + if ct.NotNull { + opts = append(opts, KeywordStrings[NOT], KeywordStrings[NULL]) + } + if ct.Default != nil { + opts = append(opts, KeywordStrings[DEFAULT], String(ct.Default)) + } + if ct.OnUpdate != nil { + opts = append(opts, KeywordStrings[ON], KeywordStrings[UPDATE], String(ct.OnUpdate)) + } + if ct.Autoincrement { + opts = append(opts, KeywordStrings[AUTO_INCREMENT]) + } + if ct.Comment != nil { + opts = append(opts, KeywordStrings[COMMENT_KEYWORD], String(ct.Comment)) + } + if ct.KeyOpt == ColKeyPrimary { + opts = append(opts, KeywordStrings[PRIMARY], KeywordStrings[KEY]) + } + if ct.KeyOpt == ColKeyUnique { + opts = append(opts, KeywordStrings[UNIQUE]) + } + if ct.KeyOpt == ColKeyUniqueKey { + opts = append(opts, KeywordStrings[UNIQUE], KeywordStrings[KEY]) + } + if ct.KeyOpt == ColKeySpatialKey { + opts = append(opts, KeywordStrings[SPATIAL], KeywordStrings[KEY]) + } + if ct.KeyOpt == ColKey { + opts = append(opts, KeywordStrings[KEY]) + } + + if len(opts) != 0 { + buf.astPrintf(ct, " %s", strings.Join(opts, " ")) + } +} + +// Format formats the node. +func (idx *IndexDefinition) Format(buf *TrackedBuffer) { + buf.astPrintf(idx, "%v (", idx.Info) + for i, col := range idx.Columns { + if i != 0 { + buf.astPrintf(idx, ", %v", col.Column) + } else { + buf.astPrintf(idx, "%v", col.Column) + } + if col.Length != nil { + buf.astPrintf(idx, "(%v)", col.Length) + } + } + buf.astPrintf(idx, ")") + + for _, opt := range idx.Options { + buf.astPrintf(idx, " %s", opt.Name) + if opt.Using != "" { + buf.astPrintf(idx, " %s", opt.Using) + } else { + buf.astPrintf(idx, " %v", opt.Value) + } + } +} + +// Format formats the node. +func (ii *IndexInfo) Format(buf *TrackedBuffer) { + if ii.Primary { + buf.astPrintf(ii, "%s", ii.Type) + } else { + buf.astPrintf(ii, "%s", ii.Type) + if !ii.Name.IsEmpty() { + buf.astPrintf(ii, " %v", ii.Name) + } + } +} + +// Format formats the node. +func (node *AutoIncSpec) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v ", node.Column) + buf.astPrintf(node, "using %v", node.Sequence) +} + +// Format formats the node. The "CREATE VINDEX" preamble was formatted in +// the containing DDL node Format, so this just prints the type, any +// parameters, and optionally the owner +func (node *VindexSpec) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "using %v", node.Type) + + numParams := len(node.Params) + if numParams != 0 { + buf.astPrintf(node, " with ") + for i, p := range node.Params { + if i != 0 { + buf.astPrintf(node, ", ") + } + buf.astPrintf(node, "%v", p) + } + } +} + +// Format formats the node. +func (node VindexParam) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s=%s", node.Key.String(), node.Val) +} + +// Format formats the node. +func (c *ConstraintDefinition) Format(buf *TrackedBuffer) { + if c.Name != "" { + buf.astPrintf(c, "constraint %s ", c.Name) + } + c.Details.Format(buf) +} + +// Format formats the node. +func (a ReferenceAction) Format(buf *TrackedBuffer) { + switch a { + case Restrict: + buf.WriteString("restrict") + case Cascade: + buf.WriteString("cascade") + case NoAction: + buf.WriteString("no action") + case SetNull: + buf.WriteString("set null") + case SetDefault: + buf.WriteString("set default") + } +} + +// Format formats the node. +func (f *ForeignKeyDefinition) Format(buf *TrackedBuffer) { + buf.astPrintf(f, "foreign key %v references %v %v", f.Source, f.ReferencedTable, f.ReferencedColumns) + if f.OnDelete != DefaultAction { + buf.astPrintf(f, " on delete %v", f.OnDelete) + } + if f.OnUpdate != DefaultAction { + buf.astPrintf(f, " on update %v", f.OnUpdate) + } +} + +// Format formats the node. +func (node *Show) Format(buf *TrackedBuffer) { + nodeType := strings.ToLower(node.Type) + if (nodeType == "tables" || nodeType == "columns" || nodeType == "fields" || nodeType == "index" || nodeType == "keys" || nodeType == "indexes") && node.ShowTablesOpt != nil { + opt := node.ShowTablesOpt + if node.Extended != "" { + buf.astPrintf(node, "show %s%s", node.Extended, nodeType) + } else { + buf.astPrintf(node, "show %s%s", opt.Full, nodeType) + } + if (nodeType == "columns" || nodeType == "fields") && node.HasOnTable() { + buf.astPrintf(node, " from %v", node.OnTable) + } + if (nodeType == "index" || nodeType == "keys" || nodeType == "indexes") && node.HasOnTable() { + buf.astPrintf(node, " from %v", node.OnTable) + } + if opt.DbName != "" { + buf.astPrintf(node, " from %s", opt.DbName) + } + buf.astPrintf(node, "%v", opt.Filter) + return + } + if node.Scope == "" { + buf.astPrintf(node, "show %s", nodeType) + } else { + buf.astPrintf(node, "show %s %s", node.Scope, nodeType) + } + if node.HasOnTable() { + buf.astPrintf(node, " on %v", node.OnTable) + } + if nodeType == "collation" && node.ShowCollationFilterOpt != nil { + buf.astPrintf(node, " where %v", node.ShowCollationFilterOpt) + } + if nodeType == "charset" && node.ShowTablesOpt != nil { + buf.astPrintf(node, "%v", node.ShowTablesOpt.Filter) + } + if node.HasTable() { + buf.astPrintf(node, " %v", node.Table) + } +} + +// Format formats the node. +func (node *ShowFilter) Format(buf *TrackedBuffer) { + if node == nil { + return + } + if node.Like != "" { + buf.astPrintf(node, " like '%s'", node.Like) + } else { + buf.astPrintf(node, " where %v", node.Filter) + } +} + +// Format formats the node. +func (node *Use) Format(buf *TrackedBuffer) { + if node.DBName.v != "" { + buf.astPrintf(node, "use %v", node.DBName) + } else { + buf.astPrintf(node, "use") + } +} + +// Format formats the node. +func (node *Commit) Format(buf *TrackedBuffer) { + buf.WriteString("commit") +} + +// Format formats the node. +func (node *Begin) Format(buf *TrackedBuffer) { + buf.WriteString("begin") +} + +// Format formats the node. +func (node *Rollback) Format(buf *TrackedBuffer) { + buf.WriteString("rollback") +} + +// Format formats the node. +func (node *SRollback) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "rollback to %v", node.Name) +} + +// Format formats the node. +func (node *Savepoint) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "savepoint %v", node.Name) +} + +// Format formats the node. +func (node *Release) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "release savepoint %v", node.Name) +} + +// Format formats the node. +func (node *Explain) Format(buf *TrackedBuffer) { + format := "" + switch node.Type { + case "": // do nothing + case AnalyzeStr: + format = AnalyzeStr + " " + default: + format = "format = " + node.Type + " " + } + buf.astPrintf(node, "explain %s%v", format, node.Statement) +} + +// Format formats the node. +func (node *OtherRead) Format(buf *TrackedBuffer) { + buf.WriteString("otherread") +} + +// Format formats the node. +func (node *DescribeTable) Format(buf *TrackedBuffer) { + buf.WriteString("describetable") +} + +// Format formats the node. +func (node *OtherAdmin) Format(buf *TrackedBuffer) { + buf.WriteString("otheradmin") +} + +// Format formats the node. +func (node Comments) Format(buf *TrackedBuffer) { + for _, c := range node { + buf.astPrintf(node, "%s ", c) + } +} + +// Format formats the node. +func (node SelectExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node *StarExpr) Format(buf *TrackedBuffer) { + if !node.TableName.IsEmpty() { + buf.astPrintf(node, "%v.", node.TableName) + } + buf.astPrintf(node, "*") +} + +// Format formats the node. +func (node *AliasedExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v", node.Expr) + if !node.As.IsEmpty() { + buf.astPrintf(node, " as %v", node.As) + } +} + +// Format formats the node. +func (node Nextval) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "next %v values", node.Expr) +} + +// Format formats the node. +func (node Columns) Format(buf *TrackedBuffer) { + if node == nil { + return + } + prefix := "(" + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + buf.WriteString(")") +} + +// Format formats the node +func (node Partitions) Format(buf *TrackedBuffer) { + if node == nil { + return + } + prefix := " partition (" + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + buf.WriteString(")") +} + +// Format formats the node. +func (node TableExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node *AliasedTableExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v%v", node.Expr, node.Partitions) + if !node.As.IsEmpty() { + buf.astPrintf(node, " as %v", node.As) + } + if node.Hints != nil { + // Hint node provides the space padding. + buf.astPrintf(node, "%v", node.Hints) + } +} + +// Format formats the node. +func (node TableNames) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node TableName) Format(buf *TrackedBuffer) { + if node.IsEmpty() { + return + } + buf.WriteString(`"`) + buf.WriteString(node.GetRawVal()) + buf.WriteString(`"`) +} + +// Format formats the node. +func (node *ParenTableExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "(%v)", node.Exprs) +} + +// Format formats the node. +func (node JoinCondition) Format(buf *TrackedBuffer) { + if node.On != nil { + buf.astPrintf(node, " on %v", node.On) + } + if node.Using != nil { + buf.astPrintf(node, " using %v", node.Using) + } +} + +// Format formats the node. +func (node *JoinTableExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v %s %v%v", node.LeftExpr, node.Join, node.RightExpr, node.Condition) +} + +// Format formats the node. +func (node *IndexHints) Format(buf *TrackedBuffer) { + buf.astPrintf(node, " %sindex ", node.Type) + if len(node.Indexes) == 0 { + buf.astPrintf(node, "()") + } else { + prefix := "(" + for _, n := range node.Indexes { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } + buf.astPrintf(node, ")") + } +} + +// Format formats the node. +func (node *Where) Format(buf *TrackedBuffer) { + if node == nil || node.Expr == nil { + return + } + buf.astPrintf(node, " %s %v", node.Type, node.Expr) +} + +// Format formats the node. +func (node Exprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node *AndExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v and %v", node.Left, node.Right) +} + +// Format formats the node. +func (node *OrExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v || %v", node.Left, node.Right) +} + +// Format formats the node. +func (node *XorExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v xor %v", node.Left, node.Right) +} + +// Format formats the node. +func (node *NotExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "not %v", node.Expr) +} + +// Format formats the node. +func (node *ComparisonExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v %s %v", node.Left, node.Operator, node.Right) + if node.Escape != nil { + buf.astPrintf(node, " escape %v", node.Escape) + } +} + +// Format formats the node. +func (node *RangeCond) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v %s %v and %v", node.Left, node.Operator, node.From, node.To) +} + +// Format formats the node. +func (node *IsExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v %s", node.Expr, node.Operator) +} + +// Format formats the node. +func (node *ExistsExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "exists %v", node.Subquery) +} + +// Format formats the node. +func (node *SQLVal) Format(buf *TrackedBuffer) { + switch node.Type { + case StrVal: + sqltypes.MakeTrusted(sqltypes.VarBinary, node.Val).EncodeSQL(buf) + case IntVal, FloatVal, HexNum: + buf.astPrintf(node, "%s", node.Val) + case HexVal: + buf.astPrintf(node, "X'%s'", node.Val) + case BitVal: + buf.astPrintf(node, "B'%s'", node.Val) + case ValArg: + buf.WriteArg(string(node.Val)) + default: + panic("unexpected") + } +} + +// Format formats the node. +func (node *NullVal) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "null") +} + +// Format formats the node. +func (node BoolVal) Format(buf *TrackedBuffer) { + if node { + buf.astPrintf(node, "true") + } else { + buf.astPrintf(node, "false") + } +} + +// Format formats the node. +func (node *ColName) Format(buf *TrackedBuffer) { + if !node.Qualifier.IsEmpty() { + buf.astPrintf(node, "%v.", node.Qualifier) + } + if !buf.IsDelimitCols() { + buf.astPrintf(node, "%v", node.Name) + return + } + if node.Name.IsEmpty() { + return + } + buf.WriteString(`"`) + buf.WriteString(node.Name.GetRawVal()) + buf.WriteString(`"`) +} + +// Format formats the node. +func (node ValTuple) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "(%v)", Exprs(node)) +} + +// Format formats the node. +func (node *Subquery) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "(%v)", node.Select) +} + +// Format formats the node. +func (node ListArg) Format(buf *TrackedBuffer) { + buf.WriteArg(string(node)) +} + +// Format formats the node. +func (node *BinaryExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v %s %v", node.Left, node.Operator, node.Right) +} + +// Format formats the node. +func (node *UnaryCastConcatamerExpr) Format(buf *TrackedBuffer) { + var s string + for _, n := range node.CastConcatamer { + s = s + string(n) + } + buf.astPrintf(node, "%v%s", node.Expr, s) +} + +// Format formats the node. +func (node *UnaryExpr) Format(buf *TrackedBuffer) { + if _, unary := node.Expr.(*UnaryExpr); unary { + // They have same precedence so parenthesis is not required. + buf.astPrintf(node, "%s %v", node.Operator, node.Expr) + return + } + buf.astPrintf(node, "%s%v", node.Operator, node.Expr) +} + +// Format formats the node. +func (node *IntervalExpr) Format(buf *TrackedBuffer) { + nb := NewTrackedBuffer(nil) + nb.astPrintf(node, "%v", node.Expr) + es := strings.Trim(nb.String(), "'") + buf.astPrintf(node, "interval '%s %s'", es, node.Unit) +} + +// Format formats the node. +func (node *TimestampFuncExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%s, %v, %v)", node.Name, node.Unit, node.Expr1, node.Expr2) +} + +// Format formats the node. +func (node *CurTimeFuncExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%v)", node.Name.String(), node.Fsp) +} + +// Format formats the node. +func (node *CollateExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v collate %s", node.Expr, node.Charset) +} + +// Format formats the node. +func (node *FuncExpr) Format(buf *TrackedBuffer) { + var distinct string + if node.Distinct { + distinct = "distinct " + } + if !node.Qualifier.IsEmpty() { + buf.astPrintf(node, "%v.", node.Qualifier) + } + // Function names should not be back-quoted even + // if they match a reserved word, only if they contain illegal characters + funcName := node.Name.String() + + if ContainEscapableChars(funcName, NoAt) { + WriteEscapedString(buf, funcName) + } else { + buf.WriteString(funcName) + } + buf.astPrintf(node, "(%s%v)", distinct, node.Exprs) + if node.Over != nil { + buf.astPrintf(node, " %v", node.Over) + } +} + +// Format formats the node. +func (node *OverClause) Format(buf *TrackedBuffer) { + buf.WriteString("over ") + if !node.WindowName.IsEmpty() { + buf.astPrintf(node, "%v", node.WindowName) + } else if node.WindowSpec != nil { + buf.astPrintf(node, "(%v)", node.WindowSpec) + } else { + buf.WriteString("()") + } +} + +// Format formats the node. +func (node *WindowSpec) Format(buf *TrackedBuffer) { + needsSpace := false + if len(node.PartitionBy) > 0 { + buf.astPrintf(node, "partition by %v", node.PartitionBy) + needsSpace = true + } + if len(node.OrderBy) > 0 { + if needsSpace { + buf.WriteString(" ") + } + buf.astPrintf(node, "order by %v", node.OrderBy) + needsSpace = true + } + if node.Frame != nil { + if needsSpace { + buf.WriteString(" ") + } + buf.astPrintf(node, "%v", node.Frame) + } +} + +// Format formats the node. +func (node *FrameClause) Format(buf *TrackedBuffer) { + buf.WriteString(node.Unit) + if node.End != nil { + buf.astPrintf(node, " between %v and %v", node.Start, node.End) + } else { + buf.astPrintf(node, " %v", node.Start) + } +} + +// Format formats the node. +func (node *FramePoint) Format(buf *TrackedBuffer) { + switch node.Type { + case UnboundedPrecedingStr, CurrentRowStr, UnboundedFollowingStr: + buf.WriteString(node.Type) + default: + buf.astPrintf(node, "%v %s", node.Expr, node.Type) + } +} + +// Format formats the node +func (node *GroupConcatExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "group_concat(%s%v%v%s%v)", node.Distinct, node.Exprs, node.OrderBy, node.Separator, node.Limit) +} + +// Format formats the node. +func (node *ValuesFuncExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "values(%v)", node.Name) +} + +// Format formats the node. +func (node *SubstrExpr) Format(buf *TrackedBuffer) { + var val interface{} + if node.Name != nil { + val = node.Name + } else { + val = node.StrVal + } + + if node.To == nil { + buf.astPrintf(node, "substr(%v, %v)", val, node.From) + } else { + buf.astPrintf(node, "substr(%v, %v, %v)", val, node.From, node.To) + } +} + +// Format formats the node. +func (node *ConvertExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "cast(%v AS %v)", node.Expr, node.Type) +} + +// Format formats the node. +func (node *ConvertUsingExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "convert(%v using %s)", node.Expr, node.Type) +} + +// Format formats the node. +func (node *ConvertType) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s", node.Type) + if node.Length != nil { + buf.astPrintf(node, "(%v", node.Length) + if node.Scale != nil { + buf.astPrintf(node, ", %v", node.Scale) + } + buf.astPrintf(node, ")") + } + if node.Charset != "" { + buf.astPrintf(node, "%s %s", node.Operator, node.Charset) + } +} + +// Format formats the node +func (node *MatchExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "match(%v) against (%v%s)", node.Columns, node.Expr, node.Option) +} + +// Format formats the node. +func (node *CaseExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "case ") + if node.Expr != nil { + buf.astPrintf(node, "%v ", node.Expr) + } + for _, when := range node.Whens { + buf.astPrintf(node, "%v ", when) + } + if node.Else != nil { + buf.astPrintf(node, "else %v ", node.Else) + } + buf.astPrintf(node, "end") +} + +// Format formats the node. +func (node *Default) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "default") + if node.ColName != "" { + buf.WriteString("(") + FormatID(buf, node.ColName, strings.ToLower(node.ColName), NoAt) + buf.WriteString(")") + } +} + +// Format formats the node. +func (node *When) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "when %v then %v", node.Cond, node.Val) +} + +// Format formats the node. +func (node GroupBy) Format(buf *TrackedBuffer) { + prefix := " group by " + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node OrderBy) Format(buf *TrackedBuffer) { + prefix := " order by " + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node *Order) Format(buf *TrackedBuffer) { + if node, ok := node.Expr.(*NullVal); ok { + buf.astPrintf(node, "%v", node) + return + } + if node, ok := node.Expr.(*FuncExpr); ok { + if node.Name.Lowered() == "rand" { + buf.astPrintf(node, "%v", node) + return + } + } + + buf.astPrintf(node, "%v %s", node.Expr, node.Direction) +} + +// Format formats the node. +func (node *Limit) Format(buf *TrackedBuffer) { + if node == nil { + return + } + buf.astPrintf(node, " limit ") + if node.Offset != nil { + buf.astPrintf(node, "%v, ", node.Offset) + } + buf.astPrintf(node, "%v", node.Rowcount) +} + +// Format formats the node. +func (node Values) Format(buf *TrackedBuffer) { + prefix := "values " + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node UpdateExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node *UpdateExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%v = %v", node.Name, node.Expr) +} + +// Format formats the node. +func (node SetExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.astPrintf(node, "%s%v", prefix, n) + prefix = ", " + } +} + +// Format formats the node. +func (node *SetExpr) Format(buf *TrackedBuffer) { + if node.Scope != "" { + buf.WriteString(node.Scope) + buf.WriteString(" ") + } + // We don't have to backtick set variable names. + switch { + case node.Name.EqualString("charset") || node.Name.EqualString("names"): + buf.astPrintf(node, "%s %v", node.Name.String(), node.Expr) + case node.Name.EqualString(TransactionStr): + sqlVal := node.Expr.(*SQLVal) + buf.astPrintf(node, "%s %s", node.Name.String(), strings.ToLower(string(sqlVal.Val))) + default: + buf.astPrintf(node, "%v = %v", node.Name, node.Expr) + } +} + +// Format formats the node. +func (node OnDup) Format(buf *TrackedBuffer) { + if node == nil { + return + } + buf.astPrintf(node, " on duplicate key update %v", UpdateExprs(node)) +} + +// Format formats the node. +func (node ColIdent) Format(buf *TrackedBuffer) { + for i := NoAt; i < node.at; i++ { + buf.WriteByte('@') + } + FormatID(buf, node.val, node.Lowered(), node.at) +} + +// Format formats the node. +func (node TableIdent) Format(buf *TrackedBuffer) { + FormatID(buf, node.v, strings.ToLower(node.v), NoAt) +} + +// AtCount return the '@' count present in ColIdent Name +func (node ColIdent) AtCount() AtCount { + return node.at +} + +func (*IsolationLevel) iChar() {} +func (*AccessMode) iChar() {} + +// Format formats the node. +func (node *IsolationLevel) Format(buf *TrackedBuffer) { + buf.WriteString("isolation level " + node.Level) +} + +// Format formats the node. +func (node *AccessMode) Format(buf *TrackedBuffer) { + buf.WriteString(node.Mode) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/ast_funcs.go b/internal/stackql-parser-fork/go/vt/sqlparser/ast_funcs.go new file mode 100644 index 00000000..ccab629f --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/ast_funcs.go @@ -0,0 +1,893 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "strings" + + "github.com/stackql/stackql-parser/go/vt/log" + + "github.com/stackql/stackql-parser/go/sqltypes" + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +// Walk calls visit on every node. +// If visit returns true, the underlying nodes +// are also visited. If it returns an error, walking +// is interrupted, and the error is returned. +func Walk(visit Visit, nodes ...SQLNode) error { + for _, node := range nodes { + if node == nil { + continue + } + var err error + var kontinue bool + pre := func(cursor *Cursor) bool { + // If we already have found an error, don't visit these nodes, just exit early + if err != nil { + return false + } + kontinue, err = visit(cursor.Node()) + if err != nil { + return true // we have to return true here so that post gets called + } + return kontinue + } + post := func(cursor *Cursor) bool { + return err == nil // now we can abort the traversal if an error was found + } + + Rewrite(node, pre, post) + if err != nil { + return err + } + } + return nil +} + +// Visit defines the signature of a function that +// can be used to visit all nodes of a parse tree. +type Visit func(node SQLNode) (kontinue bool, err error) + +// Append appends the SQLNode to the buffer. +func Append(buf *strings.Builder, node SQLNode) { + tbuf := &TrackedBuffer{ + Builder: buf, + } + node.Format(tbuf) +} + +// IndexColumn describes a column in an index definition with optional length +type IndexColumn struct { + Column ColIdent + Length *SQLVal +} + +// LengthScaleOption is used for types that have an optional length +// and scale +type LengthScaleOption struct { + Length *SQLVal + Scale *SQLVal +} + +// IndexOption is used for trailing options for indexes: COMMENT, KEY_BLOCK_SIZE, USING +type IndexOption struct { + Name string + Value *SQLVal + Using string +} + +// ColumnKeyOption indicates whether or not the given column is defined as an +// index element and contains the type of the option +type ColumnKeyOption int + +const ( + colKeyNone ColumnKeyOption = iota + ColKeyPrimary + ColKeySpatialKey + ColKeyUnique + ColKeyUniqueKey + ColKey +) + +// ReferenceAction indicates the action takes by a referential constraint e.g. +// the `CASCADE` in a `FOREIGN KEY .. ON DELETE CASCADE` table definition. +type ReferenceAction int + +// These map to the SQL-defined reference actions. +// See https://dev.mysql.com/doc/refman/8.0/en/create-table-foreign-keys.html#foreign-keys-referential-actions +const ( + // DefaultAction indicates no action was explicitly specified. + DefaultAction ReferenceAction = iota + Restrict + Cascade + NoAction + SetNull + SetDefault +) + +// ShowTablesOpt is show tables option +type ShowTablesOpt struct { + Full string + DbName string + Filter *ShowFilter +} + +// ValType specifies the type for SQLVal. +type ValType int + +// These are the possible Valtype values. +// HexNum represents a 0x... value. It cannot +// be treated as a simple value because it can +// be interpreted differently depending on the +// context. +const ( + StrVal = ValType(iota) + IntVal + FloatVal + HexNum + HexVal + ValArg + BitVal +) + +// AffectedTables returns the list table names affected by the DDL. +func (node *DDL) AffectedTables() TableNames { + if node.Action == RenameStr || node.Action == DropStr { + list := make(TableNames, 0, len(node.FromTables)+len(node.ToTables)) + list = append(list, node.FromTables...) + list = append(list, node.ToTables...) + return list + } + return TableNames{node.Table} +} + +// AddColumn appends the given column to the list in the spec +func (ts *TableSpec) AddColumn(cd *ColumnDefinition) { + ts.Columns = append(ts.Columns, cd) +} + +// AddIndex appends the given index to the list in the spec +func (ts *TableSpec) AddIndex(id *IndexDefinition) { + ts.Indexes = append(ts.Indexes, id) +} + +// AddConstraint appends the given index to the list in the spec +func (ts *TableSpec) AddConstraint(cd *ConstraintDefinition) { + ts.Constraints = append(ts.Constraints, cd) +} + +// DescribeType returns the abbreviated type information as required for +// describe table +func (ct *ColumnType) DescribeType() string { + buf := NewTrackedBuffer(nil) + buf.Myprintf("%s", ct.Type) + if ct.Length != nil && ct.Scale != nil { + buf.Myprintf("(%v,%v)", ct.Length, ct.Scale) + } else if ct.Length != nil { + buf.Myprintf("(%v)", ct.Length) + } + + opts := make([]string, 0, 16) + if ct.Unsigned { + opts = append(opts, KeywordStrings[UNSIGNED]) + } + if ct.Zerofill { + opts = append(opts, KeywordStrings[ZEROFILL]) + } + if len(opts) != 0 { + buf.Myprintf(" %s", strings.Join(opts, " ")) + } + return buf.String() +} + +// SQLType returns the sqltypes type code for the given column +func (ct *ColumnType) SQLType() querypb.Type { + switch strings.ToLower(ct.Type) { + case KeywordStrings[TINYINT]: + if ct.Unsigned { + return sqltypes.Uint8 + } + return sqltypes.Int8 + case KeywordStrings[SMALLINT]: + if ct.Unsigned { + return sqltypes.Uint16 + } + return sqltypes.Int16 + case KeywordStrings[MEDIUMINT]: + if ct.Unsigned { + return sqltypes.Uint24 + } + return sqltypes.Int24 + case KeywordStrings[INT], KeywordStrings[INTEGER]: + if ct.Unsigned { + return sqltypes.Uint32 + } + return sqltypes.Int32 + case KeywordStrings[BIGINT]: + if ct.Unsigned { + return sqltypes.Uint64 + } + return sqltypes.Int64 + case KeywordStrings[BOOL], KeywordStrings[BOOLEAN]: + return sqltypes.Uint8 + case KeywordStrings[TEXT]: + return sqltypes.Text + case KeywordStrings[TINYTEXT]: + return sqltypes.Text + case KeywordStrings[MEDIUMTEXT]: + return sqltypes.Text + case KeywordStrings[LONGTEXT]: + return sqltypes.Text + case KeywordStrings[BLOB]: + return sqltypes.Blob + case KeywordStrings[TINYBLOB]: + return sqltypes.Blob + case KeywordStrings[MEDIUMBLOB]: + return sqltypes.Blob + case KeywordStrings[LONGBLOB]: + return sqltypes.Blob + case KeywordStrings[CHAR]: + return sqltypes.Char + case KeywordStrings[VARCHAR]: + return sqltypes.VarChar + case KeywordStrings[BINARY]: + return sqltypes.Binary + case KeywordStrings[VARBINARY]: + return sqltypes.VarBinary + case KeywordStrings[DATE]: + return sqltypes.Date + case KeywordStrings[TIME]: + return sqltypes.Time + case KeywordStrings[DATETIME]: + return sqltypes.Datetime + case KeywordStrings[TIMESTAMP]: + return sqltypes.Timestamp + case KeywordStrings[YEAR]: + return sqltypes.Year + case KeywordStrings[FLOAT_TYPE]: + return sqltypes.Float32 + case KeywordStrings[DOUBLE]: + return sqltypes.Float64 + case KeywordStrings[DECIMAL]: + return sqltypes.Decimal + case KeywordStrings[BIT]: + return sqltypes.Bit + case KeywordStrings[ENUM]: + return sqltypes.Enum + case KeywordStrings[SET]: + return sqltypes.Set + case KeywordStrings[JSON]: + return sqltypes.TypeJSON + case KeywordStrings[GEOMETRY]: + return sqltypes.Geometry + case KeywordStrings[POINT]: + return sqltypes.Geometry + case KeywordStrings[LINESTRING]: + return sqltypes.Geometry + case KeywordStrings[POLYGON]: + return sqltypes.Geometry + case KeywordStrings[GEOMETRYCOLLECTION]: + return sqltypes.Geometry + case KeywordStrings[MULTIPOINT]: + return sqltypes.Geometry + case KeywordStrings[MULTILINESTRING]: + return sqltypes.Geometry + case KeywordStrings[MULTIPOLYGON]: + return sqltypes.Geometry + } + panic("unimplemented type " + ct.Type) +} + +// ParseParams parses the vindex parameter list, pulling out the special-case +// "owner" parameter +func (node *VindexSpec) ParseParams() (string, map[string]string) { + var owner string + params := map[string]string{} + for _, p := range node.Params { + if p.Key.Lowered() == VindexOwnerStr { + owner = p.Val + } else { + params[p.Key.String()] = p.Val + } + } + return owner, params +} + +var _ ConstraintInfo = &ForeignKeyDefinition{} + +func (f *ForeignKeyDefinition) iConstraintInfo() {} + +// HasOnTable returns true if the show statement has an "on" clause +func (node *Show) HasOnTable() bool { + return node.OnTable.Name.v != "" +} + +// HasTable returns true if the show statement has a parsed table name. +// Not all show statements parse table names. +func (node *Show) HasTable() bool { + return node.Table.Name.v != "" +} + +// FindColumn finds a column in the column list, returning +// the index if it exists or -1 otherwise +func (node Columns) FindColumn(col ColIdent) int { + for i, colName := range node { + if colName.Equal(col) { + return i + } + } + return -1 +} + +// RemoveHints returns a new AliasedTableExpr with the hints removed. +func (node *AliasedTableExpr) RemoveHints() *AliasedTableExpr { + noHints := *node + noHints.Hints = nil + return &noHints +} + +// IsEmpty returns true if TableName is nil or empty. +func (node TableName) IsEmpty() bool { + // If Name is empty, Qualifier is also empty. + return node.Name.IsEmpty() +} + +func (lac ListArgConcatamer) String() string { + var rv string + for _, entry := range lac { + rv = rv + string(entry) + } + return rv +} + +// ToViewName returns a TableName acceptable for use as a VIEW. VIEW names are +// always lowercase, so ToViewName lowercasese the name. Databases are case-sensitive +// so Qualifier is left untouched. +func (node TableName) ToViewName() TableName { + return TableName{ + Qualifier: node.Qualifier, + Name: NewTableIdent(strings.ToLower(node.Name.v)), + } +} + +// NewWhere creates a WHERE or HAVING clause out +// of a Expr. If the expression is nil, it returns nil. +func NewWhere(typ string, expr Expr) *Where { + if expr == nil { + return nil + } + return &Where{Type: typ, Expr: expr} +} + +// ReplaceExpr finds the from expression from root +// and replaces it with to. If from matches root, +// then to is returned. +func ReplaceExpr(root, from, to Expr) Expr { + tmp := Rewrite(root, replaceExpr(from, to), nil) + expr, success := tmp.(Expr) + if !success { + log.Errorf("Failed to rewrite expression. Rewriter returned a non-expression: " + String(tmp)) + return from + } + + return expr +} + +func replaceExpr(from, to Expr) func(cursor *Cursor) bool { + return func(cursor *Cursor) bool { + if cursor.Node() == from { + cursor.Replace(to) + } + switch cursor.Node().(type) { + case *ExistsExpr, *SQLVal, *Subquery, *ValuesFuncExpr, *Default: + return false + } + + return true + } +} + +// IsImpossible returns true if the comparison in the expression can never evaluate to true. +// Note that this is not currently exhaustive to ALL impossible comparisons. +func (node *ComparisonExpr) IsImpossible() bool { + var left, right *SQLVal + var ok bool + if left, ok = node.Left.(*SQLVal); !ok { + return false + } + if right, ok = node.Right.(*SQLVal); !ok { + return false + } + if node.Operator == NotEqualStr && left.Type == right.Type { + if len(left.Val) != len(right.Val) { + return false + } + + for i := range left.Val { + if left.Val[i] != right.Val[i] { + return false + } + } + return true + } + return false +} + +// NewStrVal builds a new StrVal. +func NewStrVal(in []byte) *SQLVal { + return &SQLVal{Type: StrVal, Val: in} +} + +// NewIntVal builds a new IntVal. +func NewIntVal(in []byte) *SQLVal { + return &SQLVal{Type: IntVal, Val: in} +} + +// NewFloatVal builds a new FloatVal. +func NewFloatVal(in []byte) *SQLVal { + return &SQLVal{Type: FloatVal, Val: in} +} + +// NewHexNum builds a new HexNum. +func NewHexNum(in []byte) *SQLVal { + return &SQLVal{Type: HexNum, Val: in} +} + +// NewHexVal builds a new HexVal. +func NewHexVal(in []byte) *SQLVal { + return &SQLVal{Type: HexVal, Val: in} +} + +// NewBitVal builds a new BitVal containing a bit literal. +func NewBitVal(in []byte) *SQLVal { + return &SQLVal{Type: BitVal, Val: in} +} + +// NewValArg builds a new ValArg. +func NewValArg(in []byte) *SQLVal { + return &SQLVal{Type: ValArg, Val: in} +} + +// HexDecode decodes the hexval into bytes. +func (node *SQLVal) HexDecode() ([]byte, error) { + dst := make([]byte, hex.DecodedLen(len([]byte(node.Val)))) + _, err := hex.Decode(dst, []byte(node.Val)) + if err != nil { + return nil, err + } + return dst, err +} + +// Equal returns true if the column names match. +func (node *ColName) Equal(c *ColName) bool { + // Failsafe: ColName should not be empty. + if node == nil || c == nil { + return false + } + return node.Name.Equal(c.Name) && node.Qualifier == c.Qualifier +} + +// Aggregates is a map of all aggregate functions. +var Aggregates = map[string]bool{ + "avg": true, + "bit_and": true, + "bit_or": true, + "bit_xor": true, + "count": true, + "group_concat": true, + "max": true, + "min": true, + "std": true, + "stddev_pop": true, + "stddev_samp": true, + "stddev": true, + "sum": true, + "var_pop": true, + "var_samp": true, + "variance": true, +} + +// IsAggregate returns true if the function is an aggregate. +func (node *FuncExpr) IsAggregate() bool { + return Aggregates[node.Name.Lowered()] +} + +// NewColIdent makes a new ColIdent. +func NewColIdent(str string) ColIdent { + return ColIdent{ + val: str, + } +} + +// NewExec makes a new Exec. +func NewExec(comments Comments, methodName TableName, execVarDefs []ExecVarDef, optExecPayload *ExecVarDef) *Exec { + return &Exec{ + Comments: comments, + MethodName: methodName, + ExecVarDefs: execVarDefs, + OptExecPayload: optExecPayload, + } +} + +// NewPurge makes a new Purge. +func NewPurge(comments Comments, target TableName, isGlobal bool) *Purge { + return &Purge{ + Comments: comments, + Target: target, + IsGlobal: isGlobal, + } +} + +// NewNativeQuery makes a new NativeQuery. +func NewNativeQuery(comments Comments, queryString string) *NativeQuery { + return &NativeQuery{ + Comments: comments, + QueryString: queryString, + } +} + +// NewExecVarDef makes a new ExecVarDef. +func NewExecVarDef(colIdent ColIdent, val Expr) ExecVarDef { + return ExecVarDef{ + ColIdent: colIdent, + Val: val, + } +} + +// NewSelect is used to create a select statement +func NewSelect(comments Comments, exprs SelectExprs, selectOptions []string, from TableExprs, where *Where, groupBy GroupBy, having *Where) *Select { + var cache *bool + var distinct, straightJoinHint, sqlFoundRows bool + + for _, option := range selectOptions { + switch strings.ToLower(option) { + case DistinctStr: + distinct = true + case SQLCacheStr: + truth := true + cache = &truth + case SQLNoCacheStr: + truth := false + cache = &truth + case StraightJoinHint: + straightJoinHint = true + case SQLCalcFoundRowsStr: + sqlFoundRows = true + } + } + return &Select{ + Cache: cache, + Comments: comments, + Distinct: distinct, + StraightJoinHint: straightJoinHint, + SQLCalcFoundRows: sqlFoundRows, + SelectExprs: exprs, + From: from, + Where: where, + GroupBy: groupBy, + Having: having, + } +} + +// NewColIdentWithAt makes a new ColIdent. +func NewColIdentWithAt(str string, at AtCount) ColIdent { + return ColIdent{ + val: str, + at: at, + } +} + +// IsEmpty returns true if the name is empty. +func (node ColIdent) IsEmpty() bool { + return node.val == "" +} + +// String returns the unescaped column name. It must +// not be used for SQL generation. Use sqlparser.String +// instead. The Stringer conformance is for usage +// in templates. +func (node ColIdent) String() string { + atStr := "" + for i := NoAt; i < node.at; i++ { + atStr += "@" + } + return atStr + node.val +} + +// CompliantName returns a compliant id name +// that can be used for a bind var. +func (node ColIdent) CompliantName() string { + return compliantName(node.val) +} + +// Lowered returns a lower-cased column name. +// This function should generally be used only for optimizing +// comparisons. +func (node ColIdent) Lowered() string { + if node.val == "" { + return "" + } + if node.lowered == "" { + node.lowered = strings.ToLower(node.val) + } + return node.lowered +} + +func (node *ColName) GetRawVal() string { + name := node.Name.GetRawVal() + q1 := node.Qualifier.GetRawVal() + if q1 != "" { + return fmt.Sprintf("%s.%s", q1, name) + } + return name +} + +func (node ColIdent) GetRawVal() string { + return node.val +} + +func (node ColIdent) GetAtCount() AtCount { + return node.at +} + +// Equal performs a case-insensitive compare. +func (node ColIdent) Equal(in ColIdent) bool { + return node.Lowered() == in.Lowered() +} + +// EqualString performs a case-insensitive compare with str. +func (node ColIdent) EqualString(str string) bool { + return node.Lowered() == strings.ToLower(str) +} + +// MarshalJSON marshals into JSON. +func (node ColIdent) MarshalJSON() ([]byte, error) { + return json.Marshal(node.val) +} + +// UnmarshalJSON unmarshals from JSON. +func (node *ColIdent) UnmarshalJSON(b []byte) error { + var result string + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + node.val = result + return nil +} + +// NewTableIdent creates a new TableIdent. +func NewTableIdent(str string) TableIdent { + return TableIdent{v: str} +} + +// IsEmpty returns true if TabIdent is empty. +func (node TableIdent) IsEmpty() bool { + return node.v == "" +} + +// String returns the unescaped table name. It must +// not be used for SQL generation. Use sqlparser.String +// instead. The Stringer conformance is for usage +// in templates. +func (node TableIdent) String() string { + return node.v +} + +// CompliantName returns a compliant id name +// that can be used for a bind var. +func (node TableIdent) CompliantName() string { + return compliantName(node.v) +} + +// MarshalJSON marshals into JSON. +func (node TableIdent) MarshalJSON() ([]byte, error) { + return json.Marshal(node.v) +} + +// UnmarshalJSON unmarshals from JSON. +func (node *TableIdent) UnmarshalJSON(b []byte) error { + var result string + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + node.v = result + return nil +} + +func (node TableIdent) GetRawVal() string { + return node.v +} + +func (tn TableName) GetRawVal() string { + name := tn.Name.GetRawVal() + q1 := tn.Qualifier.GetRawVal() + q2 := tn.QualifierSecond.GetRawVal() + q3 := tn.QualifierThird.GetRawVal() + if q3 != "" { + return fmt.Sprintf("%s.%s.%s.%s", q3, q2, q1, name) + } + if q2 != "" { + return fmt.Sprintf("%s.%s.%s", q2, q1, name) + } + if q1 != "" { + return fmt.Sprintf("%s.%s", q1, name) + } + return name +} + +func ContainEscapableChars(s string, at AtCount) bool { + isDbSystemVariable := at != NoAt + + for i, c := range s { + letter := isLetter(uint16(c)) + systemVarChar := isDbSystemVariable && isCarat(uint16(c)) + if !(letter || systemVarChar) { + if i == 0 || !isDigit(uint16(c)) { + return true + } + } + } + + return false +} + +func isKeyword(s string) bool { + _, isKeyword := keywords[s] + return isKeyword +} + +func FormatID(buf *TrackedBuffer, original, lowered string, at AtCount) { + if ContainEscapableChars(original, at) || isKeyword(lowered) { + WriteEscapedString(buf, original) + } else { + buf.Myprintf("%s", original) + } +} + +func WriteEscapedString(buf *TrackedBuffer, original string) { + buf.WriteByte('`') + for _, c := range original { + buf.WriteRune(c) + if c == '`' { + buf.WriteByte('`') + } + } + buf.WriteByte('`') +} + +func compliantName(in string) string { + var buf strings.Builder + for i, c := range in { + if !isLetter(uint16(c)) { + if i == 0 || !isDigit(uint16(c)) { + buf.WriteByte('_') + continue + } + } + buf.WriteRune(c) + } + return buf.String() +} + +// AddOrder adds an order by element +func (node *Select) AddOrder(order *Order) { + node.OrderBy = append(node.OrderBy, order) +} + +// SetLimit sets the limit clause +func (node *Select) SetLimit(limit *Limit) { + node.Limit = limit +} + +// SetLock sets the lock clause +func (node *Select) SetLock(lock string) { + node.Lock = lock +} + +// AddWhere adds the boolean expression to the +// WHERE clause as an AND condition. +func (node *Select) AddWhere(expr Expr) { + if node.Where == nil { + node.Where = &Where{ + Type: WhereStr, + Expr: expr, + } + return + } + node.Where.Expr = &AndExpr{ + Left: node.Where.Expr, + Right: expr, + } +} + +// AddHaving adds the boolean expression to the +// HAVING clause as an AND condition. +func (node *Select) AddHaving(expr Expr) { + if node.Having == nil { + node.Having = &Where{ + Type: HavingStr, + Expr: expr, + } + return + } + node.Having.Expr = &AndExpr{ + Left: node.Having.Expr, + Right: expr, + } +} + +// AddOrder adds an order by element +func (node *ParenSelect) AddOrder(order *Order) { + node.Select.AddOrder(order) +} + +// SetLimit sets the limit clause +func (node *ParenSelect) SetLimit(limit *Limit) { + node.Select.SetLimit(limit) +} + +// SetLock sets the lock clause +func (node *ParenSelect) SetLock(lock string) { + node.Select.SetLock(lock) +} + +// AddOrder adds an order by element +func (node *Union) AddOrder(order *Order) { + node.OrderBy = append(node.OrderBy, order) +} + +// SetLimit sets the limit clause +func (node *Union) SetLimit(limit *Limit) { + node.Limit = limit +} + +// SetLock sets the lock clause +func (node *Union) SetLock(lock string) { + node.Lock = lock +} + +// Unionize returns a UNION, either creating one or adding SELECT to an existing one +func Unionize(lhs, rhs SelectStatement, typ string, by OrderBy, limit *Limit, lock string) *Union { + union, isUnion := lhs.(*Union) + if isUnion { + union.UnionSelects = append(union.UnionSelects, &UnionSelect{Type: typ, Statement: rhs}) + union.OrderBy = by + union.Limit = limit + union.Lock = lock + return union + } + + return &Union{FirstStatement: lhs, UnionSelects: []*UnionSelect{{Type: typ, Statement: rhs}}, OrderBy: by, Limit: limit, Lock: lock} +} + +// AtCount represents the '@' count in ColIdent +type AtCount int + +const ( + // NoAt represents no @ + NoAt AtCount = iota + // SingleAt represents @ + SingleAt + // DoubleAt represnts @@ + DoubleAt +) diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/ast_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/ast_test.go new file mode 100644 index 00000000..968492bc --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/ast_test.go @@ -0,0 +1,800 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "bytes" + "encoding/json" + "reflect" + "strings" + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + + "github.com/stretchr/testify/require" +) + +func TestAppend(t *testing.T) { + query := "select * from t where a = 1" + tree, err := Parse(query) + require.NoError(t, err) + var b strings.Builder + Append(&b, tree) + got := b.String() + want := query + if got != want { + t.Errorf("Append: %s, want %s", got, want) + } + Append(&b, tree) + got = b.String() + want = query + query + if got != want { + t.Errorf("Append: %s, want %s", got, want) + } +} + +func TestSelect(t *testing.T) { + tree, err := Parse("select * from t where a = 1") + require.NoError(t, err) + expr := tree.(*Select).Where.Expr + + sel := &Select{} + sel.AddWhere(expr) + buf := NewTrackedBuffer(nil) + sel.Where.Format(buf) + want := " where a = 1" + if buf.String() != want { + t.Errorf("where: %q, want %s", buf.String(), want) + } + sel.AddWhere(expr) + buf = NewTrackedBuffer(nil) + sel.Where.Format(buf) + want = " where a = 1 and a = 1" + if buf.String() != want { + t.Errorf("where: %q, want %s", buf.String(), want) + } + sel = &Select{} + sel.AddHaving(expr) + buf = NewTrackedBuffer(nil) + sel.Having.Format(buf) + want = " having a = 1" + if buf.String() != want { + t.Errorf("having: %q, want %s", buf.String(), want) + } + sel.AddHaving(expr) + buf = NewTrackedBuffer(nil) + sel.Having.Format(buf) + want = " having a = 1 and a = 1" + if buf.String() != want { + t.Errorf("having: %q, want %s", buf.String(), want) + } + + tree, err = Parse("select * from t where a = 1 or b = 1") + require.NoError(t, err) + expr = tree.(*Select).Where.Expr + sel = &Select{} + sel.AddWhere(expr) + buf = NewTrackedBuffer(nil) + sel.Where.Format(buf) + want = " where a = 1 or b = 1" + if buf.String() != want { + t.Errorf("where: %q, want %s", buf.String(), want) + } + sel = &Select{} + sel.AddHaving(expr) + buf = NewTrackedBuffer(nil) + sel.Having.Format(buf) + want = " having a = 1 or b = 1" + if buf.String() != want { + t.Errorf("having: %q, want %s", buf.String(), want) + } +} + +func TestRemoveHints(t *testing.T) { + for _, query := range []string{ + "select * from t use index (i)", + "select * from t force index (i)", + } { + tree, err := Parse(query) + if err != nil { + t.Fatal(err) + } + sel := tree.(*Select) + sel.From = TableExprs{ + sel.From[0].(*AliasedTableExpr).RemoveHints(), + } + buf := NewTrackedBuffer(nil) + sel.Format(buf) + if got, want := buf.String(), "select * from t"; got != want { + t.Errorf("stripped query: %s, want %s", got, want) + } + } +} + +func TestAddOrder(t *testing.T) { + src, err := Parse("select foo, bar from baz order by foo") + require.NoError(t, err) + order := src.(*Select).OrderBy[0] + dst, err := Parse("select * from t") + require.NoError(t, err) + dst.(*Select).AddOrder(order) + buf := NewTrackedBuffer(nil) + dst.Format(buf) + want := "select * from t order by foo asc" + if buf.String() != want { + t.Errorf("order: %q, want %s", buf.String(), want) + } + dst, err = Parse("select * from t union select * from s") + require.NoError(t, err) + dst.(*Union).AddOrder(order) + buf = NewTrackedBuffer(nil) + dst.Format(buf) + want = "select * from t union select * from s order by foo asc" + if buf.String() != want { + t.Errorf("order: %q, want %s", buf.String(), want) + } +} + +func TestSetLimit(t *testing.T) { + src, err := Parse("select foo, bar from baz limit 4") + require.NoError(t, err) + limit := src.(*Select).Limit + dst, err := Parse("select * from t") + require.NoError(t, err) + dst.(*Select).SetLimit(limit) + buf := NewTrackedBuffer(nil) + dst.Format(buf) + want := "select * from t limit 4" + if buf.String() != want { + t.Errorf("limit: %q, want %s", buf.String(), want) + } + dst, err = Parse("select * from t union select * from s") + require.NoError(t, err) + dst.(*Union).SetLimit(limit) + buf = NewTrackedBuffer(nil) + dst.Format(buf) + want = "select * from t union select * from s limit 4" + if buf.String() != want { + t.Errorf("order: %q, want %s", buf.String(), want) + } +} + +func TestDDL(t *testing.T) { + testcases := []struct { + query string + output *DDL + affected []string + }{{ + query: "create table a", + output: &DDL{ + Action: CreateStr, + Table: TableName{Name: NewTableIdent("a")}, + }, + affected: []string{"a"}, + }, { + query: "rename table a to b", + output: &DDL{ + Action: RenameStr, + FromTables: TableNames{ + TableName{Name: NewTableIdent("a")}, + }, + ToTables: TableNames{ + TableName{Name: NewTableIdent("b")}, + }, + }, + affected: []string{"a", "b"}, + }, { + query: "rename table a to b, c to d", + output: &DDL{ + Action: RenameStr, + FromTables: TableNames{ + TableName{Name: NewTableIdent("a")}, + TableName{Name: NewTableIdent("c")}, + }, + ToTables: TableNames{ + TableName{Name: NewTableIdent("b")}, + TableName{Name: NewTableIdent("d")}, + }, + }, + affected: []string{"a", "c", "b", "d"}, + }, { + query: "drop table a", + output: &DDL{ + Action: DropStr, + FromTables: TableNames{ + TableName{Name: NewTableIdent("a")}, + }, + }, + affected: []string{"a"}, + }, { + query: "drop table a, b", + output: &DDL{ + Action: DropStr, + FromTables: TableNames{ + TableName{Name: NewTableIdent("a")}, + TableName{Name: NewTableIdent("b")}, + }, + }, + affected: []string{"a", "b"}, + }} + for _, tcase := range testcases { + got, err := Parse(tcase.query) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, tcase.output) { + t.Errorf("%s: %v, want %v", tcase.query, got, tcase.output) + } + want := make(TableNames, 0, len(tcase.affected)) + for _, t := range tcase.affected { + want = append(want, TableName{Name: NewTableIdent(t)}) + } + if affected := got.(*DDL).AffectedTables(); !reflect.DeepEqual(affected, want) { + t.Errorf("Affected(%s): %v, want %v", tcase.query, affected, want) + } + } +} + +func TestSetAutocommitON(t *testing.T) { + stmt, err := Parse("SET autocommit=ON") + require.NoError(t, err) + s, ok := stmt.(*Set) + if !ok { + t.Errorf("SET statement is not Set: %T", s) + } + + if len(s.Exprs) < 1 { + t.Errorf("SET statement has no expressions") + } + + e := s.Exprs[0] + switch v := e.Expr.(type) { + case *SQLVal: + if v.Type != StrVal { + t.Errorf("SET statement value is not StrVal: %T", v) + } + + if !bytes.Equal([]byte("on"), v.Val) { + t.Errorf("SET statement value want: on, got: %s", v.Val) + } + default: + t.Errorf("SET statement expression is not SQLVal: %T", e.Expr) + } + + stmt, err = Parse("SET @@session.autocommit=ON") + require.NoError(t, err) + s, ok = stmt.(*Set) + if !ok { + t.Errorf("SET statement is not Set: %T", s) + } + + if len(s.Exprs) < 1 { + t.Errorf("SET statement has no expressions") + } + + e = s.Exprs[0] + switch v := e.Expr.(type) { + case *SQLVal: + if v.Type != StrVal { + t.Errorf("SET statement value is not StrVal: %T", v) + } + + if !bytes.Equal([]byte("on"), v.Val) { + t.Errorf("SET statement value want: on, got: %s", v.Val) + } + default: + t.Errorf("SET statement expression is not SQLVal: %T", e.Expr) + } +} + +func TestSetAutocommitOFF(t *testing.T) { + stmt, err := Parse("SET autocommit=OFF") + require.NoError(t, err) + s, ok := stmt.(*Set) + if !ok { + t.Errorf("SET statement is not Set: %T", s) + } + + if len(s.Exprs) < 1 { + t.Errorf("SET statement has no expressions") + } + + e := s.Exprs[0] + switch v := e.Expr.(type) { + case *SQLVal: + if v.Type != StrVal { + t.Errorf("SET statement value is not StrVal: %T", v) + } + + if !bytes.Equal([]byte("off"), v.Val) { + t.Errorf("SET statement value want: on, got: %s", v.Val) + } + default: + t.Errorf("SET statement expression is not SQLVal: %T", e.Expr) + } + + stmt, err = Parse("SET @@session.autocommit=OFF") + require.NoError(t, err) + s, ok = stmt.(*Set) + if !ok { + t.Errorf("SET statement is not Set: %T", s) + } + + if len(s.Exprs) < 1 { + t.Errorf("SET statement has no expressions") + } + + e = s.Exprs[0] + switch v := e.Expr.(type) { + case *SQLVal: + if v.Type != StrVal { + t.Errorf("SET statement value is not StrVal: %T", v) + } + + if !bytes.Equal([]byte("off"), v.Val) { + t.Errorf("SET statement value want: on, got: %s", v.Val) + } + default: + t.Errorf("SET statement expression is not SQLVal: %T", e.Expr) + } + +} + +func TestWhere(t *testing.T) { + var w *Where + buf := NewTrackedBuffer(nil) + w.Format(buf) + if buf.String() != "" { + t.Errorf("w.Format(nil): %q, want \"\"", buf.String()) + } + w = NewWhere(WhereStr, nil) + buf = NewTrackedBuffer(nil) + w.Format(buf) + if buf.String() != "" { + t.Errorf("w.Format(&Where{nil}: %q, want \"\"", buf.String()) + } +} + +func TestIsAggregate(t *testing.T) { + f := FuncExpr{Name: NewColIdent("avg")} + if !f.IsAggregate() { + t.Error("IsAggregate: false, want true") + } + + f = FuncExpr{Name: NewColIdent("Avg")} + if !f.IsAggregate() { + t.Error("IsAggregate: false, want true") + } + + f = FuncExpr{Name: NewColIdent("foo")} + if f.IsAggregate() { + t.Error("IsAggregate: true, want false") + } +} + +func TestIsImpossible(t *testing.T) { + f := ComparisonExpr{ + Operator: NotEqualStr, + Left: newIntVal("1"), + Right: newIntVal("1"), + } + if !f.IsImpossible() { + t.Error("IsImpossible: false, want true") + } + + f = ComparisonExpr{ + Operator: EqualStr, + Left: newIntVal("1"), + Right: newIntVal("1"), + } + if f.IsImpossible() { + t.Error("IsImpossible: true, want false") + } + + f = ComparisonExpr{ + Operator: NotEqualStr, + Left: newIntVal("1"), + Right: newIntVal("2"), + } + if f.IsImpossible() { + t.Error("IsImpossible: true, want false") + } +} + +func TestReplaceExpr(t *testing.T) { + tcases := []struct { + in, out string + }{{ + in: "select * from t where (select a from b)", + out: ":a", + }, { + in: "select * from t where (select a from b) and b", + out: ":a and b", + }, { + in: "select * from t where a and (select a from b)", + out: "a and :a", + }, { + in: "select * from t where (select a from b) or b", + out: ":a or b", + }, { + in: "select * from t where a or (select a from b)", + out: "a or :a", + }, { + in: "select * from t where not (select a from b)", + out: "not :a", + }, { + in: "select * from t where ((select a from b))", + out: ":a", + }, { + in: "select * from t where (select a from b) = 1", + out: ":a = 1", + }, { + in: "select * from t where a = (select a from b)", + out: "a = :a", + }, { + in: "select * from t where a like b escape (select a from b)", + out: "a like b escape :a", + }, { + in: "select * from t where (select a from b) between a and b", + out: ":a between a and b", + }, { + in: "select * from t where a between (select a from b) and b", + out: "a between :a and b", + }, { + in: "select * from t where a between b and (select a from b)", + out: "a between b and :a", + }, { + in: "select * from t where (select a from b) is null", + out: ":a is null", + }, { + // exists should not replace. + in: "select * from t where exists (select a from b)", + out: "exists (select a from b)", + }, { + in: "select * from t where a in ((select a from b), 1)", + out: "a in (:a, 1)", + }, { + in: "select * from t where a in (0, (select a from b), 1)", + out: "a in (0, :a, 1)", + }, { + in: "select * from t where (select a from b) + 1", + out: ":a + 1", + }, { + in: "select * from t where 1+(select a from b)", + out: "1 + :a", + }, { + in: "select * from t where -(select a from b)", + out: "-:a", + }, { + in: "select * from t where interval (select a from b) aa", + out: "interval :a aa", + }, { + in: "select * from t where (select a from b) collate utf8", + out: ":a collate utf8", + }, { + in: "select * from t where func((select a from b), 1)", + out: "func(:a, 1)", + }, { + in: "select * from t where func(1, (select a from b), 1)", + out: "func(1, :a, 1)", + }, { + in: "select * from t where group_concat((select a from b), 1 order by a)", + out: "group_concat(:a, 1 order by a asc)", + }, { + in: "select * from t where group_concat(1 order by (select a from b), a)", + out: "group_concat(1 order by :a asc, a asc)", + }, { + in: "select * from t where group_concat(1 order by a, (select a from b))", + out: "group_concat(1 order by a asc, :a asc)", + }, { + in: "select * from t where substr(a, (select a from b), b)", + out: "substr(a, :a, b)", + }, { + in: "select * from t where substr(a, b, (select a from b))", + out: "substr(a, b, :a)", + }, { + in: "select * from t where convert((select a from b), json)", + out: "convert(:a, json)", + }, { + in: "select * from t where convert((select a from b) using utf8)", + out: "convert(:a using utf8)", + }, { + in: "select * from t where match((select a from b), 1) against (a)", + out: "match(:a, 1) against (a)", + }, { + in: "select * from t where match(1, (select a from b), 1) against (a)", + out: "match(1, :a, 1) against (a)", + }, { + in: "select * from t where match(1, a, 1) against ((select a from b))", + out: "match(1, a, 1) against (:a)", + }, { + in: "select * from t where case (select a from b) when a then b when b then c else d end", + out: "case :a when a then b when b then c else d end", + }, { + in: "select * from t where case a when (select a from b) then b when b then c else d end", + out: "case a when :a then b when b then c else d end", + }, { + in: "select * from t where case a when b then (select a from b) when b then c else d end", + out: "case a when b then :a when b then c else d end", + }, { + in: "select * from t where case a when b then c when (select a from b) then c else d end", + out: "case a when b then c when :a then c else d end", + }, { + in: "select * from t where case a when b then c when d then c else (select a from b) end", + out: "case a when b then c when d then c else :a end", + }} + to := NewValArg([]byte(":a")) + for _, tcase := range tcases { + tree, err := Parse(tcase.in) + if err != nil { + t.Fatal(err) + } + var from *Subquery + _ = Walk(func(node SQLNode) (kontinue bool, err error) { + if sq, ok := node.(*Subquery); ok { + from = sq + return false, nil + } + return true, nil + }, tree) + if from == nil { + t.Fatalf("from is nil for %s", tcase.in) + } + expr := ReplaceExpr(tree.(*Select).Where.Expr, from, to) + got := String(expr) + if tcase.out != got { + t.Errorf("ReplaceExpr(%s): %s, want %s", tcase.in, got, tcase.out) + } + } +} + +func TestColNameEqual(t *testing.T) { + var c1, c2 *ColName + if c1.Equal(c2) { + t.Error("nil columns equal, want unequal") + } + c1 = &ColName{ + Name: NewColIdent("aa"), + } + c2 = &ColName{ + Name: NewColIdent("bb"), + } + if c1.Equal(c2) { + t.Error("columns equal, want unequal") + } + c2.Name = NewColIdent("aa") + if !c1.Equal(c2) { + t.Error("columns unequal, want equal") + } +} + +func TestColIdent(t *testing.T) { + str := NewColIdent("Ab") + if str.String() != "Ab" { + t.Errorf("String=%s, want Ab", str.String()) + } + if str.String() != "Ab" { + t.Errorf("Val=%s, want Ab", str.String()) + } + if str.Lowered() != "ab" { + t.Errorf("Val=%s, want ab", str.Lowered()) + } + if !str.Equal(NewColIdent("aB")) { + t.Error("str.Equal(NewColIdent(aB))=false, want true") + } + if !str.EqualString("ab") { + t.Error("str.EqualString(ab)=false, want true") + } + str = NewColIdent("") + if str.Lowered() != "" { + t.Errorf("Val=%s, want \"\"", str.Lowered()) + } +} + +func TestColIdentMarshal(t *testing.T) { + str := NewColIdent("Ab") + b, err := json.Marshal(str) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `"Ab"` + if got != want { + t.Errorf("json.Marshal()= %s, want %s", got, want) + } + var out ColIdent + if err := json.Unmarshal(b, &out); err != nil { + t.Errorf("Unmarshal err: %v, want nil", err) + } + if !reflect.DeepEqual(out, str) { + t.Errorf("Unmarshal: %v, want %v", out, str) + } +} + +func TestColIdentSize(t *testing.T) { + size := unsafe.Sizeof(NewColIdent("")) + want := 2*unsafe.Sizeof("") + 8 + if size != want { + t.Errorf("Size of ColIdent: %d, want 32", want) + } +} + +func TestTableIdentMarshal(t *testing.T) { + str := NewTableIdent("Ab") + b, err := json.Marshal(str) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `"Ab"` + if got != want { + t.Errorf("json.Marshal()= %s, want %s", got, want) + } + var out TableIdent + if err := json.Unmarshal(b, &out); err != nil { + t.Errorf("Unmarshal err: %v, want nil", err) + } + if !reflect.DeepEqual(out, str) { + t.Errorf("Unmarshal: %v, want %v", out, str) + } +} + +func TestHexDecode(t *testing.T) { + testcase := []struct { + in, out string + }{{ + in: "313233", + out: "123", + }, { + in: "ag", + out: "encoding/hex: invalid byte: U+0067 'g'", + }, { + in: "777", + out: "encoding/hex: odd length hex string", + }} + for _, tc := range testcase { + out, err := newHexVal(tc.in).HexDecode() + if err != nil { + if err.Error() != tc.out { + t.Errorf("Decode(%q): %v, want %s", tc.in, err, tc.out) + } + continue + } + if !bytes.Equal(out, []byte(tc.out)) { + t.Errorf("Decode(%q): %s, want %s", tc.in, out, tc.out) + } + } +} + +func TestCompliantName(t *testing.T) { + testcases := []struct { + in, out string + }{{ + in: "aa", + out: "aa", + }, { + in: "1a", + out: "_a", + }, { + in: "a1", + out: "a1", + }, { + in: "a.b", + out: "a_b", + }, { + in: ".ab", + out: "_ab", + }} + for _, tc := range testcases { + out := NewColIdent(tc.in).CompliantName() + if out != tc.out { + t.Errorf("ColIdent(%s).CompliantNamt: %s, want %s", tc.in, out, tc.out) + } + out = NewTableIdent(tc.in).CompliantName() + if out != tc.out { + t.Errorf("TableIdent(%s).CompliantNamt: %s, want %s", tc.in, out, tc.out) + } + } +} + +func TestColumns_FindColumn(t *testing.T) { + cols := Columns{NewColIdent("a"), NewColIdent("c"), NewColIdent("b"), NewColIdent("0")} + + testcases := []struct { + in string + out int + }{{ + in: "a", + out: 0, + }, { + in: "b", + out: 2, + }, + { + in: "0", + out: 3, + }, + { + in: "f", + out: -1, + }} + + for _, tc := range testcases { + val := cols.FindColumn(NewColIdent(tc.in)) + if val != tc.out { + t.Errorf("FindColumn(%s): %d, want %d", tc.in, val, tc.out) + } + } +} + +func TestSplitStatementToPieces(t *testing.T) { + testcases := []struct { + input string + output string + }{{ + input: "select * from table", + }, { + input: "select * from table1; select * from table2;", + output: "select * from table1; select * from table2", + }, { + input: "select * from /* comment ; */ table;", + output: "select * from /* comment ; */ table", + }, { + input: "select * from table where semi = ';';", + output: "select * from table where semi = ';'", + }, { + input: "select * from table1;--comment;\nselect * from table2;", + output: "select * from table1;--comment;\nselect * from table2", + }, { + input: "CREATE TABLE `total_data` (`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'id', " + + "`region` varchar(32) NOT NULL COMMENT 'region name, like zh; th; kepler'," + + "`data_size` bigint NOT NULL DEFAULT '0' COMMENT 'data size;'," + + "`createtime` datetime NOT NULL DEFAULT NOW() COMMENT 'create time;'," + + "`comment` varchar(100) NOT NULL DEFAULT '' COMMENT 'comment'," + + "PRIMARY KEY (`id`))", + }} + + for _, tcase := range testcases { + if tcase.output == "" { + tcase.output = tcase.input + } + + stmtPieces, err := SplitStatementToPieces(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + + out := strings.Join(stmtPieces, ";") + if out != tcase.output { + t.Errorf("out: %s, want %s", out, tcase.output) + } + } +} + +func TestTypeConversion(t *testing.T) { + ct1 := &ColumnType{Type: "BIGINT"} + ct2 := &ColumnType{Type: "bigint"} + assert.Equal(t, ct1.SQLType(), ct2.SQLType()) +} + +func TestDefaultStatus(t *testing.T) { + assert.Equal(t, + String(&Default{ColName: "status"}), + "default(`status`)") +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/comments.go b/internal/stackql-parser-fork/go/vt/sqlparser/comments.go new file mode 100644 index 00000000..1d2787fa --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/comments.go @@ -0,0 +1,323 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "strconv" + "strings" + "unicode" +) + +const ( + // DirectiveMultiShardAutocommit is the query comment directive to allow + // single round trip autocommit with a multi-shard statement. + DirectiveMultiShardAutocommit = "MULTI_SHARD_AUTOCOMMIT" + // DirectiveSkipQueryPlanCache skips query plan cache when set. + DirectiveSkipQueryPlanCache = "SKIP_QUERY_PLAN_CACHE" + // DirectiveQueryTimeout sets a query timeout in vtgate. Only supported for SELECTS. + DirectiveQueryTimeout = "QUERY_TIMEOUT_MS" + // DirectiveScatterErrorsAsWarnings enables partial success scatter select queries + DirectiveScatterErrorsAsWarnings = "SCATTER_ERRORS_AS_WARNINGS" + // DirectiveIgnoreMaxPayloadSize skips payload size validation when set. + DirectiveIgnoreMaxPayloadSize = "IGNORE_MAX_PAYLOAD_SIZE" +) + +func isNonSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +// leadingCommentEnd returns the first index after all leading comments, or +// 0 if there are no leading comments. +func leadingCommentEnd(text string) (end int) { + hasComment := false + pos := 0 + for pos < len(text) { + // Eat up any whitespace. Trailing whitespace will be considered part of + // the leading comments. + nextVisibleOffset := strings.IndexFunc(text[pos:], isNonSpace) + if nextVisibleOffset < 0 { + break + } + pos += nextVisibleOffset + remainingText := text[pos:] + + // Found visible characters. Look for '/*' at the beginning + // and '*/' somewhere after that. + if len(remainingText) < 4 || remainingText[:2] != "/*" || remainingText[2] == '!' { + break + } + commentLength := 4 + strings.Index(remainingText[2:], "*/") + if commentLength < 4 { + // Missing end comment :/ + break + } + + hasComment = true + pos += commentLength + } + + if hasComment { + return pos + } + return 0 +} + +// trailingCommentStart returns the first index of trailing comments. +// If there are no trailing comments, returns the length of the input string. +func trailingCommentStart(text string) (start int) { + hasComment := false + reducedLen := len(text) + for reducedLen > 0 { + // Eat up any whitespace. Leading whitespace will be considered part of + // the trailing comments. + nextReducedLen := strings.LastIndexFunc(text[:reducedLen], isNonSpace) + 1 + if nextReducedLen == 0 { + break + } + reducedLen = nextReducedLen + if reducedLen < 4 || text[reducedLen-2:reducedLen] != "*/" { + break + } + + // Find the beginning of the comment + startCommentPos := strings.LastIndex(text[:reducedLen-2], "/*") + if startCommentPos < 0 || text[startCommentPos+2] == '!' { + // Badly formatted sql, or a special /*! comment + break + } + + hasComment = true + reducedLen = startCommentPos + } + + if hasComment { + return reducedLen + } + return len(text) +} + +// MarginComments holds the leading and trailing comments that surround a query. +type MarginComments struct { + Leading string + Trailing string +} + +// SplitMarginComments pulls out any leading or trailing comments from a raw sql query. +// This function also trims leading (if there's a comment) and trailing whitespace. +func SplitMarginComments(sql string) (query string, comments MarginComments) { + trailingStart := trailingCommentStart(sql) + leadingEnd := leadingCommentEnd(sql[:trailingStart]) + comments = MarginComments{ + Leading: strings.TrimLeftFunc(sql[:leadingEnd], unicode.IsSpace), + Trailing: strings.TrimRightFunc(sql[trailingStart:], unicode.IsSpace), + } + return strings.TrimFunc(sql[leadingEnd:trailingStart], func(c rune) bool { + return unicode.IsSpace(c) || c == ';' + }), comments +} + +// StripLeadingComments trims the SQL string and removes any leading comments +func StripLeadingComments(sql string) string { + sql = strings.TrimFunc(sql, unicode.IsSpace) + + for hasCommentPrefix(sql) { + switch sql[0] { + case '/': + // Multi line comment + index := strings.Index(sql, "*/") + if index <= 1 { + return sql + } + // don't strip /*! ... */ or /*!50700 ... */ + if len(sql) > 2 && sql[2] == '!' { + return sql + } + sql = sql[index+2:] + case '-': + // Single line comment + index := strings.Index(sql, "\n") + if index == -1 { + return "" + } + sql = sql[index+1:] + } + + sql = strings.TrimFunc(sql, unicode.IsSpace) + } + + return sql +} + +func hasCommentPrefix(sql string) bool { + return len(sql) > 1 && ((sql[0] == '/' && sql[1] == '*') || (sql[0] == '-' && sql[1] == '-')) +} + +// ExtractMysqlComment extracts the version and SQL from a comment-only query +// such as /*!50708 sql here */ +func ExtractMysqlComment(sql string) (string, string) { + sql = sql[3 : len(sql)-2] + + digitCount := 0 + endOfVersionIndex := strings.IndexFunc(sql, func(c rune) bool { + digitCount++ + return !unicode.IsDigit(c) || digitCount == 6 + }) + if endOfVersionIndex < 0 { + return "", "" + } + version := sql[0:endOfVersionIndex] + innerSQL := strings.TrimFunc(sql[endOfVersionIndex:], unicode.IsSpace) + + return version, innerSQL +} + +const commentDirectivePreamble = "/*+" + +// CommentDirectives is the parsed representation for execution directives +// conveyed in query comments +type CommentDirectives map[string]interface{} + +// ExtractCommentDirectives parses the comment list for any execution directives +// of the form: +// +// /*vt+ OPTION_ONE=1 OPTION_TWO OPTION_THREE=abcd */ +// +// It returns the map of the directive values or nil if there aren't any. +func ExtractCommentDirectives(comments Comments) CommentDirectives { + if comments == nil { + return nil + } + + var vals map[string]interface{} + + for _, comment := range comments { + commentStr := string(comment) + if commentStr[0:3] != commentDirectivePreamble { + continue + } + + if vals == nil { + vals = make(map[string]interface{}) + } + + // Split on whitespace and ignore the first and last directive + // since they contain the comment start/end + directives := strings.Fields(commentStr) + for i := 1; i < len(directives)-1; i++ { + directive := directives[i] + sep := strings.IndexByte(directive, '=') + + // No value is equivalent to a true boolean + if sep == -1 { + vals[directive] = true + continue + } + + strVal := directive[sep+1:] + directive = directive[:sep] + + intVal, err := strconv.Atoi(strVal) + if err == nil { + vals[directive] = intVal + continue + } + + boolVal, err := strconv.ParseBool(strVal) + if err == nil { + vals[directive] = boolVal + continue + } + + vals[directive] = strVal + } + } + return vals +} + +// IsSet checks the directive map for the named directive and returns +// true if the directive is set and has a true/false or 0/1 value +func (d CommentDirectives) IsSet(key string) bool { + if d == nil { + return false + } + + val, ok := d[key] + if !ok { + return false + } + + boolVal, ok := val.(bool) + if ok { + return boolVal + } + + intVal, ok := val.(int) + if ok { + return intVal == 1 + } + return false +} + +// SkipQueryPlanCacheDirective returns true if skip query plan cache directive is set to true in query. +func SkipQueryPlanCacheDirective(stmt Statement) bool { + switch stmt := stmt.(type) { + case *Select: + directives := ExtractCommentDirectives(stmt.Comments) + if directives.IsSet(DirectiveSkipQueryPlanCache) { + return true + } + case *Insert: + directives := ExtractCommentDirectives(stmt.Comments) + if directives.IsSet(DirectiveSkipQueryPlanCache) { + return true + } + case *Update: + directives := ExtractCommentDirectives(stmt.Comments) + if directives.IsSet(DirectiveSkipQueryPlanCache) { + return true + } + case *Delete: + directives := ExtractCommentDirectives(stmt.Comments) + if directives.IsSet(DirectiveSkipQueryPlanCache) { + return true + } + default: + return false + } + return false +} + +// IgnoreMaxPayloadSizeDirective returns true if the max payload size override +// directive is set to true. +func IgnoreMaxPayloadSizeDirective(stmt Statement) bool { + switch stmt := stmt.(type) { + case *Select: + directives := ExtractCommentDirectives(stmt.Comments) + return directives.IsSet(DirectiveIgnoreMaxPayloadSize) + case *Insert: + directives := ExtractCommentDirectives(stmt.Comments) + return directives.IsSet(DirectiveIgnoreMaxPayloadSize) + case *Update: + directives := ExtractCommentDirectives(stmt.Comments) + return directives.IsSet(DirectiveIgnoreMaxPayloadSize) + case *Delete: + directives := ExtractCommentDirectives(stmt.Comments) + return directives.IsSet(DirectiveIgnoreMaxPayloadSize) + default: + return false + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/comments_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/comments_test.go new file mode 100644 index 00000000..8ec2a0e1 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/comments_test.go @@ -0,0 +1,409 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSplitComments(t *testing.T) { + var testCases = []struct { + input, outSQL, outLeadingComments, outTrailingComments string + }{{ + input: "/", + outSQL: "/", + outLeadingComments: "", + outTrailingComments: "", + }, { + input: "*/", + outSQL: "*/", + outLeadingComments: "", + outTrailingComments: "", + }, { + input: "/*/", + outSQL: "/*/", + outLeadingComments: "", + outTrailingComments: "", + }, { + input: "a*/", + outSQL: "a*/", + outLeadingComments: "", + outTrailingComments: "", + }, { + input: "*a*/", + outSQL: "*a*/", + outLeadingComments: "", + outTrailingComments: "", + }, { + input: "**a*/", + outSQL: "**a*/", + outLeadingComments: "", + outTrailingComments: "", + }, { + input: "/*b**a*/", + outSQL: "", + outLeadingComments: "", + outTrailingComments: "/*b**a*/", + }, { + input: "/*a*/", + outSQL: "", + outLeadingComments: "", + outTrailingComments: "/*a*/", + }, { + input: "/**/", + outSQL: "", + outLeadingComments: "", + outTrailingComments: "/**/", + }, { + input: "/*b*/ /*a*/", + outSQL: "", + outLeadingComments: "", + outTrailingComments: "/*b*/ /*a*/", + }, { + input: "/* before */ foo /* bar */", + outSQL: "foo", + outLeadingComments: "/* before */ ", + outTrailingComments: " /* bar */", + }, { + input: "/* before1 */ /* before2 */ foo /* after1 */ /* after2 */", + outSQL: "foo", + outLeadingComments: "/* before1 */ /* before2 */ ", + outTrailingComments: " /* after1 */ /* after2 */", + }, { + input: "/** before */ foo /** bar */", + outSQL: "foo", + outLeadingComments: "/** before */ ", + outTrailingComments: " /** bar */", + }, { + input: "/*** before */ foo /*** bar */", + outSQL: "foo", + outLeadingComments: "/*** before */ ", + outTrailingComments: " /*** bar */", + }, { + input: "/** before **/ foo /** bar **/", + outSQL: "foo", + outLeadingComments: "/** before **/ ", + outTrailingComments: " /** bar **/", + }, { + input: "/*** before ***/ foo /*** bar ***/", + outSQL: "foo", + outLeadingComments: "/*** before ***/ ", + outTrailingComments: " /*** bar ***/", + }, { + input: " /*** before ***/ foo /*** bar ***/ ", + outSQL: "foo", + outLeadingComments: "/*** before ***/ ", + outTrailingComments: " /*** bar ***/", + }, { + input: "*** bar ***/", + outSQL: "*** bar ***/", + outLeadingComments: "", + outTrailingComments: "", + }, { + input: " foo ", + outSQL: "foo", + outLeadingComments: "", + outTrailingComments: "", + }, { + input: "select 1 from t where col = '*//*'", + outSQL: "select 1 from t where col = '*//*'", + outLeadingComments: "", + outTrailingComments: "", + }, { + input: "/*! select 1 */", + outSQL: "/*! select 1 */", + outLeadingComments: "", + outTrailingComments: "", + }} + for _, testCase := range testCases { + t.Run(testCase.input, func(t *testing.T) { + gotSQL, gotComments := SplitMarginComments(testCase.input) + gotLeadingComments, gotTrailingComments := gotComments.Leading, gotComments.Trailing + + if gotSQL != testCase.outSQL { + t.Errorf("test input: '%s', got SQL\n%+v, want\n%+v", testCase.input, gotSQL, testCase.outSQL) + } + if gotLeadingComments != testCase.outLeadingComments { + t.Errorf("test input: '%s', got LeadingComments\n%+v, want\n%+v", testCase.input, gotLeadingComments, testCase.outLeadingComments) + } + if gotTrailingComments != testCase.outTrailingComments { + t.Errorf("test input: '%s', got TrailingComments\n%+v, want\n%+v", testCase.input, gotTrailingComments, testCase.outTrailingComments) + } + }) + } +} + +func TestStripLeadingComments(t *testing.T) { + var testCases = []struct { + input, outSQL string + }{{ + input: "/", + outSQL: "/", + }, { + input: "*/", + outSQL: "*/", + }, { + input: "/*/", + outSQL: "/*/", + }, { + input: "/*a", + outSQL: "/*a", + }, { + input: "/*a*", + outSQL: "/*a*", + }, { + input: "/*a**", + outSQL: "/*a**", + }, { + input: "/*b**a*/", + outSQL: "", + }, { + input: "/*a*/", + outSQL: "", + }, { + input: "/**/", + outSQL: "", + }, { + input: "/*!*/", + outSQL: "/*!*/", + }, { + input: "/*!a*/", + outSQL: "/*!a*/", + }, { + input: "/*b*/ /*a*/", + outSQL: "", + }, { + input: `/*b*/ --foo +bar`, + outSQL: "bar", + }, { + input: "foo /* bar */", + outSQL: "foo /* bar */", + }, { + input: "/* foo */ bar", + outSQL: "bar", + }, { + input: "-- /* foo */ bar", + outSQL: "", + }, { + input: "foo -- bar */", + outSQL: "foo -- bar */", + }, { + input: `/* +foo */ bar`, + outSQL: "bar", + }, { + input: `-- foo bar +a`, + outSQL: "a", + }, { + input: `-- foo bar`, + outSQL: "", + }} + for _, testCase := range testCases { + gotSQL := StripLeadingComments(testCase.input) + + if gotSQL != testCase.outSQL { + t.Errorf("test input: '%s', got SQL\n%+v, want\n%+v", testCase.input, gotSQL, testCase.outSQL) + } + } +} + +func TestExtractMysqlComment(t *testing.T) { + var testCases = []struct { + input, outSQL, outVersion string + }{{ + input: "/*!50708SET max_execution_time=5000 */", + outSQL: "SET max_execution_time=5000", + outVersion: "50708", + }, { + input: "/*!50708 SET max_execution_time=5000*/", + outSQL: "SET max_execution_time=5000", + outVersion: "50708", + }, { + input: "/*!50708* from*/", + outSQL: "* from", + outVersion: "50708", + }, { + input: "/*! SET max_execution_time=5000*/", + outSQL: "SET max_execution_time=5000", + outVersion: "", + }} + for _, testCase := range testCases { + gotVersion, gotSQL := ExtractMysqlComment(testCase.input) + + if gotVersion != testCase.outVersion { + t.Errorf("test input: '%s', got version\n%+v, want\n%+v", testCase.input, gotVersion, testCase.outVersion) + } + if gotSQL != testCase.outSQL { + t.Errorf("test input: '%s', got SQL\n%+v, want\n%+v", testCase.input, gotSQL, testCase.outSQL) + } + } +} + +func TestExtractCommentDirectives(t *testing.T) { + var testCases = []struct { + input string + vals CommentDirectives + }{{ + input: "", + vals: nil, + }, { + input: "/* not a vt comment */", + vals: nil, + }, { + input: "/*vt+ */", + vals: CommentDirectives{}, + }, { + input: "/*vt+ SINGLE_OPTION */", + vals: CommentDirectives{ + "SINGLE_OPTION": true, + }, + }, { + input: "/*vt+ ONE_OPT TWO_OPT */", + vals: CommentDirectives{ + "ONE_OPT": true, + "TWO_OPT": true, + }, + }, { + input: "/*vt+ ONE_OPT */ /* other comment */ /*vt+ TWO_OPT */", + vals: CommentDirectives{ + "ONE_OPT": true, + "TWO_OPT": true, + }, + }, { + input: "/*vt+ ONE_OPT=abc TWO_OPT=def */", + vals: CommentDirectives{ + "ONE_OPT": "abc", + "TWO_OPT": "def", + }, + }, { + input: "/*vt+ ONE_OPT=true TWO_OPT=false */", + vals: CommentDirectives{ + "ONE_OPT": true, + "TWO_OPT": false, + }, + }, { + input: "/*vt+ ONE_OPT=true TWO_OPT=\"false\" */", + vals: CommentDirectives{ + "ONE_OPT": true, + "TWO_OPT": "\"false\"", + }, + }, { + input: "/*vt+ RANGE_OPT=[a:b] ANOTHER ANOTHER_WITH_VALEQ=val= AND_ONE_WITH_EQ== */", + vals: CommentDirectives{ + "RANGE_OPT": "[a:b]", + "ANOTHER": true, + "ANOTHER_WITH_VALEQ": "val=", + "AND_ONE_WITH_EQ": "=", + }, + }} + + for _, testCase := range testCases { + sql := "select " + testCase.input + " 1 from dual" + stmt, _ := Parse(sql) + comments := stmt.(*Select).Comments + vals := ExtractCommentDirectives(comments) + + if !reflect.DeepEqual(vals, testCase.vals) { + t.Errorf("test input: '%v', got vals:\n%+v, want\n%+v", testCase.input, vals, testCase.vals) + } + } + + d := CommentDirectives{ + "ONE_OPT": true, + "TWO_OPT": false, + "three": 1, + "four": 2, + "five": 0, + "six": "true", + } + + if !d.IsSet("ONE_OPT") { + t.Errorf("d.IsSet(ONE_OPT) should be true") + } + + if d.IsSet("TWO_OPT") { + t.Errorf("d.IsSet(TWO_OPT) should be false") + } + + if !d.IsSet("three") { + t.Errorf("d.IsSet(three) should be true") + } + + if d.IsSet("four") { + t.Errorf("d.IsSet(four) should be false") + } + + if d.IsSet("five") { + t.Errorf("d.IsSet(five) should be false") + } + + if d.IsSet("six") { + t.Errorf("d.IsSet(six) should be false") + } +} + +func TestSkipQueryPlanCacheDirective(t *testing.T) { + stmt, _ := Parse("insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)") + if !SkipQueryPlanCacheDirective(stmt) { + t.Errorf("d.SkipQueryPlanCacheDirective(stmt) should be true") + } + + stmt, _ = Parse("insert into user(id) values (1), (2)") + if SkipQueryPlanCacheDirective(stmt) { + t.Errorf("d.SkipQueryPlanCacheDirective(stmt) should be false") + } + + stmt, _ = Parse("update /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ users set name=1") + if !SkipQueryPlanCacheDirective(stmt) { + t.Errorf("d.SkipQueryPlanCacheDirective(stmt) should be true") + } + + stmt, _ = Parse("select /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ * from users") + if !SkipQueryPlanCacheDirective(stmt) { + t.Errorf("d.SkipQueryPlanCacheDirective(stmt) should be true") + } + + stmt, _ = Parse("delete /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ from users") + if !SkipQueryPlanCacheDirective(stmt) { + t.Errorf("d.SkipQueryPlanCacheDirective(stmt) should be true") + } +} + +func TestIgnoreMaxPayloadSizeDirective(t *testing.T) { + testCases := []struct { + query string + expected bool + }{ + {"insert /*vt+ IGNORE_MAX_PAYLOAD_SIZE=1 */ into user(id) values (1), (2)", true}, + {"insert into user(id) values (1), (2)", false}, + {"update /*vt+ IGNORE_MAX_PAYLOAD_SIZE=1 */ users set name=1", true}, + {"select /*vt+ IGNORE_MAX_PAYLOAD_SIZE=1 */ * from users", true}, + {"delete /*vt+ IGNORE_MAX_PAYLOAD_SIZE=1 */ from users", true}, + } + + for _, test := range testCases { + stmt, _ := Parse(test.query) + got := IgnoreMaxPayloadSizeDirective(stmt) + assert.Equalf(t, test.expected, got, fmt.Sprintf("d.IgnoreMaxPayloadSizeDirective(stmt) returned %v but expected %v", got, test.expected)) + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/constants.go b/internal/stackql-parser-fork/go/vt/sqlparser/constants.go new file mode 100644 index 00000000..fb18bfff --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/constants.go @@ -0,0 +1,204 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +const ( + // Select.Distinct + DistinctStr = "distinct " + StraightJoinHint = "straight_join " + SQLCalcFoundRowsStr = "sql_calc_found_rows " + + // Select.Lock + ForUpdateStr = " for update" + ShareModeStr = " lock in share mode" + + // Select.Cache + SQLCacheStr = "sql_cache " + SQLNoCacheStr = "sql_no_cache " + + // Union.Type + UnionStr = "union" + UnionAllStr = "union all" + UnionDistinctStr = "union distinct" + + // DDL strings. + InsertStr = "insert" + ReplaceStr = "replace" + UpdateStr = "update" + + // Set.Scope or Show.Scope + SessionStr = "session" + GlobalStr = "global" + VitessMetadataStr = "vitess_metadata" + VariableStr = "variable" + ImplicitStr = "" + + // DDL strings. + CreateStr = "create" + AlterStr = "alter" + DropStr = "drop" + RenameStr = "rename" + TruncateStr = "truncate" + FlushStr = "flush" + CreateVindexStr = "create vindex" + DropVindexStr = "drop vindex" + AddVschemaTableStr = "add vschema table" + DropVschemaTableStr = "drop vschema table" + AddColVindexStr = "on table add vindex" + DropColVindexStr = "on table drop vindex" + AddSequenceStr = "add sequence" + AddAutoIncStr = "add auto_increment" + + // Vindex DDL param to specify the owner of a vindex + VindexOwnerStr = "owner" + + // Partition strings + ReorganizeStr = "reorganize partition" + + // JoinTableExpr.Join + JoinStr = "join" + StraightJoinStr = "straight_join" + LeftJoinStr = "left join" + LeftOuterJoinStr = "left outer join" + RightJoinStr = "right join" + RightOuterJoinStr = "right outer join" + NaturalJoinStr = "natural join" + NaturalLeftJoinStr = "natural left join" + NaturalRightJoinStr = "natural right join" + + // Index hints. + UseStr = "use " + IgnoreStr = "ignore " + ForceStr = "force " + + // Table Modifiers + TempStr = "temp" + TemporaryStr = "temporary" + + // View Modifiers + MaterializedStr = "materialized" + + // Where.Type + WhereStr = "where" + HavingStr = "having" + + // ComparisonExpr.Operator + EqualStr = "=" + LessThanStr = "<" + GreaterThanStr = ">" + LessEqualStr = "<=" + GreaterEqualStr = ">=" + NotEqualStr = "!=" + NullSafeEqualStr = "<=>" + InStr = "in" + NotInStr = "not in" + LikeStr = "like" + NotLikeStr = "not like" + RegexpStr = "regexp" + NotRegexpStr = "not regexp" + JSONExtractOp = "->" + JSONUnquoteExtractOp = "->>" + + // RangeCond.Operator + BetweenStr = "between" + NotBetweenStr = "not between" + + // IsExpr.Operator + IsNullStr = "is null" + IsNotNullStr = "is not null" + IsTrueStr = "is true" + IsNotTrueStr = "is not true" + IsFalseStr = "is false" + IsNotFalseStr = "is not false" + + // BinaryExpr.Operator + BitAndStr = "&" + BitOrStr = "|" + BitXorStr = "^" + PlusStr = "+" + MinusStr = "-" + MultStr = "*" + DivStr = "/" + IntDivStr = "div" + ModStr = "%" + ShiftLeftStr = "<<" + ShiftRightStr = ">>" + + // UnaryExpr.Operator + UPlusStr = "+" + UMinusStr = "-" + TildaStr = "~" + BangStr = "!" + BinaryStr = "binary " + UBinaryStr = "_binary " + Utf8mb4Str = "_utf8mb4 " + Utf8Str = "_utf8 " + Latin1Str = "_latin1 " + + // this string is "character set" and this comment is required + CharacterSetStr = " character set" + CharsetStr = "charset" + + // MatchExpr.Option + BooleanModeStr = " in boolean mode" + NaturalLanguageModeStr = " in natural language mode" + NaturalLanguageModeWithQueryExpansionStr = " in natural language mode with query expansion" + QueryExpansionStr = " with query expansion" + + // Order.Direction + AscScr = "asc" + DescScr = "desc" + + // SetExpr.Expr, for SET TRANSACTION ... or START TRANSACTION + // TransactionStr is the Name for a SET TRANSACTION statement + TransactionStr = "transaction" + + // Transaction isolation levels + ReadUncommitted = "read uncommitted" + ReadCommitted = "read committed" + RepeatableRead = "repeatable read" + Serializable = "serializable" + + TxReadOnly = "read only" + TxReadWrite = "read write" + + // Explain formats + TreeStr = "tree" + JSONStr = "json" + VitessStr = "vitess" + TraditionalStr = "traditional" + AnalyzeStr = "analyze" + + // Auth constants + InteractiveStr = "interactive" + ServiceAccountStr = "serviceaccount" + + // Cardinality increase functions + JsonEachStr = "json_each" + JsonArrayElementsTextStr = "json_array_elements_text" + + // Window function frame unit types + RowsStr = "rows" + RangeStr = "range" + + // Window function frame point types + UnboundedPrecedingStr = "unbounded preceding" + UnboundedFollowingStr = "unbounded following" + CurrentRowStr = "current row" + PrecedingStr = "preceding" + FollowingStr = "following" +) diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/cte_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/cte_test.go new file mode 100644 index 00000000..cf313614 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/cte_test.go @@ -0,0 +1,51 @@ +package sqlparser + +import ( + "testing" +) + +func TestCTEs(t *testing.T) { + tests := []struct { + name string + sql string + valid bool + }{ + { + name: "simple CTE", + sql: "WITH cte AS (SELECT id FROM t) SELECT * FROM cte", + valid: true, + }, + { + name: "CTE with column list", + sql: "WITH cte (col1, col2) AS (SELECT id, name FROM t) SELECT * FROM cte", + valid: true, + }, + { + name: "multiple CTEs", + sql: "WITH cte1 AS (SELECT id FROM t1), cte2 AS (SELECT id FROM t2) SELECT * FROM cte1 JOIN cte2", + valid: true, + }, + { + name: "recursive CTE", + sql: "WITH RECURSIVE cte AS (SELECT 1 AS n UNION ALL SELECT n + 1 FROM cte WHERE n < 10) SELECT * FROM cte", + valid: true, + }, + { + name: "CTE with window function", + sql: "WITH sales AS (SELECT product, amount FROM orders) SELECT product, SUM(amount) OVER (ORDER BY product) FROM sales", + valid: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + _, err := Parse(tc.sql) + if tc.valid && err != nil { + t.Errorf("expected valid SQL but got error: %v", err) + } + if !tc.valid && err == nil { + t.Errorf("expected invalid SQL but got success") + } + }) + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/encodable.go b/internal/stackql-parser-fork/go/vt/sqlparser/encodable.go new file mode 100644 index 00000000..f9da17cf --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/encodable.go @@ -0,0 +1,99 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "strings" + + "github.com/stackql/stackql-parser/go/sqltypes" +) + +// This file contains types that are 'Encodable'. + +// Encodable defines the interface for types that can +// be custom-encoded into SQL. +type Encodable interface { + EncodeSQL(buf *strings.Builder) +} + +// InsertValues is a custom SQL encoder for the values of +// an insert statement. +type InsertValues [][]sqltypes.Value + +// EncodeSQL performs the SQL encoding for InsertValues. +func (iv InsertValues) EncodeSQL(buf *strings.Builder) { + for i, rows := range iv { + if i != 0 { + buf.WriteString(", ") + } + buf.WriteByte('(') + for j, bv := range rows { + if j != 0 { + buf.WriteString(", ") + } + bv.EncodeSQL(buf) + } + buf.WriteByte(')') + } +} + +// TupleEqualityList is for generating equality constraints +// for tables that have composite primary keys. +type TupleEqualityList struct { + Columns []ColIdent + Rows [][]sqltypes.Value +} + +// EncodeSQL generates the where clause constraints for the tuple +// equality. +func (tpl *TupleEqualityList) EncodeSQL(buf *strings.Builder) { + if len(tpl.Columns) == 1 { + tpl.encodeAsIn(buf) + return + } + tpl.encodeAsEquality(buf) +} + +func (tpl *TupleEqualityList) encodeAsIn(buf *strings.Builder) { + Append(buf, tpl.Columns[0]) + buf.WriteString(" in (") + for i, r := range tpl.Rows { + if i != 0 { + buf.WriteString(", ") + } + r[0].EncodeSQL(buf) + } + buf.WriteByte(')') +} + +func (tpl *TupleEqualityList) encodeAsEquality(buf *strings.Builder) { + for i, r := range tpl.Rows { + if i != 0 { + buf.WriteString(" or ") + } + buf.WriteString("(") + for j, c := range tpl.Columns { + if j != 0 { + buf.WriteString(" and ") + } + Append(buf, c) + buf.WriteString(" = ") + r[j].EncodeSQL(buf) + } + buf.WriteByte(')') + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/encodable_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/encodable_test.go new file mode 100644 index 00000000..045615b3 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/encodable_test.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "strings" + "testing" + + "github.com/stackql/stackql-parser/go/sqltypes" +) + +func TestEncodable(t *testing.T) { + tcases := []struct { + in Encodable + out string + }{{ + in: InsertValues{{ + sqltypes.NewInt64(1), + sqltypes.NewVarBinary("foo('a')"), + }, { + sqltypes.NewInt64(2), + sqltypes.NewVarBinary("bar(`b`)"), + }}, + out: "(1, 'foo(\\'a\\')'), (2, 'bar(`b`)')", + }, { + // Single column. + in: &TupleEqualityList{ + Columns: []ColIdent{NewColIdent("pk")}, + Rows: [][]sqltypes.Value{ + {sqltypes.NewInt64(1)}, + {sqltypes.NewVarBinary("aa")}, + }, + }, + out: "pk in (1, 'aa')", + }, { + // Multiple columns. + in: &TupleEqualityList{ + Columns: []ColIdent{NewColIdent("pk1"), NewColIdent("pk2")}, + Rows: [][]sqltypes.Value{ + { + sqltypes.NewInt64(1), + sqltypes.NewVarBinary("aa"), + }, + { + sqltypes.NewInt64(2), + sqltypes.NewVarBinary("bb"), + }, + }, + }, + out: "(pk1 = 1 and pk2 = 'aa') or (pk1 = 2 and pk2 = 'bb')", + }} + for _, tcase := range tcases { + buf := new(strings.Builder) + tcase.in.EncodeSQL(buf) + if out := buf.String(); out != tcase.out { + t.Errorf("EncodeSQL(%v): %s, want %s", tcase.in, out, tcase.out) + } + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/expression_converter.go b/internal/stackql-parser-fork/go/vt/sqlparser/expression_converter.go new file mode 100644 index 00000000..4509243d --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/expression_converter.go @@ -0,0 +1,71 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "fmt" + + "github.com/stackql/stackql-parser/go/vt/vtgate/evalengine" +) + +var ExprNotSupported = fmt.Errorf("Expr Not Supported") + +// Convert converts between AST expressions and executable expressions +func Convert(e Expr) (evalengine.Expr, error) { + switch node := e.(type) { + case *SQLVal: + switch node.Type { + case IntVal: + return evalengine.NewLiteralInt(node.Val) + case FloatVal: + return evalengine.NewLiteralFloat(node.Val) + case ValArg: + return &evalengine.BindVariable{Key: string(node.Val[1:])}, nil + case StrVal: + return evalengine.NewLiteralString(node.Val) + } + case *BinaryExpr: + var op evalengine.BinaryExpr + switch node.Operator { + case PlusStr: + op = &evalengine.Addition{} + case MinusStr: + op = &evalengine.Subtraction{} + case MultStr: + op = &evalengine.Multiplication{} + case DivStr: + op = &evalengine.Division{} + default: + return nil, ExprNotSupported + } + left, err := Convert(node.Left) + if err != nil { + return nil, err + } + right, err := Convert(node.Right) + if err != nil { + return nil, err + } + return &evalengine.BinaryOp{ + Expr: op, + Left: left, + Right: right, + }, nil + + } + return nil, ExprNotSupported +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/expression_rewriting.go b/internal/stackql-parser-fork/go/vt/sqlparser/expression_rewriting.go new file mode 100644 index 00000000..3149dca9 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/expression_rewriting.go @@ -0,0 +1,220 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "strings" + + "github.com/stackql/stackql-parser/go/vt/log" + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" + "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" + "github.com/stackql/stackql-parser/go/vt/vterrors" +) + +// PrepareAST will normalize the query +func PrepareAST(in Statement, bindVars map[string]*querypb.BindVariable, prefix string, parameterize bool) (*RewriteASTResult, error) { + if parameterize { + Normalize(in, bindVars, prefix) + } + return RewriteAST(in) +} + +// BindVarNeeds represents the bind vars that need to be provided as the result of expression rewriting. +type BindVarNeeds struct { + NeedLastInsertID bool + NeedDatabase bool + NeedFoundRows bool + NeedRowCount bool + NeedUserDefinedVariables []string +} + +// RewriteAST rewrites the whole AST, replacing function calls and adding column aliases to queries +func RewriteAST(in Statement) (*RewriteASTResult, error) { + er := newExpressionRewriter() + er.shouldRewriteDatabaseFunc = shouldRewriteDatabaseFunc(in) + setRewriter := &setNormalizer{} + out, ok := Rewrite(in, er.goingDown, setRewriter.rewriteSetComingUp).(Statement) + if !ok { + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "statement rewriting returned a non statement: %s", String(out)) + } + if setRewriter.err != nil { + return nil, setRewriter.err + } + + r := &RewriteASTResult{ + AST: out, + } + for k := range er.bindVars { + switch k { + case LastInsertIDName: + r.NeedLastInsertID = true + case DBVarName: + r.NeedDatabase = true + case FoundRowsName: + r.NeedFoundRows = true + case RowCountName: + r.NeedRowCount = true + default: + r.NeedUserDefinedVariables = append(r.NeedUserDefinedVariables, k) + } + } + return r, nil +} + +func shouldRewriteDatabaseFunc(in Statement) bool { + selct, ok := in.(*Select) + if !ok { + return false + } + if len(selct.From) != 1 { + return false + } + aliasedTable, ok := selct.From[0].(*AliasedTableExpr) + if !ok { + return false + } + tableName, ok := aliasedTable.Expr.(TableName) + if !ok { + return false + } + return tableName.Name.String() == "dual" +} + +// RewriteASTResult contains the rewritten ast and meta information about it +type RewriteASTResult struct { + BindVarNeeds + AST Statement // The rewritten AST +} + +type expressionRewriter struct { + bindVars map[string]struct{} + shouldRewriteDatabaseFunc bool + err error +} + +func newExpressionRewriter() *expressionRewriter { + return &expressionRewriter{bindVars: make(map[string]struct{})} +} + +const ( + //LastInsertIDName is a reserved bind var name for last_insert_id() + LastInsertIDName = "__lastInsertId" + + //DBVarName is a reserved bind var name for database() + DBVarName = "__vtdbname" + + //FoundRowsName is a reserved bind var name for found_rows() + FoundRowsName = "__vtfrows" + + //RowCountName is a reserved bind var name for row_count() + RowCountName = "__vtrcount" + + //UserDefinedVariableName is what we prepend bind var names for user defined variables + UserDefinedVariableName = "__vtudv" +) + +func (er *expressionRewriter) goingDown(cursor *Cursor) bool { + switch node := cursor.Node().(type) { + // select last_insert_id() -> select :__lastInsertId as `last_insert_id()` + case *Select: + for _, col := range node.SelectExprs { + aliasedExpr, ok := col.(*AliasedExpr) + if ok && aliasedExpr.As.IsEmpty() { + buf := NewTrackedBuffer(nil) + aliasedExpr.Expr.Format(buf) + inner := newExpressionRewriter() + inner.shouldRewriteDatabaseFunc = er.shouldRewriteDatabaseFunc + tmp := Rewrite(aliasedExpr.Expr, inner.goingDown, nil) + newExpr, ok := tmp.(Expr) + if !ok { + log.Errorf("failed to rewrite AST. function expected to return Expr returned a %s", String(tmp)) + return false + } + aliasedExpr.Expr = newExpr + if inner.didAnythingChange() { + aliasedExpr.As = NewColIdent(buf.String()) + } + for k := range inner.bindVars { + er.needBindVarFor(k) + } + } + } + case *FuncExpr: + er.funcRewrite(cursor, node) + case *ColName: + if node.Name.at == SingleAt { + udv := strings.ToLower(node.Name.CompliantName()) + cursor.Replace(bindVarExpression(UserDefinedVariableName + udv)) + er.needBindVarFor(udv) + } + } + return true +} + +func (er *expressionRewriter) funcRewrite(cursor *Cursor, node *FuncExpr) { + switch { + // last_insert_id() -> :__lastInsertId + case node.Name.EqualString("last_insert_id"): + if len(node.Exprs) > 0 { //last_insert_id(x) + er.err = vterrors.New(vtrpc.Code_UNIMPLEMENTED, "Argument to LAST_INSERT_ID() not supported") + } else { + cursor.Replace(bindVarExpression(LastInsertIDName)) + er.needBindVarFor(LastInsertIDName) + } + // database() -> :__vtdbname + case er.shouldRewriteDatabaseFunc && + (node.Name.EqualString("database") || + node.Name.EqualString("schema")): + if len(node.Exprs) > 0 { + er.err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "Syntax error. %s() takes no arguments", node.Name.String()) + } else { + cursor.Replace(bindVarExpression(DBVarName)) + er.needBindVarFor(DBVarName) + } + // found_rows() -> :__vtfrows + case node.Name.EqualString("found_rows"): + if len(node.Exprs) > 0 { + er.err = vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "Arguments to FOUND_ROWS() not supported") + } else { + cursor.Replace(bindVarExpression(FoundRowsName)) + er.needBindVarFor(FoundRowsName) + } + // row_count() -> :__vtrcount + case node.Name.EqualString("row_count"): + if len(node.Exprs) > 0 { + er.err = vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "Arguments to ROW_COUNT() not supported") + } else { + cursor.Replace(bindVarExpression(RowCountName)) + er.needBindVarFor(RowCountName) + } + } +} + +// instead of creating new objects, we'll reuse this one +var token = struct{}{} + +func (er *expressionRewriter) needBindVarFor(name string) { + er.bindVars[name] = token +} + +func (er *expressionRewriter) didAnythingChange() bool { + return len(er.bindVars) > 0 +} + +func bindVarExpression(name string) *SQLVal { + return NewValArg([]byte(":" + name)) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/expression_rewriting_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/expression_rewriting_test.go new file mode 100644 index 00000000..110c4883 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/expression_rewriting_test.go @@ -0,0 +1,145 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type myTestCase struct { + in, expected string + liid, db, foundRows, rowCount bool + udv int +} + +func TestRewrites(in *testing.T) { + tests := []myTestCase{ + { + in: "SELECT 42", + expected: "SELECT 42", + // no bindvar needs + }, + { + in: "SELECT last_insert_id()", + expected: "SELECT :__lastInsertId as `last_insert_id()`", + liid: true, + }, + { + in: "SELECT database()", + expected: "SELECT :__vtdbname as `database()`", + db: true, + }, + { + in: "SELECT database() from test", + expected: "SELECT database() from test", + // no bindvar needs + }, + { + in: "SELECT last_insert_id() as test", + expected: "SELECT :__lastInsertId as test", + liid: true, + }, + { + in: "SELECT last_insert_id() + database()", + expected: "SELECT :__lastInsertId + :__vtdbname as `last_insert_id() + database()`", + db: true, liid: true, + }, + { + in: "select (select database()) from test", + expected: "select (select database() from dual) from test", + // no bindvar needs + }, + { + in: "select (select database() from dual) from test", + expected: "select (select database() from dual) from test", + // no bindvar needs + }, + { + in: "select (select database() from dual) from dual", + expected: "select (select :__vtdbname as `database()` from dual) as `(select database() from dual)` from dual", + db: true, + }, + { + in: "select id from user where database()", + expected: "select id from user where database()", + // no bindvar needs + }, + { + in: "select table_name from information_schema.tables where table_schema = database()", + expected: "select table_name from information_schema.tables where table_schema = database()", + // no bindvar needs + }, + { + in: "select schema()", + expected: "select :__vtdbname as `schema()`", + db: true, + }, + { + in: "select found_rows()", + expected: "select :__vtfrows as `found_rows()`", + foundRows: true, + }, + { + in: "select @`x y`", + expected: "select :__vtudvx_y as `@``x y``` from dual", + udv: 1, + }, + { + in: "select id from t where id = @x and val = @y", + expected: "select id from t where id = :__vtudvx and val = :__vtudvy", + db: false, udv: 2, + }, + { + in: "insert into t(id) values(@xyx)", + expected: "insert into t(id) values(:__vtudvxyx)", + db: false, udv: 1, + }, + { + in: "select row_count()", + expected: "select :__vtrcount as `row_count()`", + rowCount: true, + }, + { + in: "SELECT lower(database())", + expected: "SELECT lower(:__vtdbname) as `lower(database())`", + db: true, + }, + } + + for _, tc := range tests { + in.Run(tc.in, func(t *testing.T) { + stmt, err := Parse(tc.in) + require.NoError(t, err) + + result, err := RewriteAST(stmt) + require.NoError(t, err) + + expected, err := Parse(tc.expected) + require.NoError(t, err, "test expectation does not parse [%s]", tc.expected) + + s := String(expected) + require.Equal(t, s, String(result.AST)) + require.Equal(t, tc.liid, result.NeedLastInsertID, "should need last insert id") + require.Equal(t, tc.db, result.NeedDatabase, "should need database name") + require.Equal(t, tc.foundRows, result.NeedFoundRows, "should need found rows") + require.Equal(t, tc.rowCount, result.NeedRowCount, "should need row count") + require.Equal(t, tc.udv, len(result.NeedUserDefinedVariables), "should need row count") + }) + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/expressions_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/expressions_test.go new file mode 100644 index 00000000..72b0e62e --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/expressions_test.go @@ -0,0 +1,102 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "testing" + + "github.com/stackql/stackql-parser/go/vt/vtgate/evalengine" + + "github.com/stackql/stackql-parser/go/sqltypes" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +/* +These tests should in theory live in the sqltypes package but they live here so we can +exercise both expression conversion and evaluation in the same test file +*/ + +func TestEvaluate(t *testing.T) { + type testCase struct { + expression string + expected sqltypes.Value + } + + tests := []testCase{{ + expression: "42", + expected: sqltypes.NewInt64(42), + }, { + expression: "42.42", + expected: sqltypes.NewFloat64(42.42), + }, { + expression: "40+2", + expected: sqltypes.NewInt64(42), + }, { + expression: "40-2", + expected: sqltypes.NewInt64(38), + }, { + expression: "40*2", + expected: sqltypes.NewInt64(80), + }, { + expression: "40/2", + expected: sqltypes.NewFloat64(20), + }, { + expression: ":exp", + expected: sqltypes.NewInt64(66), + }, { + expression: ":uint64_bind_variable", + expected: sqltypes.NewUint64(22), + }, { + expression: ":string_bind_variable", + expected: sqltypes.NewVarBinary("bar"), + }, { + expression: ":float_bind_variable", + expected: sqltypes.NewFloat64(2.2), + }} + + for _, test := range tests { + t.Run(test.expression, func(t *testing.T) { + // Given + stmt, err := Parse("select " + test.expression) + require.NoError(t, err) + astExpr := stmt.(*Select).SelectExprs[0].(*AliasedExpr).Expr + sqltypesExpr, err := Convert(astExpr) + require.Nil(t, err) + require.NotNil(t, sqltypesExpr) + env := evalengine.ExpressionEnv{ + BindVars: map[string]*querypb.BindVariable{ + "exp": sqltypes.Int64BindVariable(66), + "string_bind_variable": sqltypes.StringBindVariable("bar"), + "uint64_bind_variable": sqltypes.Uint64BindVariable(22), + "float_bind_variable": sqltypes.Float64BindVariable(2.2), + }, + Row: nil, + } + + // When + r, err := sqltypesExpr.Evaluate(env) + + // Then + require.NoError(t, err) + assert.Equal(t, test.expected, r.Value(), "expected %s", test.expected.String()) + }) + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/external_visitor.go b/internal/stackql-parser-fork/go/vt/sqlparser/external_visitor.go new file mode 100644 index 00000000..48ba6ee1 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/external_visitor.go @@ -0,0 +1,125 @@ +package sqlparser + +type SQLAstVisitor interface { + Visit(SQLNode) error +} + +func (node *AccessMode) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *AliasedExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *AliasedTableExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *AndExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Auth) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *AuthRevoke) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *AutoIncSpec) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Begin) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *BinaryExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node BoolVal) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *CaseExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node ColIdent) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ColName) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *CollateExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ColumnDefinition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node Columns) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ColumnType) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node Comments) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Commit) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *CommonTableExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ComparisonExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ConstraintDefinition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ConvertExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ConvertType) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ConvertUsingExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *CurTimeFuncExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *DBDDL) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *DDL) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Default) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Delete) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *DescribeTable) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Exec) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ExecSubquery) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ExistsExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Explain) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node Exprs) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ForeignKeyDefinition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *FrameClause) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *FramePoint) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *FuncExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node GroupBy) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *GroupConcatExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *IndexDefinition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *IndexHints) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *IndexInfo) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Insert) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *IntervalExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *IsExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *IsolationLevel) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node JoinCondition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *JoinTableExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Limit) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node ListArg) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *MatchExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node Nextval) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *NotExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *NullVal) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *OptLike) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node OnDup) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node OrderBy) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *OrExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Order) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *OverClause) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *OtherAdmin) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *OtherRead) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ParenSelect) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ParenTableExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *PartitionDefinition) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node Partitions) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *PartitionSpec) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Purge) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *NativeQuery) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *RangeCond) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node ReferenceAction) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Registry) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *RefreshMaterializedView) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Release) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Rollback) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *SQLVal) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *SRollback) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Savepoint) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Select) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node SelectExprs) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Set) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *SetExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node SetExprs) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *SetTransaction) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Show) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ShowFilter) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Sleep) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *StarExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Stream) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Subquery) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *SubstrExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node TableExprs) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node TableIdent) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node TableName) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node TableNames) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *TableSpec) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *TableValuedFuncTableExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *TimestampFuncExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *UnaryCastConcatamerExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *UnaryExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Union) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *UnionSelect) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Update) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *UpdateExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node UpdateExprs) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Use) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node ValTuple) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node Values) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *ValuesFuncExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node VindexParam) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *VindexSpec) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *When) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *Where) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *WindowSpec) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *With) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } +func (node *XorExpr) Accept(vis SQLAstVisitor) error { return vis.Visit(node) } diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/fuzz.go b/internal/stackql-parser-fork/go/vt/sqlparser/fuzz.go new file mode 100644 index 00000000..35699150 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/fuzz.go @@ -0,0 +1,25 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +func Fuzz(data []byte) int { + _, err := Parse(string(data)) + if err != nil { + return 0 + } + return 1 +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/impossible_query.go b/internal/stackql-parser-fork/go/vt/sqlparser/impossible_query.go new file mode 100644 index 00000000..2437b551 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/impossible_query.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +// FormatImpossibleQuery creates an impossible query in a TrackedBuffer. +// An impossible query is a modified version of a query where all selects have where clauses that are +// impossible for mysql to resolve. This is used in the vtgate and vttablet: +// +// - In the vtgate it's used for joins: if the first query returns no result, then vtgate uses the impossible +// query just to fetch field info from vttablet +// - In the vttablet, it's just an optimization: the field info is fetched once form MySQL, cached and reused +// for subsequent queries +func FormatImpossibleQuery(buf *TrackedBuffer, node SQLNode) { + switch node := node.(type) { + case *Select: + buf.Myprintf("select %v from %v where 1 != 1", node.SelectExprs, node.From) + if node.GroupBy != nil { + node.GroupBy.Format(buf) + } + case *Union: + buf.astPrintf(node, "%v", node.FirstStatement) + for _, us := range node.UnionSelects { + buf.astPrintf(node, "%v", us) + } + default: + node.Format(buf) + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/like_filter.go b/internal/stackql-parser-fork/go/vt/sqlparser/like_filter.go new file mode 100644 index 00000000..b4219ca6 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/like_filter.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + re = regexp.MustCompile(`([^\\]?|[\\]{2})[%_]`) +) + +func replacer(s string) string { + if strings.HasPrefix(s, `\\`) { + return s[2:] + } + + result := strings.Replace(s, "%", ".*", -1) + result = strings.Replace(result, "_", ".", -1) + + return result +} + +// LikeToRegexp converts a like sql expression to regular expression +func LikeToRegexp(likeExpr string) *regexp.Regexp { + if likeExpr == "" { + return regexp.MustCompile("^.*$") // Can never fail + } + + keyPattern := regexp.QuoteMeta(likeExpr) + keyPattern = re.ReplaceAllStringFunc(keyPattern, replacer) + keyPattern = fmt.Sprintf("^%s$", keyPattern) + return regexp.MustCompile(keyPattern) // Can never fail +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/like_filter_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/like_filter_test.go new file mode 100644 index 00000000..fa7d3d46 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/like_filter_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEmptyLike(t *testing.T) { + want := "^.*$" + got := LikeToRegexp("").String() + + assert.Equal(t, want, got) +} + +func TestLikePrefixRegexp(t *testing.T) { + show, e := Parse("show vitess_metadata variables like 'key%'") + if e != nil { + t.Error(e) + } + + want := "^key.*$" + got := LikeToRegexp(show.(*Show).ShowTablesOpt.Filter.Like).String() + + assert.Equal(t, want, got) +} + +func TestLikeAnyCharsRegexp(t *testing.T) { + show, e := Parse("show vitess_metadata variables like '%val1%val2%'") + if e != nil { + t.Error(e) + } + + want := "^.*val1.*val2.*$" + got := LikeToRegexp(show.(*Show).ShowTablesOpt.Filter.Like).String() + + assert.Equal(t, want, got) +} + +func TestSingleAndMultipleCharsRegexp(t *testing.T) { + show, e := Parse("show vitess_metadata variables like '_val1_val2%'") + if e != nil { + t.Error(e) + } + + want := "^.val1.val2.*$" + got := LikeToRegexp(show.(*Show).ShowTablesOpt.Filter.Like).String() + + assert.Equal(t, want, got) +} + +func TestSpecialCharactersRegexp(t *testing.T) { + show, e := Parse("show vitess_metadata variables like '?.*?'") + if e != nil { + t.Error(e) + } + + want := "^\\?\\.\\*\\?$" + got := LikeToRegexp(show.(*Show).ShowTablesOpt.Filter.Like).String() + + assert.Equal(t, want, got) +} + +func TestQuoteLikeSpecialCharacters(t *testing.T) { + show, e := Parse(`show vitess_metadata variables like 'part1_part2\\%part3_part4\\_part5%'`) + if e != nil { + t.Error(e) + } + + want := "^part1.part2%part3.part4_part5.*$" + got := LikeToRegexp(show.(*Show).ShowTablesOpt.Filter.Like).String() + + assert.Equal(t, want, got) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/normalizer.go b/internal/stackql-parser-fork/go/vt/sqlparser/normalizer.go new file mode 100644 index 00000000..d4d57787 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/normalizer.go @@ -0,0 +1,244 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "strconv" + + "github.com/stackql/stackql-parser/go/sqltypes" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +// Normalize changes the statement to use bind values, and +// updates the bind vars to those values. The supplied prefix +// is used to generate the bind var names. The function ensures +// that there are no collisions with existing bind vars. +// Within Select constructs, bind vars are deduped. This allows +// us to identify vindex equality. Otherwise, every value is +// treated as distinct. +func Normalize(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) { + nz := newNormalizer(stmt, bindVars, prefix) + _ = Walk(nz.WalkStatement, stmt) +} + +type normalizer struct { + stmt Statement + bindVars map[string]*querypb.BindVariable + prefix string + reserved map[string]struct{} + counter int + vals map[string]string +} + +func newNormalizer(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) *normalizer { + return &normalizer{ + stmt: stmt, + bindVars: bindVars, + prefix: prefix, + reserved: GetBindvars(stmt), + counter: 1, + vals: make(map[string]string), + } +} + +// WalkStatement is the top level walk function. +// If it encounters a Select, it switches to a mode +// where variables are deduped. +func (nz *normalizer) WalkStatement(node SQLNode) (bool, error) { + switch node := node.(type) { + case *Select: + _ = Walk(nz.WalkSelect, node) + // Don't continue + return false, nil + case *SQLVal: + nz.convertSQLVal(node) + case *ComparisonExpr: + nz.convertComparison(node) + case *ColName, TableName: + // Common node types that never contain SQLVals or ListArgs but create a lot of object + // allocations. + return false, nil + case *ConvertType: // we should not rewrite the type description + return false, nil + } + return true, nil +} + +// WalkSelect normalizes the AST in Select mode. +func (nz *normalizer) WalkSelect(node SQLNode) (bool, error) { + switch node := node.(type) { + case *SQLVal: + nz.convertSQLValDedup(node) + case *ComparisonExpr: + nz.convertComparison(node) + case *ColName, TableName: + // Common node types that never contain SQLVals or ListArgs but create a lot of object + // allocations. + return false, nil + case OrderBy, GroupBy: + // do not make a bind var for order by column_position + return false, nil + case *ConvertType: + // we should not rewrite the type description + return false, nil + } + return true, nil +} + +func (nz *normalizer) convertSQLValDedup(node *SQLVal) { + // If value is too long, don't dedup. + // Such values are most likely not for vindexes. + // We save a lot of CPU because we avoid building + // the key for them. + if len(node.Val) > 256 { + nz.convertSQLVal(node) + return + } + + // Make the bindvar + bval := nz.sqlToBindvar(node) + if bval == nil { + return + } + + // Check if there's a bindvar for that value already. + var key string + if bval.Type == sqltypes.VarBinary { + // Prefixing strings with "'" ensures that a string + // and number that have the same representation don't + // collide. + key = "'" + string(node.Val) + } else { + key = string(node.Val) + } + bvname, ok := nz.vals[key] + if !ok { + // If there's no such bindvar, make a new one. + bvname = nz.newName() + nz.vals[key] = bvname + nz.bindVars[bvname] = bval + } + + // Modify the AST node to a bindvar. + node.Type = ValArg + node.Val = append([]byte(":"), bvname...) +} + +// convertSQLVal converts an SQLVal without the dedup. +func (nz *normalizer) convertSQLVal(node *SQLVal) { + bval := nz.sqlToBindvar(node) + if bval == nil { + return + } + + bvname := nz.newName() + nz.bindVars[bvname] = bval + + node.Type = ValArg + node.Val = append([]byte(":"), bvname...) +} + +// convertComparison attempts to convert IN clauses to +// use the list bind var construct. If it fails, it returns +// with no change made. The walk function will then continue +// and iterate on converting each individual value into separate +// bind vars. +func (nz *normalizer) convertComparison(node *ComparisonExpr) { + if node.Operator != InStr && node.Operator != NotInStr { + return + } + tupleVals, ok := node.Right.(ValTuple) + if !ok { + return + } + // The RHS is a tuple of values. + // Make a list bindvar. + bvals := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + } + for _, val := range tupleVals { + bval := nz.sqlToBindvar(val) + if bval == nil { + return + } + bvals.Values = append(bvals.Values, &querypb.Value{ + Type: bval.Type, + Value: bval.Value, + }) + } + bvname := nz.newName() + nz.bindVars[bvname] = bvals + // Modify RHS to be a list bindvar. + node.Right = ListArg(append([]byte("::"), bvname...)) +} + +func (nz *normalizer) sqlToBindvar(node SQLNode) *querypb.BindVariable { + if node, ok := node.(*SQLVal); ok { + var v sqltypes.Value + var err error + switch node.Type { + case StrVal: + v, err = sqltypes.NewValue(sqltypes.VarBinary, node.Val) + case IntVal: + v, err = sqltypes.NewValue(sqltypes.Int64, node.Val) + case FloatVal: + v, err = sqltypes.NewValue(sqltypes.Float64, node.Val) + default: + return nil + } + if err != nil { + return nil + } + return sqltypes.ValueBindVariable(v) + } + return nil +} + +func (nz *normalizer) newName() string { + for { + newName := nz.prefix + strconv.Itoa(nz.counter) + if _, ok := nz.reserved[newName]; !ok { + nz.reserved[newName] = struct{}{} + return newName + } + nz.counter++ + } +} + +// GetBindvars returns a map of the bind vars referenced in the statement. +// TODO(sougou); This function gets called again from vtgate/planbuilder. +// Ideally, this should be done only once. +func GetBindvars(stmt Statement) map[string]struct{} { + bindvars := make(map[string]struct{}) + _ = Walk(func(node SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *ColName, TableName: + // Common node types that never contain SQLVals or ListArgs but create a lot of object + // allocations. + return false, nil + case *SQLVal: + if node.Type == ValArg { + bindvars[string(node.Val[1:])] = struct{}{} + } + case ListArg: + bindvars[string(node[2:])] = struct{}{} + } + return true, nil + }, stmt) + return bindvars +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/normalizer_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/normalizer_test.go new file mode 100644 index 00000000..534cf56c --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/normalizer_test.go @@ -0,0 +1,257 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stackql/stackql-parser/go/sqltypes" + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +func TestNormalize(t *testing.T) { + prefix := "bv" + testcases := []struct { + in string + outstmt string + outbv map[string]*querypb.BindVariable + }{{ + // str val + in: "select * from t where v1 = 'aa'", + outstmt: "select * from t where v1 = :bv1", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.BytesBindVariable([]byte("aa")), + }, + }, { + // str val in select + in: "select 'aa' from t", + outstmt: "select :bv1 from t", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.BytesBindVariable([]byte("aa")), + }, + }, { + // int val + in: "select * from t where v1 = 1", + outstmt: "select * from t where v1 = :bv1", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.Int64BindVariable(1), + }, + }, { + // float val + in: "select * from t where v1 = 1.2", + outstmt: "select * from t where v1 = :bv1", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.Float64BindVariable(1.2), + }, + }, { + // multiple vals + in: "select * from t where v1 = 1.2 and v2 = 2", + outstmt: "select * from t where v1 = :bv1 and v2 = :bv2", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.Float64BindVariable(1.2), + "bv2": sqltypes.Int64BindVariable(2), + }, + }, { + // bv collision + in: "select * from t where v1 = :bv1 and v2 = 1", + outstmt: "select * from t where v1 = :bv1 and v2 = :bv2", + outbv: map[string]*querypb.BindVariable{ + "bv2": sqltypes.Int64BindVariable(1), + }, + }, { + // val reuse + in: "select * from t where v1 = 1 and v2 = 1", + outstmt: "select * from t where v1 = :bv1 and v2 = :bv1", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.Int64BindVariable(1), + }, + }, { + // ints and strings are different + in: "select * from t where v1 = 1 and v2 = '1'", + outstmt: "select * from t where v1 = :bv1 and v2 = :bv2", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.Int64BindVariable(1), + "bv2": sqltypes.BytesBindVariable([]byte("1")), + }, + }, { + // val should not be reused for non-select statements + in: "insert into a values(1, 1)", + outstmt: "insert into a values (:bv1, :bv2)", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.Int64BindVariable(1), + "bv2": sqltypes.Int64BindVariable(1), + }, + }, { + // val should be reused only in subqueries of DMLs + in: "update a set v1=(select 5 from t), v2=5, v3=(select 5 from t), v4=5", + outstmt: "update a set v1 = (select :bv1 from t), v2 = :bv2, v3 = (select :bv1 from t), v4 = :bv3", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.Int64BindVariable(5), + "bv2": sqltypes.Int64BindVariable(5), + "bv3": sqltypes.Int64BindVariable(5), + }, + }, { + // list vars should work for DMLs also + in: "update a set v1=5 where v2 in (1, 4, 5)", + outstmt: "update a set v1 = :bv1 where v2 in ::bv2", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.Int64BindVariable(5), + "bv2": sqltypes.TestBindVariable([]interface{}{1, 4, 5}), + }, + }, { + // Hex value does not convert + in: "select * from t where v1 = 0x1234", + outstmt: "select * from t where v1 = 0x1234", + outbv: map[string]*querypb.BindVariable{}, + }, { + // Hex value does not convert for DMLs + in: "update a set v1 = 0x1234", + outstmt: "update a set v1 = 0x1234", + outbv: map[string]*querypb.BindVariable{}, + }, { + // Bin value does not convert + in: "select * from t where v1 = b'11'", + outstmt: "select * from t where v1 = B'11'", + outbv: map[string]*querypb.BindVariable{}, + }, { + // Bin value does not convert for DMLs + in: "update a set v1 = b'11'", + outstmt: "update a set v1 = B'11'", + outbv: map[string]*querypb.BindVariable{}, + }, { + // ORDER BY column_position + in: "select a, b from t order by 1 asc", + outstmt: "select a, b from t order by 1 asc", + outbv: map[string]*querypb.BindVariable{}, + }, { + // ORDER BY variable + in: "select a, b from t order by c asc", + outstmt: "select a, b from t order by c asc", + outbv: map[string]*querypb.BindVariable{}, + }, { + // Values up to len 256 will reuse. + in: fmt.Sprintf("select * from t where v1 = '%256s' and v2 = '%256s'", "a", "a"), + outstmt: "select * from t where v1 = :bv1 and v2 = :bv1", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.BytesBindVariable([]byte(fmt.Sprintf("%256s", "a"))), + }, + }, { + // Values greater than len 256 will not reuse. + in: fmt.Sprintf("select * from t where v1 = '%257s' and v2 = '%257s'", "b", "b"), + outstmt: "select * from t where v1 = :bv1 and v2 = :bv2", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.BytesBindVariable([]byte(fmt.Sprintf("%257s", "b"))), + "bv2": sqltypes.BytesBindVariable([]byte(fmt.Sprintf("%257s", "b"))), + }, + }, { + // bad int + in: "select * from t where v1 = 12345678901234567890", + outstmt: "select * from t where v1 = 12345678901234567890", + outbv: map[string]*querypb.BindVariable{}, + }, { + // comparison with no vals + in: "select * from t where v1 = v2", + outstmt: "select * from t where v1 = v2", + outbv: map[string]*querypb.BindVariable{}, + }, { + // IN clause with existing bv + in: "select * from t where v1 in ::list", + outstmt: "select * from t where v1 in ::list", + outbv: map[string]*querypb.BindVariable{}, + }, { + // IN clause with non-val values + in: "select * from t where v1 in (1, a)", + outstmt: "select * from t where v1 in (:bv1, a)", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.Int64BindVariable(1), + }, + }, { + // IN clause with vals + in: "select * from t where v1 in (1, '2')", + outstmt: "select * from t where v1 in ::bv1", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.TestBindVariable([]interface{}{1, []byte("2")}), + }, + }, { + // NOT IN clause + in: "select * from t where v1 not in (1, '2')", + outstmt: "select * from t where v1 not in ::bv1", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.TestBindVariable([]interface{}{1, []byte("2")}), + }, + }, { + // Do not normalize cast/convert types + in: `select CAST("test" AS CHAR(60))`, + outstmt: `select convert(:bv1, CHAR(60)) from dual`, + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.StringBindVariable("test"), + }, + }} + for _, tc := range testcases { + stmt, err := Parse(tc.in) + if err != nil { + t.Error(err) + continue + } + bv := make(map[string]*querypb.BindVariable) + Normalize(stmt, bv, prefix) + outstmt := String(stmt) + if outstmt != tc.outstmt { + t.Errorf("Query:\n%s:\n%s, want\n%s", tc.in, outstmt, tc.outstmt) + } + if !reflect.DeepEqual(tc.outbv, bv) { + t.Errorf("Query:\n%s:\n%v, want\n%v", tc.in, bv, tc.outbv) + } + } +} + +func TestGetBindVars(t *testing.T) { + stmt, err := Parse("select * from t where :v1 = :v2 and :v2 = :v3 and :v4 in ::v5") + if err != nil { + t.Fatal(err) + } + got := GetBindvars(stmt) + want := map[string]struct{}{ + "v1": {}, + "v2": {}, + "v3": {}, + "v4": {}, + "v5": {}, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("GetBindVars: %v, want: %v", got, want) + } +} + +/* +Skipping ColName, TableName: +BenchmarkNormalize-8 1000000 2205 ns/op 821 B/op 27 allocs/op +Prior to skip: +BenchmarkNormalize-8 500000 3620 ns/op 1461 B/op 55 allocs/op +*/ +func BenchmarkNormalize(b *testing.B) { + sql := "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'" + ast, err := Parse(sql) + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + Normalize(ast, map[string]*querypb.BindVariable{}, "") + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/parse_next_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/parse_next_test.go new file mode 100644 index 00000000..9f4e9c48 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/parse_next_test.go @@ -0,0 +1,217 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "bytes" + "io" + "strings" + "testing" +) + +// TestParseNextValid concatenates all the valid SQL test cases and check it can read +// them as one long string. +func TestParseNextValid(t *testing.T) { + var sql bytes.Buffer + for _, tcase := range validSQL { + sql.WriteString(strings.TrimSuffix(tcase.input, ";")) + sql.WriteRune(';') + } + + tokens := NewTokenizer(&sql) + for i, tcase := range validSQL { + input := tcase.input + ";" + want := tcase.output + if want == "" { + want = tcase.input + } + + tree, err := ParseNext(tokens) + if err != nil { + t.Fatalf("[%d] ParseNext(%q) err: %q, want nil", i, input, err) + continue + } + + if got := String(tree); got != want { + t.Fatalf("[%d] ParseNext(%q) = %q, want %q", i, input, got, want) + } + } + + // Read once more and it should be EOF. + if tree, err := ParseNext(tokens); err != io.EOF { + t.Errorf("ParseNext(tokens) = (%q, %v) want io.EOF", String(tree), err) + } +} + +func TestIgnoreSpecialComments(t *testing.T) { + input := `SELECT 1;/*! ALTER TABLE foo DISABLE KEYS */;SELECT 2;` + + tokenizer := NewStringTokenizer(input) + tokenizer.SkipSpecialComments = true + one, err := ParseNextStrictDDL(tokenizer) + if err != nil { + t.Fatal(err) + } + two, err := ParseNextStrictDDL(tokenizer) + if err != nil { + t.Fatal(err) + } + if got, want := String(one), "select 1 from dual"; got != want { + t.Fatalf("got %s want %s", got, want) + } + if got, want := String(two), "select 2 from dual"; got != want { + t.Fatalf("got %s want %s", got, want) + } +} + +// TestParseNextErrors tests all the error cases, and ensures a valid +// SQL statement can be passed afterwards. +func TestParseNextErrors(t *testing.T) { + for _, tcase := range invalidSQL { + if tcase.excludeMulti { + // Skip tests which leave unclosed strings, or comments. + continue + } + + sql := tcase.input + "; select 1 from t" + tokens := NewStringTokenizer(sql) + + // The first statement should be an error + _, err := ParseNext(tokens) + if err == nil || err.Error() != tcase.output { + t.Fatalf("[0] ParseNext(%q) err: %q, want %q", sql, err, tcase.output) + continue + } + + // The second should be valid + tree, err := ParseNext(tokens) + if err != nil { + t.Fatalf("[1] ParseNext(%q) err: %q, want nil", sql, err) + continue + } + + want := "select 1 from t" + if got := String(tree); got != want { + t.Fatalf("[1] ParseNext(%q) = %q, want %q", sql, got, want) + } + + // Read once more and it should be EOF. + if tree, err := ParseNext(tokens); err != io.EOF { + t.Errorf("ParseNext(tokens) = (%q, %v) want io.EOF", String(tree), err) + } + } +} + +// TestParseNextEdgeCases tests various ParseNext edge cases. +func TestParseNextEdgeCases(t *testing.T) { + tests := []struct { + name string + input string + want []string + }{{ + name: "Trailing ;", + input: "select 1 from a; update a set b = 2;", + want: []string{"select 1 from a", "update a set b = 2"}, + }, { + name: "No trailing ;", + input: "select 1 from a; update a set b = 2", + want: []string{"select 1 from a", "update a set b = 2"}, + }, { + name: "Trailing whitespace", + input: "select 1 from a; update a set b = 2 ", + want: []string{"select 1 from a", "update a set b = 2"}, + }, { + name: "Trailing whitespace and ;", + input: "select 1 from a; update a set b = 2 ; ", + want: []string{"select 1 from a", "update a set b = 2"}, + }, { + name: "Handle SkipToEnd statements", + input: "set character set utf8; select 1 from a", + want: []string{"set charset 'utf8'", "select 1 from a"}, + }, { + name: "Semicolin inside a string", + input: "set character set ';'; select 1 from a", + want: []string{"set charset ';'", "select 1 from a"}, + }, { + name: "Partial DDL", + input: "create table a; select 1 from a", + want: []string{"create table a", "select 1 from a"}, + }, { + name: "Partial DDL", + input: "create table a ignore me this is garbage; select 1 from a", + want: []string{"create table a", "select 1 from a"}, + }} + + for _, test := range tests { + tokens := NewStringTokenizer(test.input) + + for i, want := range test.want { + tree, err := ParseNext(tokens) + if err != nil { + t.Fatalf("[%d] ParseNext(%q) err = %q, want nil", i, test.input, err) + continue + } + + if got := String(tree); got != want { + t.Fatalf("[%d] ParseNext(%q) = %q, want %q", i, test.input, got, want) + } + } + + // Read once more and it should be EOF. + if tree, err := ParseNext(tokens); err != io.EOF { + t.Errorf("ParseNext(%q) = (%q, %v) want io.EOF", test.input, String(tree), err) + } + + // And again, once more should be EOF. + if tree, err := ParseNext(tokens); err != io.EOF { + t.Errorf("ParseNext(%q) = (%q, %v) want io.EOF", test.input, String(tree), err) + } + } +} + +// TestParseNextEdgeCases tests various ParseNext edge cases. +func TestParseNextStrictNonStrict(t *testing.T) { + // This is one of the edge cases above. + input := "create table a ignore me this is garbage; select 1 from a" + want := []string{"create table a", "select 1 from a"} + + // First go through as expected with non-strict DDL parsing. + tokens := NewStringTokenizer(input) + for i, want := range want { + tree, err := ParseNext(tokens) + if err != nil { + t.Fatalf("[%d] ParseNext(%q) err = %q, want nil", i, input, err) + } + if got := String(tree); got != want { + t.Fatalf("[%d] ParseNext(%q) = %q, want %q", i, input, got, want) + } + } + + // Now try again with strict parsing and observe the expected error. + tokens = NewStringTokenizer(input) + _, err := ParseNextStrictDDL(tokens) + if err == nil || !strings.Contains(err.Error(), "ignore") { + t.Fatalf("ParseNext(%q) err = %q, want ignore", input, err) + } + tree, err := ParseNextStrictDDL(tokens) + if err != nil { + t.Fatalf("ParseNext(%q) err = %q, want nil", input, err) + } + if got := String(tree); got != want[1] { + t.Fatalf("ParseNext(%q) = %q, want %q", input, got, want) + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/parse_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/parse_test.go new file mode 100644 index 00000000..83a8a178 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/parse_test.go @@ -0,0 +1,2923 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "bufio" + "bytes" + "fmt" + "math/rand" + "os" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +var ( + validSQL = []struct { + input string + output string + }{{ + input: "select 1", + output: "select 1 from dual", + }, { + input: "select 1 from t", + }, { + input: "select * from information_schema.columns", + output: "select * from information_schema.`columns`", + }, { + input: "select * from information_schema.processlist", + output: "select * from information_schema.`processlist`", + }, { + input: "select .1 from t", + }, { + input: "select 1.2e1 from t", + }, { + input: "select 1.2e+1 from t", + }, { + input: "select 1.2e-1 from t", + }, { + input: "select 08.3 from t", + }, { + input: "select -1 from t where b = -2", + }, { + input: "select - -1 from t", + output: "select 1 from t", + }, { + input: "select a from t", + }, { + input: "select $ from t", + }, { + input: "select a.b as a$b from $test$", + }, { + input: "select 1 from t // aa\n", + output: "select 1 from t", + }, { + input: "select 1 from t -- aa\n", + output: "select 1 from t", + }, { + input: "select 1 from t # aa\n", + output: "select 1 from t", + }, { + input: "select 1 --aa\nfrom t", + output: "select 1 from t", + }, { + input: "select 1 #aa\nfrom t", + output: "select 1 from t", + }, { + input: "select /* simplest */ 1 from t", + }, { + input: "select /* double star **/ 1 from t", + }, { + input: "select /* double */ /* comment */ 1 from t", + }, { + input: "select /* back-quote keyword */ `By` from t", + }, { + input: "select /* back-quote num */ `2a` from t", + }, { + input: "select /* back-quote . */ `a.b` from t", + }, { + input: "select /* back-quote back-quote */ `a``b` from t", + }, { + input: "select /* back-quote unnecessary */ 1 from `t`", + output: "select /* back-quote unnecessary */ 1 from t", + }, { + input: "select /* back-quote idnum */ 1 from `a1`", + output: "select /* back-quote idnum */ 1 from a1", + }, { + input: "select /* @ */ @@a from b", + }, { + input: "select /* \\0 */ '\\0' from a", + }, { + input: "select 1 /* drop this comment */ from t", + output: "select 1 from t", + }, { + input: "select /* union */ 1 from t union select 1 from t", + }, { + input: "select /* double union */ 1 from t union select 1 from t union select 1 from t", + }, { + input: "select /* union all */ 1 from t union all select 1 from t", + }, { + input: "select /* union distinct */ 1 from t union distinct select 1 from t", + }, { + input: "(select /* union parenthesized select */ 1 from t order by a) union select 1 from t", + output: "(select /* union parenthesized select */ 1 from t order by a asc) union select 1 from t", + }, { + input: "select /* union parenthesized select 2 */ 1 from t union (select 1 from t)", + }, { + input: "select /* union order by */ 1 from t union select 1 from t order by a", + output: "select /* union order by */ 1 from t union select 1 from t order by a asc", + }, { + input: "select /* union order by limit lock */ 1 from t union select 1 from t order by a limit 1 for update", + output: "select /* union order by limit lock */ 1 from t union select 1 from t order by a asc limit 1 for update", + }, { + input: "select /* union with limit on lhs */ 1 from t limit 1 union select 1 from t", + }, { + input: "(select id, a from t order by id limit 1) union (select id, b as a from s order by id limit 1) order by a limit 1", + output: "(select id, a from t order by id asc limit 1) union (select id, b as a from s order by id asc limit 1) order by a asc limit 1", + }, { + input: "select a from (select 1 as a from tbl1 union select 2 from tbl2) as t", + }, { + input: "select * from t1 join (select * from t2 union select * from t3) as t", + }, { + // Ensure this doesn't generate: ""select * from t1 join t2 on a = b join t3 on a = b". + input: "select * from t1 join t2 on a = b join t3", + }, { + input: "select * from t1 where col in (select 1 from dual union select 2 from dual)", + }, { + input: "select * from t1 where exists (select a from t2 union select b from t3)", + }, { + input: "select 1 from dual union select 2 from dual union all select 3 from dual union select 4 from dual union all select 5 from dual", + }, { + input: "(select 1 from dual) order by 1 asc limit 2", + }, { + input: "(select 1 from dual order by 1 desc) order by 1 asc limit 2", + }, { + input: "(select 1 from dual)", + }, { + input: "((select 1 from dual))", + }, { + input: "select 1 from (select 1 from dual) as t", + }, { + input: "select 1 from (select 1 from dual union select 2 from dual) as t", + }, { + input: "select 1 from ((select 1 from dual) union select 2 from dual) as t", + }, { + input: "select /* distinct */ distinct 1 from t", + }, { + input: "select /* straight_join */ straight_join 1 from t", + }, { + input: "select /* for update */ 1 from t for update", + }, { + input: "select /* lock in share mode */ 1 from t lock in share mode", + }, { + input: "select /* select list */ 1, 2 from t", + }, { + input: "select /* * */ * from t", + }, { + input: "select /* a.* */ a.* from t", + }, { + input: "select /* a.b.* */ a.b.* from t", + }, { + input: "select /* column alias */ a b from t", + output: "select /* column alias */ a as b from t", + }, { + input: "select /* column alias with as */ a as b from t", + }, { + input: "select /* keyword column alias */ a as `By` from t", + }, { + input: "select /* column alias as string */ a as \"b\" from t", + output: "select /* column alias as string */ a as b from t", + }, { + input: "select /* column alias as string without as */ a \"b\" from t", + output: "select /* column alias as string without as */ a as b from t", + }, { + input: "select /* a.* */ a.* from t", + }, { + input: "select next value for t", + output: "select next 1 values from t", + }, { + input: "select next value from t", + output: "select next 1 values from t", + }, { + input: "select next 10 values from t", + }, { + input: "select next :a values from t", + }, { + input: "select /* `By`.* */ `By`.* from t", + }, { + input: "select /* select with bool expr */ a = b from t", + }, { + input: "select /* case_when */ case when a = b then c end from t", + }, { + input: "select /* case_when_else */ case when a = b then c else d end from t", + }, { + input: "select /* case_when_when_else */ case when a = b then c when b = d then d else d end from t", + }, { + input: "select /* case */ case aa when a = b then c end from t", + }, { + input: "select /* parenthesis */ 1 from (t)", + }, { + input: "select /* parenthesis multi-table */ 1 from (t1, t2)", + }, { + input: "select /* table list */ 1 from t1, t2", + }, { + input: "select /* parenthessis in table list 1 */ 1 from (t1), t2", + }, { + input: "select /* parenthessis in table list 2 */ 1 from t1, (t2)", + }, { + input: "select /* use */ 1 from t1 use index (a) where b = 1", + }, { + input: "select /* use */ 1 from t1 use index () where b = 1", + }, { + input: "select /* keyword index */ 1 from t1 use index (`By`) where b = 1", + }, { + input: "select /* ignore */ 1 from t1 as t2 ignore index (a), t3 use index (b) where b = 1", + }, { + input: "select /* use */ 1 from t1 as t2 use index (a), t3 use index (b) where b = 1", + }, { + input: "select /* force */ 1 from t1 as t2 force index (a), t3 force index (b) where b = 1", + }, { + input: "select /* table alias */ 1 from t t1", + output: "select /* table alias */ 1 from t as t1", + }, { + input: "select /* table alias with as */ 1 from t as t1", + }, { + input: "select /* string table alias */ 1 from t as 't1'", + output: "select /* string table alias */ 1 from t as t1", + }, { + input: "select /* string table alias without as */ 1 from t 't1'", + output: "select /* string table alias without as */ 1 from t as t1", + }, { + input: "select /* keyword table alias */ 1 from t as `By`", + }, { + input: "select /* join */ 1 from t1 join t2", + }, { + input: "select /* join on */ 1 from t1 join t2 on a = b", + }, { + input: "select /* join on */ 1 from t1 join t2 using (a)", + }, { + input: "select /* inner join */ 1 from t1 inner join t2", + output: "select /* inner join */ 1 from t1 join t2", + }, { + input: "select /* cross join */ 1 from t1 cross join t2", + output: "select /* cross join */ 1 from t1 join t2", + }, { + input: "select /* straight_join */ 1 from t1 straight_join t2", + }, { + input: "select /* straight_join on */ 1 from t1 straight_join t2 on a = b", + }, { + input: "select /* left join */ 1 from t1 left join t2 on a = b", + }, { + input: "select /* left join */ 1 from t1 left join t2 using (a)", + }, { + input: "select /* left outer join */ 1 from t1 left outer join t2 on a = b", + output: "select /* left outer join */ 1 from t1 left join t2 on a = b", + }, { + input: "select /* left outer join */ 1 from t1 left outer join t2 using (a)", + output: "select /* left outer join */ 1 from t1 left join t2 using (a)", + }, { + input: "select /* right join */ 1 from t1 right join t2 on a = b", + }, { + input: "select /* right join */ 1 from t1 right join t2 using (a)", + }, { + input: "select /* right outer join */ 1 from t1 right outer join t2 on a = b", + output: "select /* right outer join */ 1 from t1 right join t2 on a = b", + }, { + input: "select /* right outer join */ 1 from t1 right outer join t2 using (a)", + output: "select /* right outer join */ 1 from t1 right join t2 using (a)", + }, { + input: "select /* natural join */ 1 from t1 natural join t2", + }, { + input: "select /* natural left join */ 1 from t1 natural left join t2", + }, { + input: "select /* natural left outer join */ 1 from t1 natural left join t2", + output: "select /* natural left outer join */ 1 from t1 natural left join t2", + }, { + input: "select /* natural right join */ 1 from t1 natural right join t2", + }, { + input: "select /* natural right outer join */ 1 from t1 natural right join t2", + output: "select /* natural right outer join */ 1 from t1 natural right join t2", + }, { + input: "select /* join on */ 1 from t1 join t2 on a = b", + }, { + input: "select /* join using */ 1 from t1 join t2 using (a)", + }, { + input: "select /* join using (a, b, c) */ 1 from t1 join t2 using (a, b, c)", + }, { + input: "select /* s.t */ 1 from s.t", + }, { + input: "select /* keyword schema & table name */ 1 from `By`.`bY`", + }, { + input: "select /* select in from */ 1 from (select 1 from t) as a", + }, { + input: "select /* select in from with no as */ 1 from (select 1 from t) a", + output: "select /* select in from with no as */ 1 from (select 1 from t) as a", + }, { + input: "select /* where */ 1 from t where a = b", + }, { + input: "select /* and */ 1 from t where a = b and a = c", + }, { + input: "select /* && */ 1 from t where a = b && a = c", + output: "select /* && */ 1 from t where a = b and a = c", + }, { + input: "select /* or */ 1 from t where a = b or a = c", + }, { + input: "select /* || */ 1 from t where a = b || a = c", + output: "select /* || */ 1 from t where a = b or a = c", + }, { + input: "select /* not */ 1 from t where not a = b", + }, { + input: "select /* ! */ 1 from t where a = !1", + }, { + input: "select /* bool is */ 1 from t where a = b is null", + }, { + input: "select /* bool is not */ 1 from t where a = b is not false", + }, { + input: "select /* true */ 1 from t where true", + }, { + input: "select /* false */ 1 from t where false", + }, { + input: "select /* false on left */ 1 from t where false = 0", + }, { + input: "select /* exists */ 1 from t where exists (select 1 from t)", + }, { + input: "select /* (boolean) */ 1 from t where not (a = b)", + output: "select /* (boolean) */ 1 from t where not a = b", + }, { + input: "select /* in value list */ 1 from t where a in (b, c)", + }, { + input: "select /* in select */ 1 from t where a in (select 1 from t)", + }, { + input: "select /* not in */ 1 from t where a not in (b, c)", + }, { + input: "select /* like */ 1 from t where a like b", + }, { + input: "select /* like escape */ 1 from t where a like b escape '!'", + }, { + input: "select /* not like */ 1 from t where a not like b", + }, { + input: "select /* not like escape */ 1 from t where a not like b escape '$'", + }, { + input: "select /* regexp */ 1 from t where a regexp b", + }, { + input: "select /* not regexp */ 1 from t where a not regexp b", + }, { + input: "select /* rlike */ 1 from t where a rlike b", + output: "select /* rlike */ 1 from t where a regexp b", + }, { + input: "select /* not rlike */ 1 from t where a not rlike b", + output: "select /* not rlike */ 1 from t where a not regexp b", + }, { + input: "select /* between */ 1 from t where a between b and c", + }, { + input: "select /* not between */ 1 from t where a not between b and c", + }, { + input: "select /* is null */ 1 from t where a is null", + }, { + input: "select /* is not null */ 1 from t where a is not null", + }, { + input: "select /* is true */ 1 from t where a is true", + }, { + input: "select /* is not true */ 1 from t where a is not true", + }, { + input: "select /* is false */ 1 from t where a is false", + }, { + input: "select /* is not false */ 1 from t where a is not false", + }, { + input: "select /* < */ 1 from t where a < b", + }, { + input: "select /* <= */ 1 from t where a <= b", + }, { + input: "select /* >= */ 1 from t where a >= b", + }, { + input: "select /* > */ 1 from t where a > b", + }, { + input: "select /* != */ 1 from t where a != b", + }, { + input: "select /* <> */ 1 from t where a <> b", + output: "select /* <> */ 1 from t where a != b", + }, { + input: "select /* <=> */ 1 from t where a <=> b", + }, { + input: "select /* != */ 1 from t where a != b", + }, { + input: "select /* single value expre list */ 1 from t where a in (b)", + }, { + input: "select /* select as a value expression */ 1 from t where a = (select a from t)", + }, { + input: "select /* parenthesised value */ 1 from t where a = (b)", + output: "select /* parenthesised value */ 1 from t where a = b", + }, { + input: "select /* over-parenthesize */ ((1)) from t where ((a)) in (((1))) and ((a, b)) in ((((1, 1))), ((2, 2)))", + output: "select /* over-parenthesize */ 1 from t where a in (1) and (a, b) in ((1, 1), (2, 2))", + }, { + input: "select /* dot-parenthesize */ (a.b) from t where (b.c) = 2", + output: "select /* dot-parenthesize */ a.b from t where b.c = 2", + }, { + input: "select /* & */ 1 from t where a = b & c", + }, { + input: "select /* & */ 1 from t where a = b & c", + }, { + input: "select /* | */ 1 from t where a = b | c", + }, { + input: "select /* ^ */ 1 from t where a = b ^ c", + }, { + input: "select /* + */ 1 from t where a = b + c", + }, { + input: "select /* - */ 1 from t where a = b - c", + }, { + input: "select /* * */ 1 from t where a = b * c", + }, { + input: "select /* / */ 1 from t where a = b / c", + }, { + input: "select /* % */ 1 from t where a = b % c", + }, { + input: "select /* div */ 1 from t where a = b div c", + }, { + input: "select /* MOD */ 1 from t where a = b MOD c", + output: "select /* MOD */ 1 from t where a = b % c", + }, { + input: "select /* << */ 1 from t where a = b << c", + }, { + input: "select /* >> */ 1 from t where a = b >> c", + }, { + input: "select /* % no space */ 1 from t where a = b%c", + output: "select /* % no space */ 1 from t where a = b % c", + }, { + input: "select /* u+ */ 1 from t where a = +b", + }, { + input: "select /* u- */ 1 from t where a = -b", + }, { + input: "select /* u~ */ 1 from t where a = ~b", + }, { + input: "select /* -> */ a.b -> 'ab' from t", + }, { + input: "select /* -> */ a.b ->> 'ab' from t", + }, { + input: "select /* empty function */ 1 from t where a = b()", + }, { + input: "select /* function with 1 param */ 1 from t where a = b(c)", + }, { + input: "select /* function with many params */ 1 from t where a = b(c, d)", + }, { + input: "select /* function with distinct */ count(distinct a) from t", + }, { + input: "select count(distinctrow(1)) from (select (1) from dual union all select 1 from dual) a", + output: "select count(distinct 1) from (select 1 from dual union all select 1 from dual) as a", + }, { + input: "select /* if as func */ 1 from t where a = if(b)", + }, { + input: "select /* current_timestamp */ current_timestamp() from t", + }, { + input: "select /* current_timestamp as func */ current_timestamp() from t", + }, { + input: "select /* current_timestamp with fsp */ current_timestamp(3) from t", + }, { + input: "select /* current_date */ current_date() from t", + }, { + input: "select /* current_date as func */ current_date() from t", + }, { + input: "select /* current_time */ current_time() from t", + }, { + input: "select /* current_time as func */ current_time() from t", + }, { + input: "select /* current_time with fsp */ current_time(1) from t", + }, { + input: "select /* utc_timestamp */ utc_timestamp() from t", + }, { + input: "select /* utc_timestamp as func */ utc_timestamp() from t", + }, { + input: "select /* utc_timestamp with fsp */ utc_timestamp(0) from t", + }, { + input: "select /* utc_time */ utc_time() from t", + }, { + input: "select /* utc_time as func */ utc_time() from t", + }, { + input: "select /* utc_time with fsp */ utc_time(4) from t", + }, { + input: "select /* utc_date */ utc_date() from t", + }, { + input: "select /* utc_date as func */ utc_date() from t", + }, { + input: "select /* localtime */ localtime() from t", + }, { + input: "select /* localtime as func */ localtime() from t", + }, { + input: "select /* localtime with fsp */ localtime(5) from t", + }, { + input: "select /* localtimestamp */ localtimestamp() from t", + }, { + input: "select /* localtimestamp as func */ localtimestamp() from t", + }, { + input: "select /* localtimestamp with fsp */ localtimestamp(7) from t", + }, { + input: "select /* mod as func */ a from tab where mod(b, 2) = 0", + }, { + input: "select /* database as func no param */ database() from t", + }, { + input: "select /* database as func 1 param */ database(1) from t", + }, { + input: "select /* a */ a from t", + }, { + input: "select /* a.b */ a.b from t", + }, { + input: "select /* a.b.c */ a.b.c from t", + }, { + input: "select /* keyword a.b */ `By`.`bY` from t", + }, { + input: "select /* string */ 'a' from t", + }, { + input: "select /* double quoted string */ \"a\" from t", + output: "select /* double quoted string */ 'a' from t", + }, { + input: "select /* quote quote in string */ 'a''a' from t", + output: "select /* quote quote in string */ 'a\\'a' from t", + }, { + input: "select /* double quote quote in string */ \"a\"\"a\" from t", + output: "select /* double quote quote in string */ 'a\\\"a' from t", + }, { + input: "select /* quote in double quoted string */ \"a'a\" from t", + output: "select /* quote in double quoted string */ 'a\\'a' from t", + }, { + input: "select /* backslash quote in string */ 'a\\'a' from t", + }, { + input: "select /* literal backslash in string */ 'a\\\\na' from t", + }, { + input: "select /* all escapes */ '\\0\\'\\\"\\b\\n\\r\\t\\Z\\\\' from t", + }, { + input: "select /* non-escape */ '\\x' from t", + output: "select /* non-escape */ 'x' from t", + }, { + input: "select /* unescaped backslash */ '\\n' from t", + }, { + input: "select /* value argument */ :a from t", + }, { + input: "select /* value argument with digit */ :a1 from t", + }, { + input: "select /* value argument with dot */ :a.b from t", + }, { + input: "select /* positional argument */ ? from t", + output: "select /* positional argument */ :v1 from t", + }, { + input: "select /* multiple positional arguments */ ?, ? from t", + output: "select /* multiple positional arguments */ :v1, :v2 from t", + }, { + input: "select /* list arg */ * from t where a in ::list", + }, { + input: "select /* list arg not in */ * from t where a not in ::list", + }, { + input: "select /* null */ null from t", + }, { + input: "select /* octal */ 010 from t", + }, { + input: "select /* hex */ x'f0A1' from t", + output: "select /* hex */ X'f0A1' from t", + }, { + input: "select /* hex caps */ X'F0a1' from t", + }, { + input: "select /* bit literal */ b'0101' from t", + output: "select /* bit literal */ B'0101' from t", + }, { + input: "select /* bit literal caps */ B'010011011010' from t", + }, { + input: "select /* 0x */ 0xf0 from t", + }, { + input: "select /* float */ 0.1 from t", + }, { + input: "select /* group by */ 1 from t group by a", + }, { + input: "select /* having */ 1 from t having a = b", + }, { + input: "select /* simple order by */ 1 from t order by a", + output: "select /* simple order by */ 1 from t order by a asc", + }, { + input: "select /* order by asc */ 1 from t order by a asc", + }, { + input: "select /* order by desc */ 1 from t order by a desc", + }, { + input: "select /* order by null */ 1 from t order by null", + }, { + input: "select /* limit a */ 1 from t limit a", + }, { + input: "select /* limit a,b */ 1 from t limit a, b", + }, { + input: "select /* binary unary */ a- -b from t", + output: "select /* binary unary */ a - -b from t", + }, { + input: "select /* - - */ - -b from t", + }, { + input: "select /* binary binary */ binary binary b from t", + }, { + input: "select /* binary ~ */ binary ~b from t", + }, { + input: "select /* ~ binary */ ~ binary b from t", + }, { + input: "select /* interval */ adddate('2008-01-02', interval 31 day) from t", + }, { + input: "select /* interval keyword */ adddate('2008-01-02', interval 1 year) from t", + }, { + input: "select /* TIMESTAMPADD */ TIMESTAMPADD(MINUTE, 1, '2008-01-04') from t", + output: "select /* TIMESTAMPADD */ timestampadd(MINUTE, 1, '2008-01-04') from t", + }, { + input: "select /* TIMESTAMPDIFF */ TIMESTAMPDIFF(MINUTE, '2008-01-02', '2008-01-04') from t", + output: "select /* TIMESTAMPDIFF */ timestampdiff(MINUTE, '2008-01-02', '2008-01-04') from t", + }, { + input: "select /* dual */ 1 from dual", + }, { + input: "select /* Dual */ 1 from Dual", + output: "select /* Dual */ 1 from dual", + }, { + input: "select /* DUAL */ 1 from Dual", + output: "select /* DUAL */ 1 from dual", + }, { + input: "select /* column as bool in where */ a from t where b", + }, { + input: "select /* OR of columns in where */ * from t where a or b", + }, { + input: "select /* OR of mixed columns in where */ * from t where a = 5 or b and c is not null", + }, { + input: "select /* OR in select columns */ (a or b) from t where c = 5", + output: "select /* OR in select columns */ a or b from t where c = 5", + }, { + input: "select /* XOR of columns in where */ * from t where a xor b", + }, { + input: "select /* XOR of mixed columns in where */ * from t where a = 5 xor b and c is not null", + }, { + input: "select /* XOR in select columns */ (a xor b) from t where c = 5", + output: "select /* XOR in select columns */ a xor b from t where c = 5", + }, { + input: "select /* XOR in select columns */ * from t where (1 xor c1 > 0)", + output: "select /* XOR in select columns */ * from t where 1 xor c1 > 0", + }, { + input: "select /* bool as select value */ a, true from t", + }, { + input: "select /* bool column in ON clause */ * from t join s on t.id = s.id and s.foo where t.bar", + }, { + input: "select /* bool in order by */ * from t order by a is null or b asc", + }, { + input: "select /* string in case statement */ if(max(case a when 'foo' then 1 else 0 end) = 1, 'foo', 'bar') as foobar from t", + }, { + input: "/*!show databases*/", + output: "show databases", + }, { + input: "select /*!40101 * from*/ t", + output: "select * from t", + }, { + input: "select /*! * from*/ t", + output: "select * from t", + }, { + input: "select /*!* from*/ t", + output: "select * from t", + }, { + input: "select /*!401011 from*/ t", + output: "select 1 from t", + }, { + input: "select /* dual */ 1 from dual", + }, { + input: "select * from (select 'tables') tables", + output: "select * from (select 'tables' from dual) as `tables`", + }, { + input: "insert /* simple */ into a values (1)", + }, { + input: "insert /* a.b */ into a.b values (1)", + }, { + input: "insert /* multi-value */ into a values (1, 2)", + }, { + input: "insert /* multi-value list */ into a values (1, 2), (3, 4)", + }, { + input: "insert /* no values */ into a values ()", + }, { + input: "insert /* set */ into a set a = 1, b = 2", + output: "insert /* set */ into a(a, b) values (1, 2)", + }, { + input: "insert /* set default */ into a set a = default, b = 2", + output: "insert /* set default */ into a(a, b) values (default, 2)", + }, { + input: "insert /* value expression list */ into a values (a + 1, 2 * 3)", + }, { + input: "insert /* default */ into a values (default, 2 * 3)", + }, { + input: "insert /* column list */ into a(a, b) values (1, 2)", + }, { + input: "insert into a(a, b) values (1, ifnull(null, default(b)))", + }, { + input: "insert /* qualified column list */ into a(a, b) values (1, 2)", + }, { + input: "insert /* qualified columns */ into t (t.a, t.b) values (1, 2)", + output: "insert /* qualified columns */ into t(a, b) values (1, 2)", + }, { + input: "insert /* select */ into a select b, c from d", + }, { + input: "insert /* it accepts columns with keyword action */ into a(action, b) values (1, 2)", + output: "insert /* it accepts columns with keyword action */ into a(`action`, b) values (1, 2)", + }, { + input: "insert /* no cols & paren select */ into a (select * from t)", + }, { + input: "insert /* cols & paren select */ into a(a, b, c) (select * from t)", + }, { + input: "insert /* cols & union with paren select */ into a(b, c) (select d, e from f) union (select g from h)", + }, { + input: "insert /* on duplicate */ into a values (1, 2) on duplicate key update b = func(a), c = d", + }, { + input: "insert /* bool in insert value */ into a values (1, true, false)", + }, { + input: "insert /* bool in on duplicate */ into a values (1, 2) on duplicate key update b = false, c = d", + }, { + input: "insert /* bool in on duplicate */ into a values (1, 2, 3) on duplicate key update b = values(b), c = d", + }, { + input: "insert /* bool in on duplicate */ into a values (1, 2, 3) on duplicate key update b = values(a.b), c = d", + }, { + input: "insert /* bool expression on duplicate */ into a values (1, 2) on duplicate key update b = func(a), c = a > d", + }, { + input: "insert into user(username, `status`) values ('Chuck', default(`status`))", + }, { + input: "update /* simple */ a set b = 3", + }, { + input: "update /* a.b */ a.b set b = 3", + }, { + input: "update /* list */ a set b = 3, c = 4", + }, { + input: "update /* expression */ a set b = 3 + 4", + }, { + input: "update /* where */ a set b = 3 where a = b", + }, { + input: "update /* order */ a set b = 3 order by c desc", + }, { + input: "update /* limit */ a set b = 3 limit c", + }, { + input: "update /* bool in update */ a set b = true", + }, { + input: "update /* bool expr in update */ a set b = 5 > 2", + }, { + input: "update /* bool in update where */ a set b = 5 where c", + }, { + input: "update /* table qualifier */ a set a.b = 3", + }, { + input: "update /* table qualifier */ a set t.a.b = 3", + }, { + input: "update /* table alias */ tt aa set aa.cc = 3", + output: "update /* table alias */ tt as aa set aa.cc = 3", + }, { + input: "update (select id from foo) subqalias set id = 4", + output: "update (select id from foo) as subqalias set id = 4", + }, { + input: "update foo f, bar b set f.id = b.id where b.name = 'test'", + output: "update foo as f, bar as b set f.id = b.id where b.name = 'test'", + }, { + input: "update foo f join bar b on f.name = b.name set f.id = b.id where b.name = 'test'", + output: "update foo as f join bar as b on f.name = b.name set f.id = b.id where b.name = 'test'", + }, { + input: "update /* ignore */ ignore a set b = 3", + }, { + input: "delete /* simple */ from a", + }, { + input: "delete /* a.b */ from a.b", + }, { + input: "delete /* where */ from a where a = b", + }, { + input: "delete /* order */ from a order by b desc", + }, { + input: "delete /* limit */ from a limit b", + }, { + input: "delete a from a join b on a.id = b.id where b.name = 'test'", + }, { + input: "delete a, b from a, b where a.id = b.id and b.name = 'test'", + }, { + input: "delete from a1, a2 using t1 as a1 inner join t2 as a2 where a1.id=a2.id", + output: "delete a1, a2 from t1 as a1 join t2 as a2 where a1.id = a2.id", + }, { + input: "set /* simple */ a = 3", + }, { + input: "set #simple\n b = 4", + }, { + input: "set character_set_results = utf8", + }, { + input: "set @@session.autocommit = true", + }, { + input: "set @@session.`autocommit` = true", + }, { + input: "set @@session.'autocommit' = true", + }, { + input: "set @@session.\"autocommit\" = true", + }, { + input: "set @@session.autocommit = ON", + output: "set @@session.autocommit = 'on'", + }, { + input: "set @@session.autocommit= OFF", + output: "set @@session.autocommit = 'off'", + }, { + input: "set autocommit = on", + output: "set autocommit = 'on'", + }, { + input: "set autocommit = off", + output: "set autocommit = 'off'", + }, { + input: "set names utf8 collate foo", + output: "set names 'utf8'", + }, { + input: "set names utf8 collate 'foo'", + output: "set names 'utf8'", + }, { + input: "set character set utf8", + output: "set charset 'utf8'", + }, { + input: "set character set 'utf8'", + output: "set charset 'utf8'", + }, { + input: "set character set \"utf8\"", + output: "set charset 'utf8'", + }, { + input: "set charset default", + output: "set charset default", + }, { + input: "set session wait_timeout = 3600", + output: "set session wait_timeout = 3600", + }, { + input: "set /* list */ a = 3, b = 4", + }, { + input: "set /* mixed list */ a = 3, names 'utf8', charset 'ascii', b = 4", + }, { + input: "set session transaction isolation level repeatable read", + }, { + input: "set transaction isolation level repeatable read", + }, { + input: "set global transaction isolation level repeatable read", + }, { + input: "set transaction isolation level repeatable read", + }, { + input: "set transaction isolation level read committed", + }, { + input: "set transaction isolation level read uncommitted", + }, { + input: "set transaction isolation level serializable", + }, { + input: "set transaction read write", + }, { + input: "set transaction read only", + }, { + input: "set tx_read_only = 1", + }, { + input: "set tx_read_only = 0", + }, { + input: "set transaction_read_only = 1", + }, { + input: "set transaction_read_only = 0", + }, { + input: "set tx_isolation = 'repeatable read'", + }, { + input: "set tx_isolation = 'read committed'", + }, { + input: "set tx_isolation = 'read uncommitted'", + }, { + input: "set tx_isolation = 'serializable'", + }, { + input: "set sql_safe_updates = 0", + }, { + input: "set sql_safe_updates = 1", + }, { + input: "set @variable = 42", + }, { + input: "set @period.variable = 42", + }, { + input: "alter ignore table a add foo", + output: "alter table a", + }, { + input: "alter table a add foo", + output: "alter table a", + }, { + input: "alter table a add spatial key foo (column1)", + output: "alter table a", + }, { + input: "alter table a add unique key foo (column1)", + output: "alter table a", + }, { + input: "alter table `By` add foo", + output: "alter table `By`", + }, { + input: "alter table a alter foo", + output: "alter table a", + }, { + input: "alter table a change foo", + output: "alter table a", + }, { + input: "alter table a modify foo", + output: "alter table a", + }, { + input: "alter table a drop foo", + output: "alter table a", + }, { + input: "alter table a disable foo", + output: "alter table a", + }, { + input: "alter table a enable foo", + output: "alter table a", + }, { + input: "alter table a order foo", + output: "alter table a", + }, { + input: "alter table a default foo", + output: "alter table a", + }, { + input: "alter table a discard foo", + output: "alter table a", + }, { + input: "alter table a import foo", + output: "alter table a", + }, { + input: "alter table a rename b", + output: "rename table a to b", + }, { + input: "alter table `By` rename `bY`", + output: "rename table `By` to `bY`", + }, { + input: "alter table a rename to b", + output: "rename table a to b", + }, { + input: "alter table a rename as b", + output: "rename table a to b", + }, { + input: "alter table a rename index foo to bar", + output: "alter table a", + }, { + input: "alter table a rename key foo to bar", + output: "alter table a", + }, { + input: "alter table e auto_increment = 20", + output: "alter table e", + }, { + input: "alter table e character set = 'ascii'", + output: "alter table e", + }, { + input: "alter table e default character set = 'ascii'", + output: "alter table e", + }, { + input: "alter table e comment = 'hello'", + output: "alter table e", + }, { + input: "alter table a reorganize partition b into (partition c values less than (?), partition d values less than (maxvalue))", + output: "alter table a reorganize partition b into (partition c values less than (:v1), partition d values less than (maxvalue))", + }, { + input: "alter table a partition by range (id) (partition p0 values less than (10), partition p1 values less than (maxvalue))", + output: "alter table a", + }, { + input: "alter table a add column id int", + output: "alter table a", + }, { + input: "alter table a add index idx (id)", + output: "alter table a", + }, { + input: "alter table a add fulltext index idx (id)", + output: "alter table a", + }, { + input: "alter table a add spatial index idx (id)", + output: "alter table a", + }, { + input: "alter table a add foreign key", + output: "alter table a", + }, { + input: "alter table a add primary key", + output: "alter table a", + }, { + input: "alter table a add constraint", + output: "alter table a", + }, { + input: "alter table a add id", + output: "alter table a", + }, { + input: "alter table a drop column id int", + output: "alter table a", + }, { + input: "alter table a drop partition p2712", + output: "alter table a", + }, { + input: "alter table a drop index idx (id)", + output: "alter table a", + }, { + input: "alter table a drop fulltext index idx (id)", + output: "alter table a", + }, { + input: "alter table a drop spatial index idx (id)", + output: "alter table a", + }, { + input: "alter table a add check ch_1", + output: "alter table a", + }, { + input: "alter table a drop check ch_1", + output: "alter table a", + }, { + input: "alter table a drop foreign key", + output: "alter table a", + }, { + input: "alter table a drop primary key", + output: "alter table a", + }, { + input: "alter table a drop constraint", + output: "alter table a", + }, { + input: "alter table a drop id", + output: "alter table a", + }, { + input: "alter database d default character set = charset", + output: "alter database d", + }, { + input: "alter database d character set = charset", + output: "alter database d", + }, { + input: "alter database d default collate = collation", + output: "alter database d", + }, { + input: "alter database d collate = collation", + output: "alter database d", + }, { + input: "alter schema d default character set = charset", + output: "alter database d", + }, { + input: "alter schema d character set = charset", + output: "alter database d", + }, { + input: "alter schema d default collate = collation", + output: "alter database d", + }, { + input: "alter schema d collate = collation", + output: "alter database d", + }, { + input: "create table a", + }, { + input: "create table a (\n\t`a` int\n)", + output: "create table a (\n\ta int\n)", + }, { + input: "create table `by` (\n\t`by` char\n)", + }, { + input: "create table if not exists a (\n\t`a` int\n)", + output: "create table a (\n\ta int\n)", + }, { + input: "create table a ignore me this is garbage", + output: "create table a", + }, { + input: "create table a (a int, b char, c garbage)", + output: "create table a", + }, { + input: "create table a (b1 bool not null primary key, b2 boolean not null)", + output: "create table a (\n\tb1 bool not null primary key,\n\tb2 boolean not null\n)", + }, { + input: "alter vschema create vindex hash_vdx using hash", + }, { + input: "alter vschema create vindex keyspace.hash_vdx using hash", + }, { + input: "alter vschema create vindex lookup_vdx using lookup with owner=user, table=name_user_idx, from=name, to=user_id", + }, { + input: "alter vschema create vindex xyz_vdx using xyz with param1=hello, param2='world', param3=123", + }, { + input: "alter vschema drop vindex hash_vdx", + }, { + input: "alter vschema drop vindex ks.hash_vdx", + }, { + input: "alter vschema add table a", + }, { + input: "alter vschema add table ks.a", + }, { + input: "alter vschema add sequence a_seq", + }, { + input: "alter vschema add sequence ks.a_seq", + }, { + input: "alter vschema on a add auto_increment id using a_seq", + }, { + input: "alter vschema on ks.a add auto_increment id using a_seq", + }, { + input: "alter vschema drop table a", + }, { + input: "alter vschema drop table ks.a", + }, { + input: "alter vschema on a add vindex hash (id)", + }, { + input: "alter vschema on ks.a add vindex hash (id)", + }, { + input: "alter vschema on a add vindex `hash` (`id`)", + output: "alter vschema on a add vindex hash (id)", + }, { + input: "alter vschema on `ks`.a add vindex `hash` (`id`)", + output: "alter vschema on ks.a add vindex hash (id)", + }, { + input: "alter vschema on a add vindex hash (id) using `hash`", + output: "alter vschema on a add vindex hash (id) using hash", + }, { + input: "alter vschema on a add vindex `add` (`add`)", + }, { + input: "alter vschema on a add vindex hash (id) using hash", + }, { + input: "alter vschema on a add vindex hash (id) using `hash`", + output: "alter vschema on a add vindex hash (id) using hash", + }, { + input: "alter vschema on user add vindex name_lookup_vdx (name) using lookup_hash with owner=user, table=name_user_idx, from=name, to=user_id", + }, { + input: "alter vschema on user2 add vindex name_lastname_lookup_vdx (name,lastname) using lookup with owner=`user`, table=`name_lastname_keyspace_id_map`, from=`name,lastname`, to=`keyspace_id`", + output: "alter vschema on user2 add vindex name_lastname_lookup_vdx (name, lastname) using lookup with owner=user, table=name_lastname_keyspace_id_map, from=name,lastname, to=keyspace_id", + }, { + input: "alter vschema on a drop vindex hash", + }, { + input: "alter vschema on ks.a drop vindex hash", + }, { + input: "alter vschema on a drop vindex `hash`", + output: "alter vschema on a drop vindex hash", + }, { + input: "alter vschema on a drop vindex hash", + output: "alter vschema on a drop vindex hash", + }, { + input: "alter vschema on a drop vindex `add`", + output: "alter vschema on a drop vindex `add`", + }, { + input: "create index a on b", + output: "alter table b", + }, { + input: "create unique index a on b", + output: "alter table b", + }, { + input: "create unique index a using foo on b", + output: "alter table b", + }, { + input: "create fulltext index a using foo on b", + output: "alter table b", + }, { + input: "create spatial index a using foo on b", + output: "alter table b", + }, { + input: "create view a", + output: "create table a", + }, { + input: "create or replace view a", + output: "create table a", + }, { + input: "alter view a", + output: "alter table a", + }, { + input: "rename table a to b", + output: "rename table a to b", + }, { + input: "rename table a to b, b to c", + output: "rename table a to b, b to c", + }, { + input: "drop view a", + output: "drop table a", + }, { + input: "drop table a", + output: "drop table a", + }, { + input: "drop table a, b", + output: "drop table a, b", + }, { + input: "drop table if exists a", + output: "drop table if exists a", + }, { + input: "drop view if exists a", + output: "drop table if exists a", + }, { + input: "drop index b on a", + output: "alter table a", + }, { + input: "analyze table a", + output: "otherread", + }, { + input: "flush tables", + output: "flush", + }, { + input: "flush tables with read lock", + output: "flush", + }, { + input: "show binary logs", + output: "show binary logs", + }, { + input: "show binlog events", + output: "show binlog", + }, { + input: "show character set", + output: "show charset", + }, { + input: "show character set like '%foo'", + output: "show charset like '%foo'", + }, { + input: "show charset", + output: "show charset", + }, { + input: "show charset like '%foo'", + output: "show charset like '%foo'", + }, { + input: "show charset where 'charset' = 'utf8'", + output: "show charset where 'charset' = 'utf8'", + }, { + input: "show charset where 'charset' = '%foo'", + output: "show charset where 'charset' = '%foo'", + }, { + input: "show collation", + output: "show collation", + }, { + input: "show collation where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", + output: "show collation where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", + }, { + input: "show create database d", + output: "show create database", + }, { + input: "show create event e", + output: "show create event", + }, { + input: "show create function f", + output: "show create function", + }, { + input: "show create procedure p", + output: "show create procedure", + }, { + input: "show create table t", + output: "show create table t", + }, { + input: "show create trigger t", + output: "show create trigger", + }, { + input: "show create user u", + output: "show create user", + }, { + input: "show create view v", + output: "show create view", + }, { + input: "show databases", + output: "show databases", + }, { + input: "show schemas", + output: "show schemas", + }, { + input: "show engine INNODB", + output: "show engine", + }, { + input: "show engines", + output: "show engines", + }, { + input: "show storage engines", + output: "show storage", + }, { + input: "show errors", + output: "show errors", + }, { + input: "show events", + output: "show events", + }, { + input: "show function code func", + output: "show function", + }, { + input: "show function status", + output: "show function", + }, { + input: "show grants for 'root@localhost'", + output: "show grants", + }, { + input: "show index from t", + }, { + input: "show indexes from t", + }, { + input: "show keys from t", + }, { + input: "show master status", + output: "show master", + }, { + input: "show open tables", + output: "show open", + }, { + input: "show plugins", + output: "show plugins", + }, { + input: "show privileges", + output: "show privileges", + }, { + input: "show procedure code p", + output: "show procedure", + }, { + input: "show procedure status", + output: "show procedure", + }, { + input: "show processlist", + output: "show processlist", + }, { + input: "show full processlist", + output: "show processlist", + }, { + input: "show profile cpu for query 1", + output: "show profile", + }, { + input: "show profiles", + output: "show profiles", + }, { + input: "show relaylog events", + output: "show relaylog", + }, { + input: "show slave hosts", + output: "show slave", + }, { + input: "show slave status", + output: "show slave", + }, { + input: "show status", + output: "show status", + }, { + input: "show global status", + output: "show global status", + }, { + input: "show session status", + output: "show session status", + }, { + input: "show table status", + output: "show table", + }, { + input: "show tables", + }, { + input: "show tables like '%keyspace%'", + }, { + input: "show tables where 1 = 0", + }, { + input: "show tables from a", + }, { + input: "show tables from a where 1 = 0", + }, { + input: "show tables from a like '%keyspace%'", + }, { + input: "show full tables", + }, { + input: "show full tables from a", + }, { + input: "show full tables in a", + output: "show full tables from a", + }, { + input: "show full tables from a like '%keyspace%'", + }, { + input: "show full tables from a where 1 = 0", + }, { + input: "show full tables like '%keyspace%'", + }, { + input: "show full tables where 1 = 0", + }, { + input: "show full columns from a like '%'", + }, { + input: "show full columns from messages from test_keyspace like '%'", + }, { + input: "show full fields from a like '%'", + }, { + input: "show fields from a like '%'", + }, { + input: "show triggers", + output: "show triggers", + }, { + input: "show variables", + output: "show variables", + }, { + input: "show global variables", + output: "show global variables", + }, { + input: "show session variables", + output: "show session variables", + }, { + input: "show vitess_keyspaces", + }, { + input: "show vitess_shards", + }, { + input: "show vitess_tablets", + }, { + input: "show vschema tables", + }, { + input: "show vschema vindexes", + }, { + input: "show vschema vindexes on t", + }, { + input: "show warnings", + output: "show warnings", + }, { + input: "select warnings from t", + output: "select `warnings` from t", + }, { + input: "show foobar", + output: "show foobar", + }, { + input: "show foobar like select * from table where syntax is 'ignored'", + output: "show foobar", + }, { + input: "use db", + output: "use db", + }, { + input: "use duplicate", + output: "use `duplicate`", + }, { + input: "use `ks:-80@master`", + output: "use `ks:-80@master`", + }, { + input: "use @replica", + output: "use `@replica`", + }, { + input: "use ks@replica", + output: "use `ks@replica`", + }, { + input: "describe select * from t", + output: "explain select * from t", + }, { + input: "desc select * from t", + output: "explain select * from t", + }, { + input: "desc foobar", + output: "otherread", + }, { + input: "explain t1", + output: "otherread", + }, { + input: "explain t1 col", + output: "otherread", + }, { + input: "explain select * from t", + }, { + input: "explain format = traditional select * from t", + }, { + input: "explain analyze select * from t", + }, { + input: "explain format = tree select * from t", + }, { + input: "explain format = json select * from t", + }, { + input: "explain format = vitess select * from t", + }, { + input: "describe format = vitess select * from t", + output: "explain format = vitess select * from t", + }, { + input: "explain delete from t", + }, { + input: "explain insert into t(col1, col2) values (1, 2)", + }, { + input: "explain update t set col = 2", + }, { + input: "truncate table foo", + output: "truncate table foo", + }, { + input: "truncate foo", + output: "truncate table foo", + }, { + input: "repair foo", + output: "otheradmin", + }, { + input: "optimize foo", + output: "otheradmin", + }, { + input: "lock tables foo", + output: "otheradmin", + }, { + input: "unlock tables foo", + output: "otheradmin", + }, { + input: "select /* EQ true */ 1 from t where a = true", + }, { + input: "select /* EQ false */ 1 from t where a = false", + }, { + input: "select /* NE true */ 1 from t where a != true", + }, { + input: "select /* NE false */ 1 from t where a != false", + }, { + input: "select /* LT true */ 1 from t where a < true", + }, { + input: "select /* LT false */ 1 from t where a < false", + }, { + input: "select /* GT true */ 1 from t where a > true", + }, { + input: "select /* GT false */ 1 from t where a > false", + }, { + input: "select /* LE true */ 1 from t where a <= true", + }, { + input: "select /* LE false */ 1 from t where a <= false", + }, { + input: "select /* GE true */ 1 from t where a >= true", + }, { + input: "select /* GE false */ 1 from t where a >= false", + }, { + input: "select * from t order by a collate utf8_general_ci", + output: "select * from t order by a collate utf8_general_ci asc", + }, { + input: "select k collate latin1_german2_ci as k1 from t1 order by k1 asc", + }, { + input: "select * from t group by a collate utf8_general_ci", + }, { + input: "select MAX(k collate latin1_german2_ci) from t1", + }, { + input: "select distinct k collate latin1_german2_ci from t1", + }, { + input: "select * from t1 where 'Müller' collate latin1_german2_ci = k", + }, { + input: "select * from t1 where k like 'Müller' collate latin1_german2_ci", + }, { + input: "select k from t1 group by k having k = 'Müller' collate latin1_german2_ci", + }, { + input: "select k from t1 join t2 order by a collate latin1_german2_ci asc, b collate latin1_german2_ci asc", + }, { + input: "select k collate 'latin1_german2_ci' as k1 from t1 order by k1 asc", + output: "select k collate latin1_german2_ci as k1 from t1 order by k1 asc", + }, { + input: "select /* drop trailing semicolon */ 1 from dual;", + output: "select /* drop trailing semicolon */ 1 from dual", + }, { + input: "select /* cache directive */ sql_no_cache 'foo' from t", + }, { + input: "select distinct sql_no_cache 'foo' from t", + }, { + input: "select sql_no_cache distinct 'foo' from t", + output: "select distinct sql_no_cache 'foo' from t", + }, { + input: "select sql_no_cache straight_join distinct 'foo' from t", + output: "select distinct sql_no_cache straight_join 'foo' from t", + }, { + input: "select straight_join distinct sql_no_cache 'foo' from t", + output: "select distinct sql_no_cache straight_join 'foo' from t", + }, { + input: "select sql_calc_found_rows 'foo' from t", + output: "select sql_calc_found_rows 'foo' from t", + }, { + input: "select binary 'a' = 'A' from t", + }, { + input: "select 1 from t where foo = _binary 'bar'", + }, { + input: "select 1 from t where foo = _utf8 'bar' and bar = _latin1 'sjösjuk'", + }, { + input: "select 1 from t where foo = _binary'bar'", + output: "select 1 from t where foo = _binary 'bar'", + }, { + input: "select 1 from t where foo = _utf8mb4 'bar'", + }, { + input: "select 1 from t where foo = _utf8mb4'bar'", + output: "select 1 from t where foo = _utf8mb4 'bar'", + }, { + input: "select match(a) against ('foo') from t", + }, { + input: "select match(a1, a2) against ('foo' in natural language mode with query expansion) from t", + }, { + input: "select database()", + output: "select database() from dual", + }, { + input: "select schema()", + output: "select schema() from dual", + }, { + input: "select title from video as v where match(v.title, v.tag) against ('DEMO' in boolean mode)", + }, { + input: "select name, group_concat(score) from t group by name", + }, { + input: "select name, group_concat(distinct id, score order by id desc separator ':') from t group by name", + }, { + input: "select name, group_concat(distinct id, score order by id desc separator ':' limit 1) from t group by name", + }, { + input: "select name, group_concat(distinct id, score order by id desc separator ':' limit 10, 2) from t group by name", + }, { + input: "select * from t partition (p0)", + }, { + input: "select * from t partition (p0, p1)", + }, { + input: "select e.id, s.city from employees as e join stores partition (p1) as s on e.store_id = s.id", + }, { + input: "select truncate(120.3333, 2) from dual", + }, { + input: "update t partition (p0) set a = 1", + }, { + input: "insert into t partition (p0) values (1, 'asdf')", + }, { + input: "insert into t1 select * from t2 partition (p0)", + }, { + input: "replace into t partition (p0) values (1, 'asdf')", + }, { + input: "delete from t partition (p0) where a = 1", + }, { + input: "stream * from t", + }, { + input: "stream /* comment */ * from t", + }, { + input: "begin", + }, { + input: "begin;", + output: "begin", + }, { + input: "start transaction", + output: "begin", + }, { + input: "commit", + }, { + input: "rollback", + }, { + input: "create database test_db", + }, { + input: "create schema test_db", + output: "create database test_db", + }, { + input: "create database if not exists test_db", + output: "create database test_db", + }, { + input: "drop database test_db", + }, { + input: "drop schema test_db", + output: "drop database test_db", + }, { + input: "drop database if exists test_db", + output: "drop database test_db", + }, { + input: "delete a.*, b.* from tbl_a a, tbl_b b where a.id = b.id and b.name = 'test'", + output: "delete a, b from tbl_a as a, tbl_b as b where a.id = b.id and b.name = 'test'", + }, { + input: "select distinctrow a.* from (select (1) from dual union all select 1 from dual) a", + output: "select distinct a.* from (select 1 from dual union all select 1 from dual) as a", + }, { + input: "select `weird function name`() from t", + }, { + input: "select status() from t", // should not escape function names that are keywords + }, { + input: "select * from `weird table name`", + }, { + input: "SHOW FULL TABLES FROM `jiradb` LIKE 'AO_E8B6CC_ISSUE_MAPPING'", + output: "show full tables from jiradb like 'AO_E8B6CC_ISSUE_MAPPING'", + }, { + input: "SHOW FULL COLUMNS FROM AO_E8B6CC_ISSUE_MAPPING FROM jiradb LIKE '%'", + output: "show full columns from AO_E8B6CC_ISSUE_MAPPING from jiradb like '%'", + }, { + input: "SHOW KEYS FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", + output: "show keys from AO_E8B6CC_ISSUE_MAPPING from jiradb", + }, { + input: "SHOW CREATE TABLE `jiradb`.`AO_E8B6CC_ISSUE_MAPPING`", + output: "show create table jiradb.AO_E8B6CC_ISSUE_MAPPING", + }, { + input: "SHOW INDEX FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", + output: "show index from AO_E8B6CC_ISSUE_MAPPING from jiradb", + }, { + input: "SHOW FULL TABLES FROM `jiradb` LIKE '%'", + output: "show full tables from jiradb like '%'", + }, { + input: "SHOW EXTENDED INDEX FROM `AO_E8B6CC_PROJECT_MAPPING` FROM `jiradb`", + output: "show extended index from AO_E8B6CC_PROJECT_MAPPING from jiradb", + }, { + input: "SHOW EXTENDED KEYS FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", + output: "show extended keys from AO_E8B6CC_ISSUE_MAPPING from jiradb", + }, { + input: "SHOW CREATE TABLE `jiradb`.`AO_E8B6CC_ISSUE_MAPPING`", + output: "show create table jiradb.AO_E8B6CC_ISSUE_MAPPING", + }, { + input: "SHOW INDEXES FROM `AO_E8B6CC_ISSUE_MAPPING` FROM `jiradb`", + output: "show indexes from AO_E8B6CC_ISSUE_MAPPING from jiradb", + }, { + input: "SHOW FULL TABLES FROM `jiradb` LIKE '%'", + output: "show full tables from jiradb like '%'", + }, { + input: "SHOW EXTENDED INDEXES FROM `AO_E8B6CC_PROJECT_MAPPING` FROM `jiradb`", + output: "show extended indexes from AO_E8B6CC_PROJECT_MAPPING from jiradb", + }, { + input: "SHOW EXTENDED INDEXES IN `AO_E8B6CC_PROJECT_MAPPING` IN `jiradb`", + output: "show extended indexes from AO_E8B6CC_PROJECT_MAPPING from jiradb", + }, { + input: "do 1", + output: "otheradmin", + }, { + input: "do funcCall(), 2 = 1, 3 + 1", + output: "otheradmin", + }, { + input: "savepoint a", + }, { + input: "savepoint `@@@;a`", + }, { + input: "rollback to a", + }, { + input: "rollback to `@@@;a`", + }, { + input: "rollback work to a", + output: "rollback to a", + }, { + input: "rollback to savepoint a", + output: "rollback to a", + }, { + input: "rollback work to savepoint a", + output: "rollback to a", + }, { + input: "release savepoint a", + }, { + input: "release savepoint `@@@;a`", + }} +) + +func TestValid(t *testing.T) { + for _, tcase := range validSQL { + t.Run(tcase.input, func(t *testing.T) { + if tcase.output == "" { + tcase.output = tcase.input + } + tree, err := Parse(tcase.input) + require.NoError(t, err, tcase.input) + out := String(tree) + if tcase.output != out { + t.Errorf("Parsing failed. \nExpected/Got:\n%s\n%s", tcase.output, out) + } + // This test just exercises the tree walking functionality. + // There's no way automated way to verify that a node calls + // all its children. But we can examine code coverage and + // ensure that all walkSubtree functions were called. + Walk(func(node SQLNode) (bool, error) { + return true, nil + }, tree) + }) + } +} + +// Ensure there is no corruption from using a pooled yyParserImpl in Parse. +func TestParallelValid(t *testing.T) { + parallelism := 100 + numIters := 1000 + + wg := sync.WaitGroup{} + wg.Add(parallelism) + for i := 0; i < parallelism; i++ { + go func() { + defer wg.Done() + for j := 0; j < numIters; j++ { + tcase := validSQL[rand.Intn(len(validSQL))] + if tcase.output == "" { + tcase.output = tcase.input + } + tree, err := Parse(tcase.input) + if err != nil { + t.Errorf("Parse(%q) err: %v, want nil", tcase.input, err) + continue + } + out := String(tree) + if out != tcase.output { + t.Errorf("Parse(%q) = %q, want: %q", tcase.input, out, tcase.output) + } + } + }() + } + wg.Wait() +} + +func TestInvalid(t *testing.T) { + invalidSQL := []struct { + input string + err string + }{{ + input: "select a, b from (select * from tbl) sort by a", + err: "syntax error", + }, { + input: "/*!*/", + err: "empty statement", + }} + + for _, tcase := range invalidSQL { + _, err := Parse(tcase.input) + if err == nil { + t.Errorf("Parse invalid query(%q), got: nil, want: %s...", tcase.input, tcase.err) + } + if err != nil && !strings.Contains(err.Error(), tcase.err) { + t.Errorf("Parse invalid query(%q), got: %v, want: %s...", tcase.input, err, tcase.err) + } + } +} + +func TestCaseSensitivity(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "create table A (\n\t`B` int\n)", + output: "create table A (\n\tB int\n)", + }, { + input: "create index b on A", + output: "alter table A", + }, { + input: "alter table A foo", + output: "alter table A", + }, { + input: "alter table A convert", + output: "alter table A", + }, { + // View names get lower-cased. + input: "alter view A foo", + output: "alter table a", + }, { + input: "alter table A rename to B", + output: "rename table A to B", + }, { + input: "rename table A to B", + }, { + input: "drop table B", + output: "drop table B", + }, { + input: "drop table if exists B", + output: "drop table if exists B", + }, { + input: "drop index b on A", + output: "alter table A", + }, { + input: "select a from B", + }, { + input: "select A as B from C", + }, { + input: "select B.* from c", + }, { + input: "select B.A from c", + }, { + input: "select * from B as C", + }, { + input: "select * from A.B", + }, { + input: "update A set b = 1", + }, { + input: "update A.B set b = 1", + }, { + input: "select A() from b", + }, { + input: "select A(B, C) from b", + }, { + input: "select A(distinct B, C) from b", + }, { + // IF is an exception. It's always lower-cased. + input: "select IF(B, C) from b", + output: "select if(B, C) from b", + }, { + input: "select * from b use index (A)", + }, { + input: "insert into A(A, B) values (1, 2)", + }, { + input: "CREATE TABLE A (\n\t`A` int\n)", + output: "create table A (\n\tA int\n)", + }, { + input: "create view A", + output: "create table a", + }, { + input: "alter view A", + output: "alter table a", + }, { + input: "drop view A", + output: "drop table a", + }, { + input: "drop view if exists A", + output: "drop table if exists a", + }, { + input: "select /* lock in SHARE MODE */ 1 from t lock in SHARE MODE", + output: "select /* lock in SHARE MODE */ 1 from t lock in share mode", + }, { + input: "select next VALUE from t", + output: "select next 1 values from t", + }, { + input: "select /* use */ 1 from t1 use index (A) where b = 1", + }} + for _, tcase := range validSQL { + if tcase.output == "" { + tcase.output = tcase.input + } + tree, err := Parse(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + out := String(tree) + if out != tcase.output { + t.Errorf("out: %s, want %s", out, tcase.output) + } + } +} + +func TestKeywords(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "select current_timestamp", + output: "select current_timestamp() from dual", + }, { + input: "update t set a = current_timestamp()", + }, { + input: "update t set a = current_timestamp(5)", + }, { + input: "select a, current_date from t", + output: "select a, current_date() from t", + }, { + input: "insert into t(a, b) values (current_date, current_date())", + output: "insert into t(a, b) values (current_date(), current_date())", + }, { + input: "select * from t where a > utc_timestmp()", + }, { + input: "select * from t where a > utc_timestamp(4)", + }, { + input: "update t set b = utc_timestamp + 5", + output: "update t set b = utc_timestamp() + 5", + }, { + input: "select utc_time, utc_date, utc_time(6)", + output: "select utc_time(), utc_date(), utc_time(6) from dual", + }, { + input: "select 1 from dual where localtime > utc_time", + output: "select 1 from dual where localtime() > utc_time()", + }, { + input: "select 1 from dual where localtime(2) > utc_time(1)", + output: "select 1 from dual where localtime(2) > utc_time(1)", + }, { + input: "update t set a = localtimestamp(), b = utc_timestamp", + output: "update t set a = localtimestamp(), b = utc_timestamp()", + }, { + input: "update t set a = localtimestamp(10), b = utc_timestamp(13)", + output: "update t set a = localtimestamp(10), b = utc_timestamp(13)", + }, { + input: "insert into t(a) values (unix_timestamp)", + }, { + input: "select replace(a, 'foo', 'bar') from t", + }, { + input: "update t set a = replace('1234', '2', '1')", + }, { + input: "insert into t(a, b) values ('foo', 'bar') on duplicate key update a = replace(hex('foo'), 'f', 'b')", + }, { + input: "update t set a = left('1234', 3)", + }, { + input: "select left(a, 5) from t", + }, { + input: "update t set d = adddate(date('2003-12-31 01:02:03'), interval 5 days)", + }, { + input: "insert into t(a, b) values (left('foo', 1), 'b')", + }, { + input: "insert /* qualified function */ into t(a, b) values (test.PI(), 'b')", + }, { + input: "select /* keyword in qualified id */ * from t join z on t.key = z.key", + output: "select /* keyword in qualified id */ * from t join z on t.`key` = z.`key`", + }, { + input: "select /* non-reserved keywords as unqualified cols */ date, view, offset from t", + output: "select /* non-reserved keywords as unqualified cols */ `date`, `view`, `offset` from t", + }, { + input: "select /* share and mode as cols */ share, mode from t where share = 'foo'", + output: "select /* share and mode as cols */ `share`, `mode` from t where `share` = 'foo'", + }, { + input: "select /* unused keywords as cols */ write, varying from t where trailing = 'foo'", + output: "select /* unused keywords as cols */ `write`, `varying` from t where `trailing` = 'foo'", + }, { + input: "select status from t", + output: "select `status` from t", + }, { + input: "select Status from t", + output: "select `Status` from t", + }, { + input: "select variables from t", + output: "select `variables` from t", + }, { + input: "select Variables from t", + output: "select `Variables` from t", + }} + + for _, tcase := range validSQL { + if tcase.output == "" { + tcase.output = tcase.input + } + tree, err := Parse(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + out := String(tree) + if out != tcase.output { + t.Errorf("out: %s, want %s", out, tcase.output) + } + } +} + +func TestConvert(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "select cast('abc' as date) from t", + output: "select convert('abc', date) from t", + }, { + input: "select convert('abc', binary(4)) from t", + }, { + input: "select convert('abc', binary) from t", + }, { + input: "select convert('abc', char character set binary) from t", + }, { + input: "select convert('abc', char(4) ascii) from t", + }, { + input: "select convert('abc', char unicode) from t", + }, { + input: "select convert('abc', char(4)) from t", + }, { + input: "select convert('abc', char) from t", + }, { + input: "select convert('abc', nchar(4)) from t", + }, { + input: "select convert('abc', nchar) from t", + }, { + input: "select convert('abc', signed) from t", + }, { + input: "select convert('abc', signed integer) from t", + output: "select convert('abc', signed) from t", + }, { + input: "select convert('abc', unsigned) from t", + }, { + input: "select convert('abc', unsigned integer) from t", + output: "select convert('abc', unsigned) from t", + }, { + input: "select convert('abc', decimal(3, 4)) from t", + }, { + input: "select convert('abc', decimal(4)) from t", + }, { + input: "select convert('abc', decimal) from t", + }, { + input: "select convert('abc', date) from t", + }, { + input: "select convert('abc', time(4)) from t", + }, { + input: "select convert('abc', time) from t", + }, { + input: "select convert('abc', datetime(9)) from t", + }, { + input: "select convert('abc', datetime) from t", + }, { + input: "select convert('abc', json) from t", + }, { + input: "select convert('abc' using ascii) from t", + }} + + for _, tcase := range validSQL { + if tcase.output == "" { + tcase.output = tcase.input + } + tree, err := Parse(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + out := String(tree) + if out != tcase.output { + t.Errorf("out: %s, want %s", out, tcase.output) + } + } + + invalidSQL := []struct { + input string + output string + }{{ + input: "select convert('abc' as date) from t", + output: "syntax error at position 24 near 'as'", + }, { + input: "select convert from t", + output: "syntax error at position 20 near 'from'", + }, { + input: "select cast('foo', decimal) from t", + output: "syntax error at position 19", + }, { + input: "select convert('abc', datetime(4+9)) from t", + output: "syntax error at position 34", + }, { + input: "select convert('abc', decimal(4+9)) from t", + output: "syntax error at position 33", + }, { + input: "/* a comment */", + output: "empty statement", + }, { + input: "set transaction isolation level 12345", + output: "syntax error at position 38 near '12345'", + }} + + for _, tcase := range invalidSQL { + _, err := Parse(tcase.input) + if err == nil || err.Error() != tcase.output { + t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) + } + } +} + +func TestPositionedErr(t *testing.T) { + invalidSQL := []struct { + input string + output PositionedErr + }{{ + input: "select convert('abc' as date) from t", + output: PositionedErr{"syntax error", 24, []byte("as")}, + }, { + input: "select convert from t", + output: PositionedErr{"syntax error", 20, []byte("from")}, + }, { + input: "select cast('foo', decimal) from t", + output: PositionedErr{"syntax error", 19, nil}, + }, { + input: "select convert('abc', datetime(4+9)) from t", + output: PositionedErr{"syntax error", 34, nil}, + }, { + input: "select convert('abc', decimal(4+9)) from t", + output: PositionedErr{"syntax error", 33, nil}, + }, { + input: "set transaction isolation level 12345", + output: PositionedErr{"syntax error", 38, []byte("12345")}, + }, { + input: "select * from a left join b", + output: PositionedErr{"syntax error", 28, nil}, + }, { + input: "select a from (select * from tbl)", + output: PositionedErr{"syntax error", 34, nil}, + }} + + for _, tcase := range invalidSQL { + tkn := NewStringTokenizer(tcase.input) + _, err := ParseNext(tkn) + + if posErr, ok := err.(PositionedErr); !ok { + t.Errorf("%s: %v expected PositionedErr, got (%T) %v", tcase.input, err, err, tcase.output) + } else if posErr.Pos != tcase.output.Pos || !bytes.Equal(posErr.Near, tcase.output.Near) || err.Error() != tcase.output.Error() { + t.Errorf("%s: %v, want: %v", tcase.input, err, tcase.output) + } + } +} + +func TestSubStr(t *testing.T) { + + validSQL := []struct { + input string + output string + }{{ + input: `select substr('foobar', 1) from t`, + }, { + input: "select substr(a, 1, 6) from t", + }, { + input: "select substring(a, 1) from t", + output: "select substr(a, 1) from t", + }, { + input: "select substring(a, 1, 6) from t", + output: "select substr(a, 1, 6) from t", + }, { + input: "select substr(a from 1 for 6) from t", + output: "select substr(a, 1, 6) from t", + }, { + input: "select substring(a from 1 for 6) from t", + output: "select substr(a, 1, 6) from t", + }, { + input: `select substr("foo" from 1 for 2) from t`, + output: `select substr('foo', 1, 2) from t`, + }, { + input: `select substring("foo", 1, 2) from t`, + output: `select substr('foo', 1, 2) from t`, + }, { + input: `select substr(substr("foo" from 1 for 2), 1, 2) from t`, + output: `select substr(substr('foo', 1, 2), 1, 2) from t`, + }, { + input: `select substr(substring("foo", 1, 2), 3, 4) from t`, + output: `select substr(substr('foo', 1, 2), 3, 4) from t`, + }, { + input: `select substring(substr("foo", 1), 2) from t`, + output: `select substr(substr('foo', 1), 2) from t`, + }} + + for _, tcase := range validSQL { + if tcase.output == "" { + tcase.output = tcase.input + } + tree, err := Parse(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + out := String(tree) + if out != tcase.output { + t.Errorf("out: %s, want %s", out, tcase.output) + } + } +} + +func TestCreateTable(t *testing.T) { + validSQL := []string{ + // test all the data types and options + "create table t (\n" + + " col_bit bit,\n" + + " col_tinyint tinyint auto_increment,\n" + + " col_tinyint3 tinyint(3) unsigned,\n" + + " col_smallint smallint,\n" + + " col_smallint4 smallint(4) zerofill,\n" + + " col_mediumint mediumint,\n" + + " col_mediumint5 mediumint(5) unsigned not null,\n" + + " col_int int,\n" + + " col_int10 int(10) not null,\n" + + " col_integer integer comment 'this is an integer',\n" + + " col_bigint bigint,\n" + + " col_bigint10 bigint(10) zerofill not null default 10,\n" + + " col_real real,\n" + + " col_real2 real(1,2) not null default 1.23,\n" + + " col_double double,\n" + + " col_double2 double(3,4) not null default 1.23,\n" + + " col_float float,\n" + + " col_float2 float(3,4) not null default 1.23,\n" + + " col_decimal decimal,\n" + + " col_decimal2 decimal(2),\n" + + " col_decimal3 decimal(2,3),\n" + + " col_numeric numeric,\n" + + " col_numeric2 numeric(2),\n" + + " col_numeric3 numeric(2,3),\n" + + " col_date date,\n" + + " col_time time,\n" + + " col_timestamp timestamp,\n" + + " col_datetime datetime,\n" + + " col_year year,\n" + + " col_char char,\n" + + " col_char2 char(2),\n" + + " col_char3 char(3) character set ascii,\n" + + " col_char4 char(4) character set ascii collate ascii_bin,\n" + + " col_varchar varchar,\n" + + " col_varchar2 varchar(2),\n" + + " col_varchar3 varchar(3) character set ascii,\n" + + " col_varchar4 varchar(4) character set ascii collate ascii_bin,\n" + + " col_binary binary,\n" + + " col_varbinary varbinary(10),\n" + + " col_tinyblob tinyblob,\n" + + " col_blob blob,\n" + + " col_mediumblob mediumblob,\n" + + " col_longblob longblob,\n" + + " col_tinytext tinytext,\n" + + " col_text text,\n" + + " col_mediumtext mediumtext,\n" + + " col_longtext longtext,\n" + + " col_text text character set ascii collate ascii_bin,\n" + + " col_json json,\n" + + " col_enum enum('a', 'b', 'c', 'd'),\n" + + " col_enum2 enum('a', 'b', 'c', 'd') character set ascii,\n" + + " col_enum3 enum('a', 'b', 'c', 'd') collate ascii_bin,\n" + + " col_enum4 enum('a', 'b', 'c', 'd') character set ascii collate ascii_bin,\n" + + " col_set set('a', 'b', 'c', 'd'),\n" + + " col_set2 set('a', 'b', 'c', 'd') character set ascii,\n" + + " col_set3 set('a', 'b', 'c', 'd') collate ascii_bin,\n" + + " col_set4 set('a', 'b', 'c', 'd') character set ascii collate ascii_bin,\n" + + " col_geometry1 geometry,\n" + + " col_geometry2 geometry not null,\n" + + " col_point1 point,\n" + + " col_point2 point not null,\n" + + " col_linestring1 linestring,\n" + + " col_linestring2 linestring not null,\n" + + " col_polygon1 polygon,\n" + + " col_polygon2 polygon not null,\n" + + " col_geometrycollection1 geometrycollection,\n" + + " col_geometrycollection2 geometrycollection not null,\n" + + " col_multipoint1 multipoint,\n" + + " col_multipoint2 multipoint not null,\n" + + " col_multilinestring1 multilinestring,\n" + + " col_multilinestring2 multilinestring not null,\n" + + " col_multipolygon1 multipolygon,\n" + + " col_multipolygon2 multipolygon not null\n" + + ")", + + // test defining indexes separately + "create table t (\n" + + " id int auto_increment,\n" + + " username varchar,\n" + + " email varchar,\n" + + " full_name varchar,\n" + + " geom point not null,\n" + + " status_nonkeyword varchar,\n" + + " primary key (id),\n" + + " spatial key geom (geom),\n" + + " unique key by_username (username),\n" + + " unique by_username2 (username),\n" + + " unique index by_username3 (username),\n" + + " index by_status (status_nonkeyword),\n" + + " key by_full_name (full_name)\n" + + ")", + + // test that indexes support USING + "create table t (\n" + + " id int auto_increment,\n" + + " username varchar,\n" + + " email varchar,\n" + + " full_name varchar,\n" + + " status_nonkeyword varchar,\n" + + " primary key (id) using BTREE,\n" + + " unique key by_username (username) using HASH,\n" + + " unique by_username2 (username) using OTHER,\n" + + " unique index by_username3 (username) using XYZ,\n" + + " index by_status (status_nonkeyword) using PDQ,\n" + + " key by_full_name (full_name) using OTHER\n" + + ")", + // test other index options + "create table t (\n" + + " id int auto_increment,\n" + + " username varchar,\n" + + " email varchar,\n" + + " primary key (id) comment 'hi',\n" + + " unique key by_username (username) key_block_size 8,\n" + + " unique index by_username4 (username) comment 'hi' using BTREE,\n" + + " unique index by_username4 (username) using BTREE key_block_size 4 comment 'hi'\n" + + ")", + + // multi-column indexes + "create table t (\n" + + " id int auto_increment,\n" + + " username varchar,\n" + + " email varchar,\n" + + " full_name varchar,\n" + + " a int,\n" + + " b int,\n" + + " c int,\n" + + " primary key (id, username),\n" + + " unique key by_abc (a, b, c),\n" + + " unique key (a, b, c),\n" + + " key by_email (email(10), username)\n" + + ")", + + // foreign keys + "create table t (\n" + + " id int auto_increment,\n" + + " username varchar,\n" + + " k int,\n" + + " Z int,\n" + + " primary key (id, username),\n" + + " key by_email (email(10), username),\n" + + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b),\n" + + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete restrict,\n" + + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete no action,\n" + + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete cascade on update set default,\n" + + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete set default on update set null,\n" + + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on delete set null on update restrict,\n" + + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on update no action,\n" + + " constraint second_ibfk_1 foreign key (k, j) references simple (a, b) on update cascade\n" + + ")", + + // table options + "create table t (\n" + + " id int auto_increment\n" + + ") engine InnoDB,\n" + + " auto_increment 123,\n" + + " avg_row_length 1,\n" + + " default character set utf8mb4,\n" + + " character set latin1,\n" + + " checksum 0,\n" + + " default collate binary,\n" + + " collate ascii_bin,\n" + + " comment 'this is a comment',\n" + + " compression 'zlib',\n" + + " connection 'connect_string',\n" + + " data directory 'absolute path to directory',\n" + + " delay_key_write 1,\n" + + " encryption 'n',\n" + + " index directory 'absolute path to directory',\n" + + " insert_method no,\n" + + " key_block_size 1024,\n" + + " max_rows 100,\n" + + " min_rows 10,\n" + + " pack_keys 0,\n" + + " password 'sekret',\n" + + " row_format default,\n" + + " stats_auto_recalc default,\n" + + " stats_persistent 0,\n" + + " stats_sample_pages 1,\n" + + " tablespace tablespace_name storage disk,\n" + + " tablespace tablespace_name\n", + + // boolean columns + "create table t (\n" + + " bi bigint not null primary key,\n" + + " b1 bool not null,\n" + + " b2 boolean\n" + + ")", + } + for _, sql := range validSQL { + sql = strings.TrimSpace(sql) + tree, err := ParseStrictDDL(sql) + if err != nil { + t.Errorf("input: %s, err: %v", sql, err) + continue + } + got := String(tree.(*DDL)) + + if sql != got { + t.Errorf("want:\n%s\ngot:\n%s", sql, got) + } + } + + sql := "create table t garbage" + _, err := Parse(sql) + if err != nil { + t.Errorf("input: %s, err: %v", sql, err) + } + + tree, err := ParseStrictDDL(sql) + if tree != nil || err == nil { + t.Errorf("ParseStrictDDL unexpectedly accepted input %s", sql) + } + + testCases := []struct { + input string + output string + }{{ + // test key_block_size + input: "create table t (\n" + + " id int auto_increment,\n" + + " username varchar,\n" + + " unique key by_username (username) key_block_size 8,\n" + + " unique key by_username2 (username) key_block_size=8,\n" + + " unique by_username3 (username) key_block_size = 4\n" + + ")", + output: "create table t (\n" + + " id int auto_increment,\n" + + " username varchar,\n" + + " unique key by_username (username) key_block_size 8,\n" + + " unique key by_username2 (username) key_block_size 8,\n" + + " unique by_username3 (username) key_block_size 4\n" + + ")", + }, { + // test defaults + input: "create table t (\n" + + " i1 int default 1,\n" + + " i2 int default null,\n" + + " f1 float default 1.23,\n" + + " s1 varchar default 'c',\n" + + " s2 varchar default 'this is a string',\n" + + " s3 varchar default null,\n" + + " s4 timestamp default current_timestamp,\n" + + " s5 bit(1) default B'0'\n" + + ")", + output: "create table t (\n" + + " i1 int default 1,\n" + + " i2 int default null,\n" + + " f1 float default 1.23,\n" + + " s1 varchar default 'c',\n" + + " s2 varchar default 'this is a string',\n" + + " s3 varchar default null,\n" + + " s4 timestamp default current_timestamp(),\n" + + " s5 bit(1) default B'0'\n" + + ")", + }, { + // test key field options + input: "create table t (\n" + + " id int auto_increment primary key,\n" + + " username varchar unique key,\n" + + " email varchar unique,\n" + + " full_name varchar key,\n" + + " time1 timestamp on update current_timestamp,\n" + + " time2 timestamp default current_timestamp on update current_timestamp\n" + + ")", + output: "create table t (\n" + + " id int auto_increment primary key,\n" + + " username varchar unique key,\n" + + " email varchar unique,\n" + + " full_name varchar key,\n" + + " time1 timestamp on update current_timestamp(),\n" + + " time2 timestamp default current_timestamp() on update current_timestamp()\n" + + ")", + }, { + // test current_timestamp with and without () + input: "create table t (\n" + + " time1 timestamp default current_timestamp,\n" + + " time2 timestamp default current_timestamp(),\n" + + " time3 timestamp default current_timestamp on update current_timestamp,\n" + + " time4 timestamp default current_timestamp() on update current_timestamp(),\n" + + " time5 timestamp(3) default current_timestamp(3) on update current_timestamp(3)\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default current_timestamp(),\n" + + " time2 timestamp default current_timestamp(),\n" + + " time3 timestamp default current_timestamp() on update current_timestamp(),\n" + + " time4 timestamp default current_timestamp() on update current_timestamp(),\n" + + " time5 timestamp(3) default current_timestamp(3) on update current_timestamp(3)\n" + + ")", + }, { + // test utc_timestamp with and without () + input: "create table t (\n" + + " time1 timestamp default utc_timestamp,\n" + + " time2 timestamp default utc_timestamp(),\n" + + " time3 timestamp default utc_timestamp on update utc_timestamp,\n" + + " time4 timestamp default utc_timestamp() on update utc_timestamp(),\n" + + " time5 timestamp(4) default utc_timestamp(4) on update utc_timestamp(4)\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default utc_timestamp(),\n" + + " time2 timestamp default utc_timestamp(),\n" + + " time3 timestamp default utc_timestamp() on update utc_timestamp(),\n" + + " time4 timestamp default utc_timestamp() on update utc_timestamp(),\n" + + " time5 timestamp(4) default utc_timestamp(4) on update utc_timestamp(4)\n" + + ")", + }, { + // test utc_time with and without () + input: "create table t (\n" + + " time1 timestamp default utc_time,\n" + + " time2 timestamp default utc_time(),\n" + + " time3 timestamp default utc_time on update utc_time,\n" + + " time4 timestamp default utc_time() on update utc_time(),\n" + + " time5 timestamp(5) default utc_time(5) on update utc_time(5)\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default utc_time(),\n" + + " time2 timestamp default utc_time(),\n" + + " time3 timestamp default utc_time() on update utc_time(),\n" + + " time4 timestamp default utc_time() on update utc_time(),\n" + + " time5 timestamp(5) default utc_time(5) on update utc_time(5)\n" + + ")", + }, { + // test utc_date with and without () + input: "create table t (\n" + + " time1 timestamp default utc_date,\n" + + " time2 timestamp default utc_date(),\n" + + " time3 timestamp default utc_date on update utc_date,\n" + + " time4 timestamp default utc_date() on update utc_date()\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default utc_date(),\n" + + " time2 timestamp default utc_date(),\n" + + " time3 timestamp default utc_date() on update utc_date(),\n" + + " time4 timestamp default utc_date() on update utc_date()\n" + + ")", + }, { + // test localtime with and without () + input: "create table t (\n" + + " time1 timestamp default localtime,\n" + + " time2 timestamp default localtime(),\n" + + " time3 timestamp default localtime on update localtime,\n" + + " time4 timestamp default localtime() on update localtime(),\n" + + " time5 timestamp(6) default localtime(6) on update localtime(6)\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default localtime(),\n" + + " time2 timestamp default localtime(),\n" + + " time3 timestamp default localtime() on update localtime(),\n" + + " time4 timestamp default localtime() on update localtime(),\n" + + " time5 timestamp(6) default localtime(6) on update localtime(6)\n" + + ")", + }, { + // test localtimestamp with and without () + input: "create table t (\n" + + " time1 timestamp default localtimestamp,\n" + + " time2 timestamp default localtimestamp(),\n" + + " time3 timestamp default localtimestamp on update localtimestamp,\n" + + " time4 timestamp default localtimestamp() on update localtimestamp(),\n" + + " time5 timestamp(1) default localtimestamp(1) on update localtimestamp(1)\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default localtimestamp(),\n" + + " time2 timestamp default localtimestamp(),\n" + + " time3 timestamp default localtimestamp() on update localtimestamp(),\n" + + " time4 timestamp default localtimestamp() on update localtimestamp(),\n" + + " time5 timestamp(1) default localtimestamp(1) on update localtimestamp(1)\n" + + ")", + }, { + // test current_date with and without () + input: "create table t (\n" + + " time1 timestamp default current_date,\n" + + " time2 timestamp default current_date(),\n" + + " time3 timestamp default current_date on update current_date,\n" + + " time4 timestamp default current_date() on update current_date()\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default current_date(),\n" + + " time2 timestamp default current_date(),\n" + + " time3 timestamp default current_date() on update current_date(),\n" + + " time4 timestamp default current_date() on update current_date()\n" + + ")", + }, { + // test current_time with and without () + input: "create table t (\n" + + " time1 timestamp default current_time,\n" + + " time2 timestamp default current_time(),\n" + + " time3 timestamp default current_time on update current_time,\n" + + " time4 timestamp default current_time() on update current_time(),\n" + + " time5 timestamp(2) default current_time(2) on update current_time(2)\n" + + ")", + output: "create table t (\n" + + " time1 timestamp default current_time(),\n" + + " time2 timestamp default current_time(),\n" + + " time3 timestamp default current_time() on update current_time(),\n" + + " time4 timestamp default current_time() on update current_time(),\n" + + " time5 timestamp(2) default current_time(2) on update current_time(2)\n" + + ")", + }, + } + for _, tcase := range testCases { + tree, err := ParseStrictDDL(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + if got, want := String(tree.(*DDL)), tcase.output; got != want { + t.Errorf("Parse(%s):\n%s, want\n%s", tcase.input, got, want) + } + } +} + +func TestCreateTableLike(t *testing.T) { + normal := "create table a like b" + testCases := []struct { + input string + output string + }{ + { + "create table a like b", + normal, + }, + { + "create table a (like b)", + normal, + }, + { + "create table ks.a like unsharded_ks.b", + "create table ks.a like unsharded_ks.b", + }, + } + for _, tcase := range testCases { + tree, err := ParseStrictDDL(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + if got, want := String(tree.(*DDL)), tcase.output; got != want { + t.Errorf("Parse(%s):\n%s, want\n%s", tcase.input, got, want) + } + } +} + +func TestCreateTableEscaped(t *testing.T) { + testCases := []struct { + input string + output string + }{{ + input: "create table `a`(`id` int, primary key(`id`))", + output: "create table a (\n" + + "\tid int,\n" + + "\tprimary key (id)\n" + + ")", + }, { + input: "create table `insert`(`update` int, primary key(`delete`))", + output: "create table `insert` (\n" + + "\t`update` int,\n" + + "\tprimary key (`delete`)\n" + + ")", + }} + for _, tcase := range testCases { + tree, err := ParseStrictDDL(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + if got, want := String(tree.(*DDL)), tcase.output; got != want { + t.Errorf("Parse(%s):\n%s, want\n%s", tcase.input, got, want) + } + } +} + +var ( + invalidSQL = []struct { + input string + output string + excludeMulti bool // Don't use in the ParseNext multi-statement parsing tests. + }{{ + input: "select : from t", + output: "syntax error at position 9 near ':'", + }, { + input: "select 0xH from t", + output: "syntax error at position 10 near '0x'", + }, { + input: "select x'78 from t", + output: "syntax error at position 12 near '78'", + }, { + input: "select x'777' from t", + output: "syntax error at position 14 near '777'", + }, { + input: "select * from t where :1 = 2", + output: "syntax error at position 24 near ':'", + }, { + input: "select * from t where :. = 2", + output: "syntax error at position 24 near ':'", + }, { + input: "select * from t where ::1 = 2", + output: "syntax error at position 25 near '::'", + }, { + input: "select * from t where ::. = 2", + output: "syntax error at position 25 near '::'", + }, { + input: "update a set c = values(1)", + output: "syntax error at position 26 near '1'", + }, { + input: "select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(", + output: "max nesting level reached at position 406", + }, { + input: "select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(", + output: "syntax error at position 404", + }, { + // This construct is considered invalid due to a grammar conflict. + input: "insert into a select * from b join c on duplicate key update d=e", + output: "syntax error at position 54 near 'key'", + }, { + input: "select * from a left join b", + output: "syntax error at position 28", + }, { + input: "select * from a natural join b on c = d", + output: "syntax error at position 34 near 'on'", + }, { + input: "select * from a natural join b using (c)", + output: "syntax error at position 37 near 'using'", + }, { + input: "select next id from a", + output: "expecting value after next at position 15 near 'id'", + }, { + input: "select next 1+1 values from a", + output: "syntax error at position 15", + }, { + input: "insert into a values (select * from b)", + output: "syntax error at position 29 near 'select'", + }, { + input: "select database", + output: "syntax error at position 16", + }, { + input: "select mod from t", + output: "syntax error at position 16 near 'from'", + }, { + input: "select 1 from t where div 5", + output: "syntax error at position 26 near 'div'", + }, { + input: "select 1 from t where binary", + output: "syntax error at position 29", + }, { + input: "select match(a1, a2) against ('foo' in boolean mode with query expansion) from t", + output: "syntax error at position 57 near 'with'", + }, { + input: "select /* reserved keyword as unqualified column */ * from t where key = 'test'", + output: "syntax error at position 71 near 'key'", + }, { + input: "select /* vitess-reserved keyword as unqualified column */ * from t where escape = 'test'", + output: "syntax error at position 81 near 'escape'", + }, { + input: "select * from t where id = ((select a from t1 union select b from t2) order by a limit 1)", + output: "syntax error at position 76 near 'order'", + }, { + input: "select /* straight_join using */ 1 from t1 straight_join t2 using (a)", + output: "syntax error at position 66 near 'using'", + }, { + input: "select 'aa", + output: "syntax error at position 11 near 'aa'", + excludeMulti: true, + }, { + input: "select 'aa\\", + output: "syntax error at position 12 near 'aa'", + excludeMulti: true, + }, { + input: "select /* aa", + output: "syntax error at position 13 near '/* aa'", + excludeMulti: true, + }} +) + +func TestErrors(t *testing.T) { + for _, tcase := range invalidSQL { + t.Run(tcase.input, func(t *testing.T) { + _, err := Parse(tcase.input) + require.Error(t, err, tcase.output) + }) + } +} + +// TestSkipToEnd tests that the skip to end functionality +// does not skip past a ';'. If any tokens exist after that, Parse +// should return an error. +func TestSkipToEnd(t *testing.T) { + testcases := []struct { + input string + output string + }{{ + // This is the case where the partial ddl will be reset + // because of a premature ';'. + input: "create table a(id; select * from t", + output: "syntax error at position 19", + }, { + // Partial DDL should get reset for valid DDLs also. + input: "create table a(id int); select * from t", + output: "syntax error at position 31 near 'select'", + }, { + // Partial DDL does not get reset here. But we allow the + // DDL only if there are no new tokens after skipping to end. + input: "create table a bb cc; select * from t", + output: "extra characters encountered after end of DDL: 'select'", + }, { + // Test that we don't step at ';' inside strings. + input: "create table a bb 'a;'; select * from t", + output: "extra characters encountered after end of DDL: 'select'", + }} + for _, tcase := range testcases { + _, err := Parse(tcase.input) + if err == nil || err.Error() != tcase.output { + t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) + } + } +} + +func TestParseDjangoQueries(t *testing.T) { + + file, err := os.Open("./test_queries/django_queries.txt") + if err != nil { + t.Errorf(" Error: %v", err) + } + defer file.Close() + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + + _, err := Parse(string(scanner.Text())) + if err != nil { + t.Error(scanner.Text()) + t.Errorf(" Error: %v", err) + } + } +} + +// Benchmark run on 6/23/17, prior to improvements: +// BenchmarkParse1-4 100000 16334 ns/op +// BenchmarkParse2-4 30000 44121 ns/op + +// Benchmark run on 9/3/18, comparing pooled parser performance. +// +// benchmark old ns/op new ns/op delta +// BenchmarkNormalize-4 2540 2533 -0.28% +// BenchmarkParse1-4 18269 13330 -27.03% +// BenchmarkParse2-4 46703 41255 -11.67% +// BenchmarkParse2Parallel-4 22246 20707 -6.92% +// BenchmarkParse3-4 4064743 4083135 +0.45% +// +// benchmark old allocs new allocs delta +// BenchmarkNormalize-4 27 27 +0.00% +// BenchmarkParse1-4 75 74 -1.33% +// BenchmarkParse2-4 264 263 -0.38% +// BenchmarkParse2Parallel-4 176 175 -0.57% +// BenchmarkParse3-4 360 361 +0.28% +// +// benchmark old bytes new bytes delta +// BenchmarkNormalize-4 821 821 +0.00% +// BenchmarkParse1-4 22776 2307 -89.87% +// BenchmarkParse2-4 28352 7881 -72.20% +// BenchmarkParse2Parallel-4 25712 5235 -79.64% +// BenchmarkParse3-4 6352082 6336307 -0.25% + +const ( + sql1 = "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'" + sql2 = "select aaaa, bbb, ccc, ddd, eeee, ffff, gggg, hhhh, iiii from tttt, ttt1, ttt3 where aaaa = bbbb and bbbb = cccc and dddd+1 = eeee group by fff, gggg having hhhh = iiii and iiii = jjjj order by kkkk, llll limit 3, 4" +) + +func BenchmarkParse1(b *testing.B) { + sql := sql1 + for i := 0; i < b.N; i++ { + ast, err := Parse(sql) + if err != nil { + b.Fatal(err) + } + _ = String(ast) + } +} + +func BenchmarkParse2(b *testing.B) { + sql := sql2 + for i := 0; i < b.N; i++ { + ast, err := Parse(sql) + if err != nil { + b.Fatal(err) + } + _ = String(ast) + } +} + +func BenchmarkParse2Parallel(b *testing.B) { + sql := sql2 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + ast, err := Parse(sql) + if err != nil { + b.Fatal(err) + } + _ = ast + } + }) +} + +var benchQuery string + +func init() { + // benchQuerySize is the approximate size of the query. + benchQuerySize := 1000000 + + // Size of value is 1/10 size of query. Then we add + // 10 such values to the where clause. + var baseval bytes.Buffer + for i := 0; i < benchQuerySize/100; i++ { + // Add an escape character: This will force the upcoming + // tokenizer improvement to still create a copy of the string. + // Then we can see if avoiding the copy will be worth it. + baseval.WriteString("\\'123456789") + } + + var buf bytes.Buffer + buf.WriteString("select a from t1 where v = 1") + for i := 0; i < 10; i++ { + fmt.Fprintf(&buf, " and v%d = \"%d%s\"", i, i, baseval.String()) + } + benchQuery = buf.String() +} + +func BenchmarkParse3(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := Parse(benchQuery); err != nil { + b.Fatal(err) + } + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/parsed_query.go b/internal/stackql-parser-fork/go/vt/sqlparser/parsed_query.go new file mode 100644 index 00000000..eaa0d106 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/parsed_query.go @@ -0,0 +1,136 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/stackql/stackql-parser/go/sqltypes" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +// ParsedQuery represents a parsed query where +// bind locations are precompued for fast substitutions. +type ParsedQuery struct { + Query string + bindLocations []bindLocation +} + +type bindLocation struct { + offset, length int +} + +// NewParsedQuery returns a ParsedQuery of the ast. +func NewParsedQuery(node SQLNode) *ParsedQuery { + buf := NewTrackedBuffer(nil) + buf.Myprintf("%v", node) + return buf.ParsedQuery() +} + +// GenerateQuery generates a query by substituting the specified +// bindVariables. The extras parameter specifies special parameters +// that can perform custom encoding. +func (pq *ParsedQuery) GenerateQuery(bindVariables map[string]*querypb.BindVariable, extras map[string]Encodable) (string, error) { + if len(pq.bindLocations) == 0 { + return pq.Query, nil + } + var buf strings.Builder + buf.Grow(len(pq.Query)) + if err := pq.Append(&buf, bindVariables, extras); err != nil { + return "", err + } + return buf.String(), nil +} + +// Append appends the generated query to the provided buffer. +func (pq *ParsedQuery) Append(buf *strings.Builder, bindVariables map[string]*querypb.BindVariable, extras map[string]Encodable) error { + current := 0 + for _, loc := range pq.bindLocations { + buf.WriteString(pq.Query[current:loc.offset]) + name := pq.Query[loc.offset : loc.offset+loc.length] + if encodable, ok := extras[name[1:]]; ok { + encodable.EncodeSQL(buf) + } else { + supplied, _, err := FetchBindVar(name, bindVariables) + if err != nil { + return err + } + EncodeValue(buf, supplied) + } + current = loc.offset + loc.length + } + buf.WriteString(pq.Query[current:]) + return nil +} + +// MarshalJSON is a custom JSON marshaler for ParsedQuery. +// Note that any queries longer that 512 bytes will be truncated. +func (pq *ParsedQuery) MarshalJSON() ([]byte, error) { + return json.Marshal(TruncateForUI(pq.Query)) +} + +// EncodeValue encodes one bind variable value into the query. +func EncodeValue(buf *strings.Builder, value *querypb.BindVariable) { + if value.Type != querypb.Type_TUPLE { + // Since we already check for TUPLE, we don't expect an error. + v, _ := sqltypes.BindVariableToValue(value) + v.EncodeSQL(buf) + return + } + + // It's a TUPLE. + buf.WriteByte('(') + for i, bv := range value.Values { + if i != 0 { + buf.WriteString(", ") + } + sqltypes.ProtoToValue(bv).EncodeSQL(buf) + } + buf.WriteByte(')') +} + +// FetchBindVar resolves the bind variable by fetching it from bindVariables. +func FetchBindVar(name string, bindVariables map[string]*querypb.BindVariable) (val *querypb.BindVariable, isList bool, err error) { + name = name[1:] + if name[0] == ':' { + name = name[1:] + isList = true + } + supplied, ok := bindVariables[name] + if !ok { + return nil, false, fmt.Errorf("missing bind var %s", name) + } + + if isList { + if supplied.Type != querypb.Type_TUPLE { + return nil, false, fmt.Errorf("unexpected list arg type (%v) for key %s", supplied.Type, name) + } + if len(supplied.Values) == 0 { + return nil, false, fmt.Errorf("empty list supplied for %s", name) + } + return supplied, true, nil + } + + if supplied.Type == querypb.Type_TUPLE { + return nil, false, fmt.Errorf("unexpected arg type (TUPLE) for non-list key %s", name) + } + + return supplied, false, nil +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/parsed_query_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/parsed_query_test.go new file mode 100644 index 00000000..222fcb72 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/parsed_query_test.go @@ -0,0 +1,157 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "reflect" + "testing" + + "github.com/stackql/stackql-parser/go/sqltypes" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +func TestNewParsedQuery(t *testing.T) { + stmt, err := Parse("select * from a where id =:id") + if err != nil { + t.Error(err) + return + } + pq := NewParsedQuery(stmt) + want := &ParsedQuery{ + Query: "select * from a where id = :id", + bindLocations: []bindLocation{{offset: 27, length: 3}}, + } + if !reflect.DeepEqual(pq, want) { + t.Errorf("GenerateParsedQuery: %+v, want %+v", pq, want) + } +} + +func TestGenerateQuery(t *testing.T) { + tcases := []struct { + desc string + query string + bindVars map[string]*querypb.BindVariable + extras map[string]Encodable + output string + }{ + { + desc: "no substitutions", + query: "select * from a where id = 2", + bindVars: map[string]*querypb.BindVariable{ + "id": sqltypes.Int64BindVariable(1), + }, + output: "select * from a where id = 2", + }, { + desc: "missing bind var", + query: "select * from a where id1 = :id1 and id2 = :id2", + bindVars: map[string]*querypb.BindVariable{ + "id1": sqltypes.Int64BindVariable(1), + }, + output: "missing bind var id2", + }, { + desc: "simple bindvar substitution", + query: "select * from a where id1 = :id1 and id2 = :id2", + bindVars: map[string]*querypb.BindVariable{ + "id1": sqltypes.Int64BindVariable(1), + "id2": sqltypes.NullBindVariable, + }, + output: "select * from a where id1 = 1 and id2 = null", + }, { + desc: "tuple *querypb.BindVariable", + query: "select * from a where id in ::vals", + bindVars: map[string]*querypb.BindVariable{ + "vals": sqltypes.TestBindVariable([]interface{}{1, "aa"}), + }, + output: "select * from a where id in (1, 'aa')", + }, { + desc: "list bind vars 0 arguments", + query: "select * from a where id in ::vals", + bindVars: map[string]*querypb.BindVariable{ + "vals": sqltypes.TestBindVariable([]interface{}{}), + }, + output: "empty list supplied for vals", + }, { + desc: "non-list bind var supplied", + query: "select * from a where id in ::vals", + bindVars: map[string]*querypb.BindVariable{ + "vals": sqltypes.Int64BindVariable(1), + }, + output: "unexpected list arg type (INT64) for key vals", + }, { + desc: "list bind var for non-list", + query: "select * from a where id = :vals", + bindVars: map[string]*querypb.BindVariable{ + "vals": sqltypes.TestBindVariable([]interface{}{1}), + }, + output: "unexpected arg type (TUPLE) for non-list key vals", + }, { + desc: "single column tuple equality", + query: "select * from a where b = :equality", + extras: map[string]Encodable{ + "equality": &TupleEqualityList{ + Columns: []ColIdent{NewColIdent("pk")}, + Rows: [][]sqltypes.Value{ + {sqltypes.NewInt64(1)}, + {sqltypes.NewVarBinary("aa")}, + }, + }, + }, + output: "select * from a where b = pk in (1, 'aa')", + }, { + desc: "multi column tuple equality", + query: "select * from a where b = :equality", + extras: map[string]Encodable{ + "equality": &TupleEqualityList{ + Columns: []ColIdent{NewColIdent("pk1"), NewColIdent("pk2")}, + Rows: [][]sqltypes.Value{ + { + sqltypes.NewInt64(1), + sqltypes.NewVarBinary("aa"), + }, + { + sqltypes.NewInt64(2), + sqltypes.NewVarBinary("bb"), + }, + }, + }, + }, + output: "select * from a where b = (pk1 = 1 and pk2 = 'aa') or (pk1 = 2 and pk2 = 'bb')", + }, + } + + for _, tcase := range tcases { + tree, err := Parse(tcase.query) + if err != nil { + t.Errorf("parse failed for %s: %v", tcase.desc, err) + continue + } + buf := NewTrackedBuffer(nil) + buf.Myprintf("%v", tree) + pq := buf.ParsedQuery() + bytes, err := pq.GenerateQuery(tcase.bindVars, tcase.extras) + var got string + if err != nil { + got = err.Error() + } else { + got = string(bytes) + } + if got != tcase.output { + t.Errorf("for test case: %s, got: '%s', want '%s'", tcase.desc, got, tcase.output) + } + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/parser.go b/internal/stackql-parser-fork/go/vt/sqlparser/parser.go new file mode 100644 index 00000000..04ac2c3d --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/parser.go @@ -0,0 +1,234 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/stackql/stackql-parser/go/vt/log" + "github.com/stackql/stackql-parser/go/vt/vterrors" + + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" +) + +// parserPool is a pool for parser objects. +var parserPool = sync.Pool{} + +// zeroParser is a zero-initialized parser to help reinitialize the parser for pooling. +var zeroParser = *(yyNewParser().(*yyParserImpl)) + +// yyParsePooled is a wrapper around yyParse that pools the parser objects. There isn't a +// particularly good reason to use yyParse directly, since it immediately discards its parser. What +// would be ideal down the line is to actually pool the stacks themselves rather than the parser +// objects, as per https://github.com/cznic/goyacc/blob/master/main.go. However, absent an upstream +// change to goyacc, this is the next best option. +// +// N.B: Parser pooling means that you CANNOT take references directly to parse stack variables (e.g. +// $$ = &$4) in sql.y rules. You must instead add an intermediate reference like so: +// +// showCollationFilterOpt := $4 +// $$ = &Show{Type: string($2), ShowCollationFilterOpt: &showCollationFilterOpt} +func yyParsePooled(yylex yyLexer) int { + // Being very particular about using the base type and not an interface type b/c we depend on + // the implementation to know how to reinitialize the parser. + var parser *yyParserImpl + + i := parserPool.Get() + if i != nil { + parser = i.(*yyParserImpl) + } else { + parser = yyNewParser().(*yyParserImpl) + } + + defer func() { + *parser = zeroParser + parserPool.Put(parser) + }() + return parser.Parse(yylex) +} + +// Instructions for creating new types: If a type +// needs to satisfy an interface, declare that function +// along with that interface. This will help users +// identify the list of types to which they can assert +// those interfaces. +// If the member of a type has a string with a predefined +// list of values, declare those values as const following +// the type. +// For interfaces that define dummy functions to consolidate +// a set of types, define the function as iTypeName. +// This will help avoid name collisions. + +// Parse parses the SQL in full and returns a Statement, which +// is the AST representation of the query. If a DDL statement +// is partially parsed but still contains a syntax error, the +// error is ignored and the DDL is returned anyway. +func Parse(sql string) (Statement, error) { + tokenizer := NewStringTokenizer(sql) + if yyParsePooled(tokenizer) != 0 { + if tokenizer.partialDDL != nil { + if typ, val := tokenizer.Scan(); typ != 0 { + return nil, fmt.Errorf("extra characters encountered after end of DDL: '%s'", string(val)) + } + log.Warningf("ignoring error parsing DDL '%s': %v", sql, tokenizer.LastError) + tokenizer.ParseTree = tokenizer.partialDDL + return tokenizer.ParseTree, nil + } + return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, tokenizer.LastError.Error()) + } + if tokenizer.ParseTree == nil { + return nil, ErrEmpty + } + return tokenizer.ParseTree, nil +} + +// ParseStrictDDL is the same as Parse except it errors on +// partially parsed DDL statements. +func ParseStrictDDL(sql string) (Statement, error) { + tokenizer := NewStringTokenizer(sql) + if yyParsePooled(tokenizer) != 0 { + return nil, tokenizer.LastError + } + if tokenizer.ParseTree == nil { + return nil, ErrEmpty + } + return tokenizer.ParseTree, nil +} + +// ParseTokenizer is a raw interface to parse from the given tokenizer. +// This does not used pooled parsers, and should not be used in general. +func ParseTokenizer(tokenizer *Tokenizer) int { + return yyParse(tokenizer) +} + +// ParseNext parses a single SQL statement from the tokenizer +// returning a Statement which is the AST representation of the query. +// The tokenizer will always read up to the end of the statement, allowing for +// the next call to ParseNext to parse any subsequent SQL statements. When +// there are no more statements to parse, a error of io.EOF is returned. +func ParseNext(tokenizer *Tokenizer) (Statement, error) { + return parseNext(tokenizer, false) +} + +// ParseNextStrictDDL is the same as ParseNext except it errors on +// partially parsed DDL statements. +func ParseNextStrictDDL(tokenizer *Tokenizer) (Statement, error) { + return parseNext(tokenizer, true) +} + +func parseNext(tokenizer *Tokenizer, strict bool) (Statement, error) { + if tokenizer.lastChar == ';' { + tokenizer.next() + tokenizer.skipBlank() + } + if tokenizer.lastChar == eofChar { + return nil, io.EOF + } + + tokenizer.reset() + tokenizer.multi = true + if yyParsePooled(tokenizer) != 0 { + if tokenizer.partialDDL != nil && !strict { + tokenizer.ParseTree = tokenizer.partialDDL + return tokenizer.ParseTree, nil + } + return nil, tokenizer.LastError + } + if tokenizer.ParseTree == nil { + return ParseNext(tokenizer) + } + return tokenizer.ParseTree, nil +} + +// ErrEmpty is a sentinel error returned when parsing empty statements. +var ErrEmpty = errors.New("empty statement") + +// SplitStatement returns the first sql statement up to either a ; or EOF +// and the remainder from the given buffer +func SplitStatement(blob string) (string, string, error) { + tokenizer := NewStringTokenizer(blob) + tkn := 0 + for { + tkn, _ = tokenizer.Scan() + if tkn == 0 || tkn == ';' || tkn == eofChar { + break + } + } + if tokenizer.LastError != nil { + return "", "", tokenizer.LastError + } + if tkn == ';' { + return blob[:tokenizer.Position-2], blob[tokenizer.Position-1:], nil + } + return blob, "", nil +} + +// SplitStatementToPieces split raw sql statement that may have multi sql pieces to sql pieces +// returns the sql pieces blob contains; or error if sql cannot be parsed +func SplitStatementToPieces(blob string) (pieces []string, err error) { + pieces = make([]string, 0, 16) + tokenizer := NewStringTokenizer(blob) + + tkn := 0 + var stmt string + stmtBegin := 0 + for { + tkn, _ = tokenizer.Scan() + if tkn == ';' { + stmt = blob[stmtBegin : tokenizer.Position-2] + pieces = append(pieces, stmt) + stmtBegin = tokenizer.Position - 1 + + } else if tkn == 0 || tkn == eofChar { + blobTail := tokenizer.Position - 2 + + if stmtBegin < blobTail { + stmt = blob[stmtBegin : blobTail+1] + pieces = append(pieces, stmt) + } + break + } + } + + err = tokenizer.LastError + return +} + +// String returns a string representation of an SQLNode. +func String(node SQLNode) string { + if node == nil { + return "" + } + + buf := NewTrackedBuffer(nil) + buf.Myprintf("%v", node) + return buf.String() +} + +// String returns a string representation of an SQLNode. +func ColDelimitedString(node SQLNode) string { + if node == nil { + return "" + } + + buf := NewTrackedBuffer(nil).WithDelimitCols(true) + buf.Myprintf("%v", node) + return buf.String() +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/precedence.go b/internal/stackql-parser-fork/go/vt/sqlparser/precedence.go new file mode 100644 index 00000000..44610114 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/precedence.go @@ -0,0 +1,96 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +// Precendence is used to know the precedence between operators, +// so we can introduce parens when needed in the String representation of the AST +type Precendence int + +const ( + Syntactic Precendence = iota + P1 + P2 + P3 + P4 + P5 + P6 + P7 + P8 + P9 + P10 + P11 + P12 + P13 + P14 + P15 + P16 + P17 +) + +// precedenceFor returns the precedence of an expression. +// +// * NOTE: If you change anything here, update sql.y to keep them consistent. +// Also make sure to add the new constructs to random_expr.go so we have test coverage for the new expressions * +func precedenceFor(in Expr) Precendence { + switch node := in.(type) { + case *OrExpr: + return P16 + case *XorExpr: + return P15 + case *AndExpr: + return P14 + case *NotExpr: + return P13 + case *RangeCond: + return P12 + case *ComparisonExpr: + switch node.Operator { + case EqualStr, NotEqualStr, GreaterThanStr, GreaterEqualStr, LessThanStr, LessEqualStr, LikeStr, InStr, RegexpStr: + return P11 + } + case *IsExpr: + return P11 + case *BinaryExpr: + switch node.Operator { + case BitOrStr: + return P10 + case BitAndStr: + return P9 + case ShiftLeftStr, ShiftRightStr: + return P8 + case PlusStr, MinusStr: + return P7 + case DivStr, MultStr, ModStr, IntDivStr: + return P6 + case BitXorStr: + return P5 + } + case *UnaryExpr: + switch node.Operator { + case UPlusStr, UMinusStr: + return P4 + case BangStr: + return P3 + case BinaryStr: + return P2 + } + case *IntervalExpr: + return P1 + } + + return Syntactic +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/precedence_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/precedence_test.go new file mode 100644 index 00000000..c0676dee --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/precedence_test.go @@ -0,0 +1,181 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func readable(node Expr) string { + switch node := node.(type) { + case *OrExpr: + return fmt.Sprintf("(%s or %s)", readable(node.Left), readable(node.Right)) + case *AndExpr: + return fmt.Sprintf("(%s and %s)", readable(node.Left), readable(node.Right)) + case *XorExpr: + return fmt.Sprintf("(%s xor %s)", readable(node.Left), readable(node.Right)) + case *BinaryExpr: + return fmt.Sprintf("(%s %s %s)", readable(node.Left), node.Operator, readable(node.Right)) + case *IsExpr: + return fmt.Sprintf("(%s %s)", readable(node.Expr), node.Operator) + default: + return String(node) + } +} + +func TestAndOrPrecedence(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "select * from a where a=b and c=d or e=f", + output: "((a = b and c = d) or e = f)", + }, { + input: "select * from a where a=b or c=d and e=f", + output: "(a = b or (c = d and e = f))", + }} + for _, tcase := range validSQL { + tree, err := Parse(tcase.input) + if err != nil { + t.Error(err) + continue + } + expr := readable(tree.(*Select).Where.Expr) + if expr != tcase.output { + t.Errorf("Parse: \n%s, want: \n%s", expr, tcase.output) + } + } +} + +func TestPlusStarPrecedence(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "select 1+2*3 from a", + output: "(1 + (2 * 3))", + }, { + input: "select 1*2+3 from a", + output: "((1 * 2) + 3)", + }} + for _, tcase := range validSQL { + tree, err := Parse(tcase.input) + if err != nil { + t.Error(err) + continue + } + expr := readable(tree.(*Select).SelectExprs[0].(*AliasedExpr).Expr) + if expr != tcase.output { + t.Errorf("Parse: \n%s, want: \n%s", expr, tcase.output) + } + } +} + +func TestIsPrecedence(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "select * from a where a+b is true", + output: "((a + b) is true)", + }, { + input: "select * from a where a=1 and b=2 is true", + output: "(a = 1 and (b = 2 is true))", + }, { + input: "select * from a where (a=1 and b=2) is true", + output: "((a = 1 and b = 2) is true)", + }} + for _, tcase := range validSQL { + tree, err := Parse(tcase.input) + if err != nil { + t.Error(err) + continue + } + expr := readable(tree.(*Select).Where.Expr) + if expr != tcase.output { + t.Errorf("Parse: \n%s, want: \n%s", expr, tcase.output) + } + } +} + +func TestParens(t *testing.T) { + tests := []struct { + in, expected string + }{ + {in: "12", expected: "12"}, + {in: "(12)", expected: "12"}, + {in: "((12))", expected: "12"}, + {in: "((true) and (false))", expected: "true and false"}, + {in: "((true) and (false)) and (true)", expected: "true and false and true"}, + {in: "((true) and (false))", expected: "true and false"}, + {in: "a=b and (c=d or e=f)", expected: "a = b and (c = d or e = f)"}, + {in: "(a=b and c=d) or e=f", expected: "a = b and c = d or e = f"}, + {in: "a & (b | c)", expected: "a & (b | c)"}, + {in: "(a & b) | c", expected: "a & b | c"}, + {in: "not (a=b and c=d)", expected: "not (a = b and c = d)"}, + {in: "not (a=b) and c=d", expected: "not a = b and c = d"}, + {in: "-(12)", expected: "-12"}, + {in: "-(12 + 12)", expected: "-(12 + 12)"}, + {in: "(1 > 2) and (1 = b)", expected: "1 > 2 and 1 = b"}, + {in: "(a / b) + c", expected: "a / b + c"}, + {in: "a / (b + c)", expected: "a / (b + c)"}, + {in: "(1,2,3)", expected: "(1, 2, 3)"}, + {in: "(a) between (5) and (7)", expected: "a between 5 and 7"}, + {in: "(a | b) between (5) and (7)", expected: "a | b between 5 and 7"}, + {in: "(a and b) between (5) and (7)", expected: "(a and b) between 5 and 7"}, + {in: "(true is true) is null", expected: "(true is true) is null"}, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + stmt, err := Parse("select " + tc.in) + require.NoError(t, err) + out := String(stmt) + require.Equal(t, "select "+tc.expected+" from dual", out) + }) + } +} + +func TestRandom(t *testing.T) { + // The purpose of this test is to find discrepancies between Format and parsing. If for example our precedence rules are not consistent between the two, this test should find it. + // The idea is to generate random queries, and pass them through the parser and then the unparser, and one more time. The result of the first unparse should be the same as the second result. + seed := time.Now().UnixNano() + fmt.Println(fmt.Sprintf("seed is %d", seed)) //nolint + g := newGenerator(seed, 5) + endBy := time.Now().Add(1 * time.Second) + + for { + if time.Now().After(endBy) { + break + } + // Given a random expression + randomExpr := g.expression() + inputQ := "select " + String(randomExpr) + " from t" + + // When it's parsed and unparsed + parsedInput, err := Parse(inputQ) + require.NoError(t, err, inputQ) + + // Then the unparsing should be the same as the input query + outputOfParseResult := String(parsedInput) + require.Equal(t, outputOfParseResult, inputQ) + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/random_expr.go b/internal/stackql-parser-fork/go/vt/sqlparser/random_expr.go new file mode 100644 index 00000000..783d96c6 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/random_expr.go @@ -0,0 +1,320 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "fmt" + "math/rand" +) + +// This file is used to generate random expressions to be used for testing + +func newGenerator(seed int64, maxDepth int) *generator { + g := generator{ + seed: seed, + r: rand.New(rand.NewSource(seed)), + maxDepth: maxDepth, + } + return &g +} + +type generator struct { + seed int64 + r *rand.Rand + depth int + maxDepth int +} + +// enter should be called whenever we are producing an intermediate node. it should be followed by a `defer g.exit()` +func (g *generator) enter() { + g.depth++ +} + +// exit should be called when exiting an intermediate node +func (g *generator) exit() { + g.depth-- +} + +// atMaxDepth returns true if we have reached the maximum allowed depth or the expression tree +func (g *generator) atMaxDepth() bool { + return g.depth >= g.maxDepth +} + +/* Creates a random expression. It builds an expression tree using the following constructs: + - true/false + - AND/OR/NOT + - string literalrs, numeric literals (-/+ 1000) + - =, >, <, >=, <=, <=>, != + - &, |, ^, +, -, *, /, div, %, <<, >> + - IN, BETWEEN and CASE + - IS NULL, IS NOT NULL, IS TRUE, IS NOT TRUE, IS FALSE, IS NOT FALSE + +Note: It's important to update this method so that it produces all expressions that need precedence checking. +It's currently missing function calls and string operators +*/ +func (g *generator) expression() Expr { + if g.randomBool() { + return g.booleanExpr() + } + options := []exprF{ + func() Expr { return g.intExpr() }, + func() Expr { return g.stringExpr() }, + func() Expr { return g.booleanExpr() }, + } + + return g.randomOf(options) +} + +func (g *generator) booleanExpr() Expr { + if g.atMaxDepth() { + return g.booleanLiteral() + } + + options := []exprF{ + func() Expr { return g.andExpr() }, + func() Expr { return g.xorExpr() }, + func() Expr { return g.orExpr() }, + func() Expr { return g.comparison(g.intExpr) }, + func() Expr { return g.comparison(g.stringExpr) }, + //func() Expr { return g.comparison(g.booleanExpr) }, // this is not accepted by the parser + func() Expr { return g.inExpr() }, + func() Expr { return g.between() }, + func() Expr { return g.isExpr() }, + func() Expr { return g.notExpr() }, + func() Expr { return g.likeExpr() }, + } + + return g.randomOf(options) +} + +func (g *generator) intExpr() Expr { + if g.atMaxDepth() { + return g.intLiteral() + } + + options := []exprF{ + func() Expr { return g.arithmetic() }, + func() Expr { return g.intLiteral() }, + func() Expr { return g.caseExpr(g.intExpr) }, + } + + return g.randomOf(options) +} + +func (g *generator) booleanLiteral() Expr { + return BoolVal(g.randomBool()) +} + +func (g *generator) randomBool() bool { + return g.r.Float32() < 0.5 +} + +func (g *generator) intLiteral() Expr { + t := fmt.Sprintf("%d", g.r.Intn(1000)-g.r.Intn((1000))) + + return NewIntVal([]byte(t)) +} + +var words = []string{"ox", "ant", "ape", "asp", "bat", "bee", "boa", "bug", "cat", "cod", "cow", "cub", "doe", "dog", "eel", "eft", "elf", "elk", "emu", "ewe", "fly", "fox", "gar", "gnu", "hen", "hog", "imp", "jay", "kid", "kit", "koi", "lab", "man", "owl", "pig", "pug", "pup", "ram", "rat", "ray", "yak", "bass", "bear", "bird", "boar", "buck", "bull", "calf", "chow", "clam", "colt", "crab", "crow", "dane", "deer", "dodo", "dory", "dove", "drum", "duck", "fawn", "fish", "flea", "foal", "fowl", "frog", "gnat", "goat", "grub", "gull", "hare", "hawk", "ibex", "joey", "kite", "kiwi", "lamb", "lark", "lion", "loon", "lynx", "mako", "mink", "mite", "mole", "moth", "mule", "mutt", "newt", "orca", "oryx", "pika", "pony", "puma", "seal", "shad", "slug", "sole", "stag", "stud", "swan", "tahr", "teal", "tick", "toad", "tuna", "wasp", "wolf", "worm", "wren", "yeti", "adder", "akita", "alien", "aphid", "bison", "boxer", "bream", "bunny", "burro", "camel", "chimp", "civet", "cobra", "coral", "corgi", "crane", "dingo", "drake", "eagle", "egret", "filly", "finch", "gator", "gecko", "ghost", "ghoul", "goose", "guppy", "heron", "hippo", "horse", "hound", "husky", "hyena", "koala", "krill", "leech", "lemur", "liger", "llama", "louse", "macaw", "midge", "molly", "moose", "moray", "mouse", "panda", "perch", "prawn", "quail", "racer", "raven", "rhino", "robin", "satyr", "shark", "sheep", "shrew", "skink", "skunk", "sloth", "snail", "snake", "snipe", "squid", "stork", "swift", "swine", "tapir", "tetra", "tiger", "troll", "trout", "viper", "wahoo", "whale", "zebra", "alpaca", "amoeba", "baboon", "badger", "beagle", "bedbug", "beetle", "bengal", "bobcat", "caiman", "cattle", "cicada", "collie", "condor", "cougar", "coyote", "dassie", "donkey", "dragon", "earwig", "falcon", "feline", "ferret", "gannet", "gibbon", "glider", "goblin", "gopher", "grouse", "guinea", "hermit", "hornet", "iguana", "impala", "insect", "jackal", "jaguar", "jennet", "kitten", "kodiak", "lizard", "locust", "maggot", "magpie", "mammal", "mantis", "marlin", "marmot", "marten", "martin", "mayfly", "minnow", "monkey", "mullet", "muskox", "ocelot", "oriole", "osprey", "oyster", "parrot", "pigeon", "piglet", "poodle", "possum", "python", "quagga", "rabbit", "raptor", "rodent", "roughy", "salmon", "sawfly", "serval", "shiner", "shrimp", "spider", "sponge", "tarpon", "thrush", "tomcat", "toucan", "turkey", "turtle", "urchin", "vervet", "walrus", "weasel", "weevil", "wombat", "anchovy", "anemone", "bluejay", "buffalo", "bulldog", "buzzard", "caribou", "catfish", "chamois", "cheetah", "chicken", "chigger", "cowbird", "crappie", "crawdad", "cricket", "dogfish", "dolphin", "firefly", "garfish", "gazelle", "gelding", "giraffe", "gobbler", "gorilla", "goshawk", "grackle", "griffon", "grizzly", "grouper", "haddock", "hagfish", "halibut", "hamster", "herring", "jackass", "javelin", "jawfish", "jaybird", "katydid", "ladybug", "lamprey", "lemming", "leopard", "lioness", "lobster", "macaque", "mallard", "mammoth", "manatee", "mastiff", "meerkat", "mollusk", "monarch", "mongrel", "monitor", "monster", "mudfish", "muskrat", "mustang", "narwhal", "oarfish", "octopus", "opossum", "ostrich", "panther", "peacock", "pegasus", "pelican", "penguin", "phoenix", "piranha", "polecat", "primate", "quetzal", "raccoon", "rattler", "redbird", "redfish", "reptile", "rooster", "sawfish", "sculpin", "seagull", "skylark", "snapper", "spaniel", "sparrow", "sunbeam", "sunbird", "sunfish", "tadpole", "termite", "terrier", "unicorn", "vulture", "wallaby", "walleye", "warthog", "whippet", "wildcat", "aardvark", "airedale", "albacore", "anteater", "antelope", "arachnid", "barnacle", "basilisk", "blowfish", "bluebird", "bluegill", "bonefish", "bullfrog", "cardinal", "chipmunk", "cockatoo", "crayfish", "dinosaur", "doberman", "duckling", "elephant", "escargot", "flamingo", "flounder", "foxhound", "glowworm", "goldfish", "grubworm", "hedgehog", "honeybee", "hookworm", "humpback", "kangaroo", "killdeer", "kingfish", "labrador", "lacewing", "ladybird", "lionfish", "longhorn", "mackerel", "malamute", "marmoset", "mastodon", "moccasin", "mongoose", "monkfish", "mosquito", "pangolin", "parakeet", "pheasant", "pipefish", "platypus", "polliwog", "porpoise", "reindeer", "ringtail", "sailfish", "scorpion", "seahorse", "seasnail", "sheepdog", "shepherd", "silkworm", "squirrel", "stallion", "starfish", "starling", "stingray", "stinkbug", "sturgeon", "terrapin", "titmouse", "tortoise", "treefrog", "werewolf", "woodcock"} + +func (g *generator) stringLiteral() Expr { + return NewStrVal([]byte(g.randomOfS(words))) +} + +func (g *generator) stringExpr() Expr { + if g.atMaxDepth() { + return g.stringLiteral() + } + + options := []exprF{ + func() Expr { return g.stringLiteral() }, + func() Expr { return g.caseExpr(g.stringExpr) }, + } + + return g.randomOf(options) +} + +func (g *generator) likeExpr() Expr { + g.enter() + defer g.exit() + return &ComparisonExpr{ + Operator: LikeStr, + Left: g.stringExpr(), + Right: g.stringExpr(), + } +} + +var comparisonOps = []string{EqualStr, LessThanStr, GreaterThanStr, LessEqualStr, GreaterEqualStr, NotEqualStr, NullSafeEqualStr} + +func (g *generator) comparison(f func() Expr) Expr { + g.enter() + defer g.exit() + + cmp := &ComparisonExpr{ + Operator: g.randomOfS(comparisonOps), + Left: f(), + Right: f(), + } + return cmp +} + +func (g *generator) caseExpr(valueF func() Expr) Expr { + g.enter() + defer g.exit() + + var exp Expr + var elseExpr Expr + if g.randomBool() { + exp = valueF() + } + if g.randomBool() { + elseExpr = valueF() + } + + size := g.r.Intn(5) + 2 + var whens []*When + for i := 0; i < size; i++ { + var cond Expr + if exp == nil { + cond = g.booleanExpr() + } else { + cond = g.expression() + } + + whens = append(whens, &When{ + Cond: cond, + Val: g.expression(), + }) + } + + return &CaseExpr{ + Expr: exp, + Whens: whens, + Else: elseExpr, + } +} + +var arithmeticOps = []string{BitAndStr, BitOrStr, BitXorStr, PlusStr, MinusStr, MultStr, DivStr, IntDivStr, ModStr, ShiftRightStr, ShiftLeftStr} + +func (g *generator) arithmetic() Expr { + g.enter() + defer g.exit() + + op := arithmeticOps[g.r.Intn(len(arithmeticOps))] + + return &BinaryExpr{ + Operator: op, + Left: g.intExpr(), + Right: g.intExpr(), + } +} + +type exprF func() Expr + +func (g *generator) randomOf(options []exprF) Expr { + return options[g.r.Intn(len(options))]() +} + +func (g *generator) randomOfS(options []string) string { + return options[g.r.Intn(len(options))] +} + +func (g *generator) andExpr() Expr { + g.enter() + defer g.exit() + return &AndExpr{ + Left: g.booleanExpr(), + Right: g.booleanExpr(), + } +} + +func (g *generator) orExpr() Expr { + g.enter() + defer g.exit() + return &OrExpr{ + Left: g.booleanExpr(), + Right: g.booleanExpr(), + } +} + +func (g *generator) xorExpr() Expr { + g.enter() + defer g.exit() + return &XorExpr{ + Left: g.booleanExpr(), + Right: g.booleanExpr(), + } +} + +func (g *generator) notExpr() Expr { + g.enter() + defer g.exit() + return &NotExpr{g.booleanExpr()} +} + +func (g *generator) inExpr() Expr { + g.enter() + defer g.exit() + + expr := g.intExpr() + size := g.r.Intn(5) + 2 + tuples := ValTuple{} + for i := 0; i < size; i++ { + tuples = append(tuples, g.intExpr()) + } + op := InStr + if g.randomBool() { + op = NotInStr + } + + return &ComparisonExpr{ + Operator: op, + Left: expr, + Right: tuples, + } +} + +func (g *generator) between() Expr { + g.enter() + defer g.exit() + + var op string + if g.randomBool() { + op = BetweenStr + } else { + op = NotBetweenStr + } + + return &RangeCond{ + Operator: op, + Left: g.intExpr(), + From: g.intExpr(), + To: g.intExpr(), + } +} + +func (g *generator) isExpr() Expr { + g.enter() + defer g.exit() + + ops := []string{IsNullStr, IsNotNullStr, IsTrueStr, IsNotTrueStr, IsFalseStr, IsNotFalseStr} + + return &IsExpr{ + Operator: g.randomOfS(ops), + Expr: g.booleanExpr(), + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/redact_query.go b/internal/stackql-parser-fork/go/vt/sqlparser/redact_query.go new file mode 100644 index 00000000..0c8e1099 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/redact_query.go @@ -0,0 +1,35 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import querypb "github.com/stackql/stackql-parser/go/vt/proto/query" + +// RedactSQLQuery returns a sql string with the params stripped out for display +func RedactSQLQuery(sql string) (string, error) { + bv := map[string]*querypb.BindVariable{} + sqlStripped, comments := SplitMarginComments(sql) + + stmt, err := Parse(sqlStripped) + if err != nil { + return "", err + } + + prefix := "redacted" + Normalize(stmt, bv, prefix) + + return comments.Leading + String(stmt) + comments.Trailing, nil +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/redact_query_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/redact_query_test.go new file mode 100644 index 00000000..1921b52e --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/redact_query_test.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "testing" +) + +func TestRedactSQLStatements(t *testing.T) { + sql := "select a,b,c from t where x = 1234 and y = 1234 and z = 'apple'" + redactedSQL, err := RedactSQLQuery(sql) + if err != nil { + t.Fatalf("redacting sql failed: %v", err) + } + + if redactedSQL != "select a, b, c from t where x = :redacted1 and y = :redacted1 and z = :redacted2" { + t.Fatalf("Unknown sql redaction: %v", redactedSQL) + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/rewriter.go b/internal/stackql-parser-fork/go/vt/sqlparser/rewriter.go new file mode 100644 index 00000000..44ffb8d4 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/rewriter.go @@ -0,0 +1,1554 @@ +// Code generated by visitorgen/main/main.go. DO NOT EDIT. + +package sqlparser + +//go:generate go run ./visitorgen/main -input=ast.go -output=rewriter.go + +import ( + "reflect" +) + +type replacerFunc func(newNode, parent SQLNode) + +// application carries all the shared data so we can pass it around cheaply. +type application struct { + pre, post ApplyFunc + cursor Cursor +} + +func replaceAliasedExprAs(newNode, parent SQLNode) { + parent.(*AliasedExpr).As = newNode.(ColIdent) +} + +func replaceAliasedExprExpr(newNode, parent SQLNode) { + parent.(*AliasedExpr).Expr = newNode.(Expr) +} + +func replaceAliasedTableExprAs(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).As = newNode.(TableIdent) +} + +func replaceAliasedTableExprExpr(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).Expr = newNode.(SimpleTableExpr) +} + +func replaceAliasedTableExprHints(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).Hints = newNode.(*IndexHints) +} + +func replaceAliasedTableExprPartitions(newNode, parent SQLNode) { + parent.(*AliasedTableExpr).Partitions = newNode.(Partitions) +} + +func replaceAndExprLeft(newNode, parent SQLNode) { + parent.(*AndExpr).Left = newNode.(Expr) +} + +func replaceAndExprRight(newNode, parent SQLNode) { + parent.(*AndExpr).Right = newNode.(Expr) +} + +func replaceAuthSessionAuth(newNode, parent SQLNode) { + parent.(*Auth).SessionAuth = newNode.(BoolVal) +} + +func replaceAuthRevokeSessionAuth(newNode, parent SQLNode) { + parent.(*AuthRevoke).SessionAuth = newNode.(BoolVal) +} + +func replaceAutoIncSpecColumn(newNode, parent SQLNode) { + parent.(*AutoIncSpec).Column = newNode.(ColIdent) +} + +func replaceAutoIncSpecSequence(newNode, parent SQLNode) { + parent.(*AutoIncSpec).Sequence = newNode.(TableName) +} + +func replaceBinaryExprLeft(newNode, parent SQLNode) { + parent.(*BinaryExpr).Left = newNode.(Expr) +} + +func replaceBinaryExprRight(newNode, parent SQLNode) { + parent.(*BinaryExpr).Right = newNode.(Expr) +} + +func replaceCaseExprElse(newNode, parent SQLNode) { + parent.(*CaseExpr).Else = newNode.(Expr) +} + +func replaceCaseExprExpr(newNode, parent SQLNode) { + parent.(*CaseExpr).Expr = newNode.(Expr) +} + +type replaceCaseExprWhens int + +func (r *replaceCaseExprWhens) replace(newNode, container SQLNode) { + container.(*CaseExpr).Whens[int(*r)] = newNode.(*When) +} + +func (r *replaceCaseExprWhens) inc() { + *r++ +} + +func replaceColNameName(newNode, parent SQLNode) { + parent.(*ColName).Name = newNode.(ColIdent) +} + +func replaceColNameQualifier(newNode, parent SQLNode) { + parent.(*ColName).Qualifier = newNode.(TableName) +} + +func replaceCollateExprExpr(newNode, parent SQLNode) { + parent.(*CollateExpr).Expr = newNode.(Expr) +} + +func replaceColumnDefinitionName(newNode, parent SQLNode) { + parent.(*ColumnDefinition).Name = newNode.(ColIdent) +} + +func replaceColumnTypeAutoincrement(newNode, parent SQLNode) { + parent.(*ColumnType).Autoincrement = newNode.(BoolVal) +} + +func replaceColumnTypeComment(newNode, parent SQLNode) { + parent.(*ColumnType).Comment = newNode.(*SQLVal) +} + +func replaceColumnTypeDefault(newNode, parent SQLNode) { + parent.(*ColumnType).Default = newNode.(Expr) +} + +func replaceColumnTypeLength(newNode, parent SQLNode) { + parent.(*ColumnType).Length = newNode.(*SQLVal) +} + +func replaceColumnTypeNotNull(newNode, parent SQLNode) { + parent.(*ColumnType).NotNull = newNode.(BoolVal) +} + +func replaceColumnTypeOnUpdate(newNode, parent SQLNode) { + parent.(*ColumnType).OnUpdate = newNode.(Expr) +} + +func replaceColumnTypeScale(newNode, parent SQLNode) { + parent.(*ColumnType).Scale = newNode.(*SQLVal) +} + +func replaceColumnTypeUnsigned(newNode, parent SQLNode) { + parent.(*ColumnType).Unsigned = newNode.(BoolVal) +} + +func replaceColumnTypeZerofill(newNode, parent SQLNode) { + parent.(*ColumnType).Zerofill = newNode.(BoolVal) +} + +type replaceColumnsItems int + +func (r *replaceColumnsItems) replace(newNode, container SQLNode) { + container.(Columns)[int(*r)] = newNode.(ColIdent) +} + +func (r *replaceColumnsItems) inc() { + *r++ +} + +func replaceComparisonExprEscape(newNode, parent SQLNode) { + parent.(*ComparisonExpr).Escape = newNode.(Expr) +} + +func replaceComparisonExprLeft(newNode, parent SQLNode) { + parent.(*ComparisonExpr).Left = newNode.(Expr) +} + +func replaceComparisonExprRight(newNode, parent SQLNode) { + parent.(*ComparisonExpr).Right = newNode.(Expr) +} + +func replaceConstraintDefinitionDetails(newNode, parent SQLNode) { + parent.(*ConstraintDefinition).Details = newNode.(ConstraintInfo) +} + +func replaceConvertExprExpr(newNode, parent SQLNode) { + parent.(*ConvertExpr).Expr = newNode.(Expr) +} + +func replaceConvertExprType(newNode, parent SQLNode) { + parent.(*ConvertExpr).Type = newNode.(*ConvertType) +} + +func replaceConvertTypeLength(newNode, parent SQLNode) { + parent.(*ConvertType).Length = newNode.(*SQLVal) +} + +func replaceConvertTypeScale(newNode, parent SQLNode) { + parent.(*ConvertType).Scale = newNode.(*SQLVal) +} + +func replaceConvertUsingExprExpr(newNode, parent SQLNode) { + parent.(*ConvertUsingExpr).Expr = newNode.(Expr) +} + +func replaceCurTimeFuncExprFsp(newNode, parent SQLNode) { + parent.(*CurTimeFuncExpr).Fsp = newNode.(Expr) +} + +func replaceCurTimeFuncExprName(newNode, parent SQLNode) { + parent.(*CurTimeFuncExpr).Name = newNode.(ColIdent) +} + +func replaceDDLAutoIncSpec(newNode, parent SQLNode) { + parent.(*DDL).AutoIncSpec = newNode.(*AutoIncSpec) +} + +func replaceDDLFromTables(newNode, parent SQLNode) { + parent.(*DDL).FromTables = newNode.(TableNames) +} + +func replaceDDLOptLike(newNode, parent SQLNode) { + parent.(*DDL).OptLike = newNode.(*OptLike) +} + +func replaceDDLPartitionSpec(newNode, parent SQLNode) { + parent.(*DDL).PartitionSpec = newNode.(*PartitionSpec) +} + +func replaceDDLSelectStatement(newNode, parent SQLNode) { + parent.(*DDL).SelectStatement = newNode.(SelectStatement) +} + +func replaceDDLTable(newNode, parent SQLNode) { + parent.(*DDL).Table = newNode.(TableName) +} + +func replaceDDLTableSpec(newNode, parent SQLNode) { + parent.(*DDL).TableSpec = newNode.(*TableSpec) +} + +func replaceDDLToTables(newNode, parent SQLNode) { + parent.(*DDL).ToTables = newNode.(TableNames) +} + +type replaceDDLVindexCols int + +func (r *replaceDDLVindexCols) replace(newNode, container SQLNode) { + container.(*DDL).VindexCols[int(*r)] = newNode.(ColIdent) +} + +func (r *replaceDDLVindexCols) inc() { + *r++ +} + +func replaceDDLVindexSpec(newNode, parent SQLNode) { + parent.(*DDL).VindexSpec = newNode.(*VindexSpec) +} + +func replaceDeleteComments(newNode, parent SQLNode) { + parent.(*Delete).Comments = newNode.(Comments) +} + +func replaceDeleteLimit(newNode, parent SQLNode) { + parent.(*Delete).Limit = newNode.(*Limit) +} + +func replaceDeleteOrderBy(newNode, parent SQLNode) { + parent.(*Delete).OrderBy = newNode.(OrderBy) +} + +func replaceDeletePartitions(newNode, parent SQLNode) { + parent.(*Delete).Partitions = newNode.(Partitions) +} + +func replaceDeleteSelectExprs(newNode, parent SQLNode) { + parent.(*Delete).SelectExprs = newNode.(SelectExprs) +} + +func replaceDeleteTableExprs(newNode, parent SQLNode) { + parent.(*Delete).TableExprs = newNode.(TableExprs) +} + +func replaceDeleteTargets(newNode, parent SQLNode) { + parent.(*Delete).Targets = newNode.(TableNames) +} + +func replaceDeleteWhere(newNode, parent SQLNode) { + parent.(*Delete).Where = newNode.(*Where) +} + +func replaceDescribeTableTable(newNode, parent SQLNode) { + parent.(*DescribeTable).Table = newNode.(TableName) +} + +func replaceExecComments(newNode, parent SQLNode) { + parent.(*Exec).Comments = newNode.(Comments) +} + +func replaceExecMethodName(newNode, parent SQLNode) { + parent.(*Exec).MethodName = newNode.(TableName) +} + +func replaceExecSubqueryExec(newNode, parent SQLNode) { + parent.(*ExecSubquery).Exec = newNode.(*Exec) +} + +func replaceExistsExprSubquery(newNode, parent SQLNode) { + parent.(*ExistsExpr).Subquery = newNode.(*Subquery) +} + +func replaceExplainStatement(newNode, parent SQLNode) { + parent.(*Explain).Statement = newNode.(Statement) +} + +type replaceExprsItems int + +func (r *replaceExprsItems) replace(newNode, container SQLNode) { + container.(Exprs)[int(*r)] = newNode.(Expr) +} + +func (r *replaceExprsItems) inc() { + *r++ +} + +func replaceForeignKeyDefinitionOnDelete(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).OnDelete = newNode.(ReferenceAction) +} + +func replaceForeignKeyDefinitionOnUpdate(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).OnUpdate = newNode.(ReferenceAction) +} + +func replaceForeignKeyDefinitionReferencedColumns(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).ReferencedColumns = newNode.(Columns) +} + +func replaceForeignKeyDefinitionReferencedTable(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).ReferencedTable = newNode.(TableName) +} + +func replaceForeignKeyDefinitionSource(newNode, parent SQLNode) { + parent.(*ForeignKeyDefinition).Source = newNode.(Columns) +} + +func replaceFuncExprExprs(newNode, parent SQLNode) { + parent.(*FuncExpr).Exprs = newNode.(SelectExprs) +} + +func replaceFuncExprName(newNode, parent SQLNode) { + parent.(*FuncExpr).Name = newNode.(ColIdent) +} + +func replaceFuncExprQualifier(newNode, parent SQLNode) { + parent.(*FuncExpr).Qualifier = newNode.(TableIdent) +} + +type replaceGroupByItems int + +func (r *replaceGroupByItems) replace(newNode, container SQLNode) { + container.(GroupBy)[int(*r)] = newNode.(Expr) +} + +func (r *replaceGroupByItems) inc() { + *r++ +} + +func replaceGroupConcatExprExprs(newNode, parent SQLNode) { + parent.(*GroupConcatExpr).Exprs = newNode.(SelectExprs) +} + +func replaceGroupConcatExprLimit(newNode, parent SQLNode) { + parent.(*GroupConcatExpr).Limit = newNode.(*Limit) +} + +func replaceGroupConcatExprOrderBy(newNode, parent SQLNode) { + parent.(*GroupConcatExpr).OrderBy = newNode.(OrderBy) +} + +func replaceIndexDefinitionInfo(newNode, parent SQLNode) { + parent.(*IndexDefinition).Info = newNode.(*IndexInfo) +} + +type replaceIndexHintsIndexes int + +func (r *replaceIndexHintsIndexes) replace(newNode, container SQLNode) { + container.(*IndexHints).Indexes[int(*r)] = newNode.(ColIdent) +} + +func (r *replaceIndexHintsIndexes) inc() { + *r++ +} + +func replaceIndexInfoName(newNode, parent SQLNode) { + parent.(*IndexInfo).Name = newNode.(ColIdent) +} + +func replaceInsertColumns(newNode, parent SQLNode) { + parent.(*Insert).Columns = newNode.(Columns) +} + +func replaceInsertComments(newNode, parent SQLNode) { + parent.(*Insert).Comments = newNode.(Comments) +} + +func replaceInsertOnDup(newNode, parent SQLNode) { + parent.(*Insert).OnDup = newNode.(OnDup) +} + +func replaceInsertPartitions(newNode, parent SQLNode) { + parent.(*Insert).Partitions = newNode.(Partitions) +} + +func replaceInsertRows(newNode, parent SQLNode) { + parent.(*Insert).Rows = newNode.(InsertRows) +} + +func replaceInsertSelectExprs(newNode, parent SQLNode) { + parent.(*Insert).SelectExprs = newNode.(SelectExprs) +} + +func replaceInsertTable(newNode, parent SQLNode) { + parent.(*Insert).Table = newNode.(TableName) +} + +func replaceIntervalExprExpr(newNode, parent SQLNode) { + parent.(*IntervalExpr).Expr = newNode.(Expr) +} + +func replaceIsExprExpr(newNode, parent SQLNode) { + parent.(*IsExpr).Expr = newNode.(Expr) +} + +func replaceJoinConditionOn(newNode, parent SQLNode) { + tmp := parent.(JoinCondition) + tmp.On = newNode.(Expr) +} + +func replaceJoinConditionUsing(newNode, parent SQLNode) { + tmp := parent.(JoinCondition) + tmp.Using = newNode.(Columns) +} + +func replaceJoinTableExprCondition(newNode, parent SQLNode) { + parent.(*JoinTableExpr).Condition = newNode.(JoinCondition) +} + +func replaceJoinTableExprLeftExpr(newNode, parent SQLNode) { + parent.(*JoinTableExpr).LeftExpr = newNode.(TableExpr) +} + +func replaceJoinTableExprRightExpr(newNode, parent SQLNode) { + parent.(*JoinTableExpr).RightExpr = newNode.(TableExpr) +} + +func replaceLimitOffset(newNode, parent SQLNode) { + parent.(*Limit).Offset = newNode.(Expr) +} + +func replaceLimitRowcount(newNode, parent SQLNode) { + parent.(*Limit).Rowcount = newNode.(Expr) +} + +func replaceMatchExprColumns(newNode, parent SQLNode) { + parent.(*MatchExpr).Columns = newNode.(SelectExprs) +} + +func replaceMatchExprExpr(newNode, parent SQLNode) { + parent.(*MatchExpr).Expr = newNode.(Expr) +} + +func replaceNativeQueryComments(newNode, parent SQLNode) { + parent.(*NativeQuery).Comments = newNode.(Comments) +} + +func replaceNextvalExpr(newNode, parent SQLNode) { + tmp := parent.(Nextval) + tmp.Expr = newNode.(Expr) +} + +func replaceNotExprExpr(newNode, parent SQLNode) { + parent.(*NotExpr).Expr = newNode.(Expr) +} + +type replaceOnDupItems int + +func (r *replaceOnDupItems) replace(newNode, container SQLNode) { + container.(OnDup)[int(*r)] = newNode.(*UpdateExpr) +} + +func (r *replaceOnDupItems) inc() { + *r++ +} + +func replaceOptLikeLikeTable(newNode, parent SQLNode) { + parent.(*OptLike).LikeTable = newNode.(TableName) +} + +func replaceOrExprLeft(newNode, parent SQLNode) { + parent.(*OrExpr).Left = newNode.(Expr) +} + +func replaceOrExprRight(newNode, parent SQLNode) { + parent.(*OrExpr).Right = newNode.(Expr) +} + +func replaceOrderExpr(newNode, parent SQLNode) { + parent.(*Order).Expr = newNode.(Expr) +} + +type replaceOrderByItems int + +func (r *replaceOrderByItems) replace(newNode, container SQLNode) { + container.(OrderBy)[int(*r)] = newNode.(*Order) +} + +func (r *replaceOrderByItems) inc() { + *r++ +} + +func replaceParenSelectSelect(newNode, parent SQLNode) { + parent.(*ParenSelect).Select = newNode.(SelectStatement) +} + +func replaceParenTableExprExprs(newNode, parent SQLNode) { + parent.(*ParenTableExpr).Exprs = newNode.(TableExprs) +} + +func replacePartitionDefinitionLimit(newNode, parent SQLNode) { + parent.(*PartitionDefinition).Limit = newNode.(Expr) +} + +func replacePartitionDefinitionName(newNode, parent SQLNode) { + parent.(*PartitionDefinition).Name = newNode.(ColIdent) +} + +type replacePartitionSpecDefinitions int + +func (r *replacePartitionSpecDefinitions) replace(newNode, container SQLNode) { + container.(*PartitionSpec).Definitions[int(*r)] = newNode.(*PartitionDefinition) +} + +func (r *replacePartitionSpecDefinitions) inc() { + *r++ +} + +func replacePartitionSpecName(newNode, parent SQLNode) { + parent.(*PartitionSpec).Name = newNode.(ColIdent) +} + +type replacePartitionsItems int + +func (r *replacePartitionsItems) replace(newNode, container SQLNode) { + container.(Partitions)[int(*r)] = newNode.(ColIdent) +} + +func (r *replacePartitionsItems) inc() { + *r++ +} + +func replacePurgeComments(newNode, parent SQLNode) { + parent.(*Purge).Comments = newNode.(Comments) +} + +func replacePurgeTarget(newNode, parent SQLNode) { + parent.(*Purge).Target = newNode.(TableName) +} + +func replaceRangeCondFrom(newNode, parent SQLNode) { + parent.(*RangeCond).From = newNode.(Expr) +} + +func replaceRangeCondLeft(newNode, parent SQLNode) { + parent.(*RangeCond).Left = newNode.(Expr) +} + +func replaceRangeCondTo(newNode, parent SQLNode) { + parent.(*RangeCond).To = newNode.(Expr) +} + +func replaceRefreshMaterializedViewImplicitSelect(newNode, parent SQLNode) { + parent.(*RefreshMaterializedView).ImplicitSelect = newNode.(SelectStatement) +} + +func replaceRefreshMaterializedViewViewName(newNode, parent SQLNode) { + parent.(*RefreshMaterializedView).ViewName = newNode.(TableName) +} + +func replaceReleaseName(newNode, parent SQLNode) { + parent.(*Release).Name = newNode.(ColIdent) +} + +func replaceSRollbackName(newNode, parent SQLNode) { + parent.(*SRollback).Name = newNode.(ColIdent) +} + +func replaceSavepointName(newNode, parent SQLNode) { + parent.(*Savepoint).Name = newNode.(ColIdent) +} + +func replaceSelectComments(newNode, parent SQLNode) { + parent.(*Select).Comments = newNode.(Comments) +} + +func replaceSelectFrom(newNode, parent SQLNode) { + parent.(*Select).From = newNode.(TableExprs) +} + +func replaceSelectGroupBy(newNode, parent SQLNode) { + parent.(*Select).GroupBy = newNode.(GroupBy) +} + +func replaceSelectHaving(newNode, parent SQLNode) { + parent.(*Select).Having = newNode.(*Where) +} + +func replaceSelectLimit(newNode, parent SQLNode) { + parent.(*Select).Limit = newNode.(*Limit) +} + +func replaceSelectOrderBy(newNode, parent SQLNode) { + parent.(*Select).OrderBy = newNode.(OrderBy) +} + +func replaceSelectSelectExprs(newNode, parent SQLNode) { + parent.(*Select).SelectExprs = newNode.(SelectExprs) +} + +func replaceSelectWhere(newNode, parent SQLNode) { + parent.(*Select).Where = newNode.(*Where) +} + +type replaceSelectExprsItems int + +func (r *replaceSelectExprsItems) replace(newNode, container SQLNode) { + container.(SelectExprs)[int(*r)] = newNode.(SelectExpr) +} + +func (r *replaceSelectExprsItems) inc() { + *r++ +} + +func replaceSetComments(newNode, parent SQLNode) { + parent.(*Set).Comments = newNode.(Comments) +} + +func replaceSetExprs(newNode, parent SQLNode) { + parent.(*Set).Exprs = newNode.(SetExprs) +} + +func replaceSetExprExpr(newNode, parent SQLNode) { + parent.(*SetExpr).Expr = newNode.(Expr) +} + +func replaceSetExprName(newNode, parent SQLNode) { + parent.(*SetExpr).Name = newNode.(ColIdent) +} + +type replaceSetExprsItems int + +func (r *replaceSetExprsItems) replace(newNode, container SQLNode) { + container.(SetExprs)[int(*r)] = newNode.(*SetExpr) +} + +func (r *replaceSetExprsItems) inc() { + *r++ +} + +type replaceSetTransactionCharacteristics int + +func (r *replaceSetTransactionCharacteristics) replace(newNode, container SQLNode) { + container.(*SetTransaction).Characteristics[int(*r)] = newNode.(Characteristic) +} + +func (r *replaceSetTransactionCharacteristics) inc() { + *r++ +} + +func replaceSetTransactionComments(newNode, parent SQLNode) { + parent.(*SetTransaction).Comments = newNode.(Comments) +} + +func replaceShowColumns(newNode, parent SQLNode) { + parent.(*Show).Columns = newNode.(Columns) +} + +func replaceShowComments(newNode, parent SQLNode) { + parent.(*Show).Comments = newNode.(Comments) +} + +func replaceShowOnTable(newNode, parent SQLNode) { + parent.(*Show).OnTable = newNode.(TableName) +} + +func replaceShowShowCollationFilterOpt(newNode, parent SQLNode) { + parent.(*Show).ShowCollationFilterOpt = newNode.(Expr) +} + +func replaceShowTable(newNode, parent SQLNode) { + parent.(*Show).Table = newNode.(TableName) +} + +func replaceShowFilterFilter(newNode, parent SQLNode) { + parent.(*ShowFilter).Filter = newNode.(Expr) +} + +func replaceSleepDuration(newNode, parent SQLNode) { + parent.(*Sleep).Duration = newNode.(*SQLVal) +} + +func replaceStarExprTableName(newNode, parent SQLNode) { + parent.(*StarExpr).TableName = newNode.(TableName) +} + +func replaceStreamComments(newNode, parent SQLNode) { + parent.(*Stream).Comments = newNode.(Comments) +} + +func replaceStreamSelectExpr(newNode, parent SQLNode) { + parent.(*Stream).SelectExpr = newNode.(SelectExpr) +} + +func replaceStreamTable(newNode, parent SQLNode) { + parent.(*Stream).Table = newNode.(TableName) +} + +func replaceSubquerySelect(newNode, parent SQLNode) { + parent.(*Subquery).Select = newNode.(SelectStatement) +} + +func replaceSubstrExprFrom(newNode, parent SQLNode) { + parent.(*SubstrExpr).From = newNode.(Expr) +} + +func replaceSubstrExprName(newNode, parent SQLNode) { + parent.(*SubstrExpr).Name = newNode.(*ColName) +} + +func replaceSubstrExprStrVal(newNode, parent SQLNode) { + parent.(*SubstrExpr).StrVal = newNode.(*SQLVal) +} + +func replaceSubstrExprTo(newNode, parent SQLNode) { + parent.(*SubstrExpr).To = newNode.(Expr) +} + +type replaceTableExprsItems int + +func (r *replaceTableExprsItems) replace(newNode, container SQLNode) { + container.(TableExprs)[int(*r)] = newNode.(TableExpr) +} + +func (r *replaceTableExprsItems) inc() { + *r++ +} + +func replaceTableNameName(newNode, parent SQLNode) { + tmp := parent.(TableName) + tmp.Name = newNode.(TableIdent) +} + +func replaceTableNameQualifier(newNode, parent SQLNode) { + tmp := parent.(TableName) + tmp.Qualifier = newNode.(TableIdent) +} + +func replaceTableNameQualifierSecond(newNode, parent SQLNode) { + tmp := parent.(TableName) + tmp.QualifierSecond = newNode.(TableIdent) +} + +func replaceTableNameQualifierThird(newNode, parent SQLNode) { + tmp := parent.(TableName) + tmp.QualifierThird = newNode.(TableIdent) +} + +type replaceTableNamesItems int + +func (r *replaceTableNamesItems) replace(newNode, container SQLNode) { + container.(TableNames)[int(*r)] = newNode.(TableName) +} + +func (r *replaceTableNamesItems) inc() { + *r++ +} + +type replaceTableSpecColumns int + +func (r *replaceTableSpecColumns) replace(newNode, container SQLNode) { + container.(*TableSpec).Columns[int(*r)] = newNode.(*ColumnDefinition) +} + +func (r *replaceTableSpecColumns) inc() { + *r++ +} + +type replaceTableSpecConstraints int + +func (r *replaceTableSpecConstraints) replace(newNode, container SQLNode) { + container.(*TableSpec).Constraints[int(*r)] = newNode.(*ConstraintDefinition) +} + +func (r *replaceTableSpecConstraints) inc() { + *r++ +} + +type replaceTableSpecIndexes int + +func (r *replaceTableSpecIndexes) replace(newNode, container SQLNode) { + container.(*TableSpec).Indexes[int(*r)] = newNode.(*IndexDefinition) +} + +func (r *replaceTableSpecIndexes) inc() { + *r++ +} + +func replaceTableValuedFuncTableExprAs(newNode, parent SQLNode) { + parent.(*TableValuedFuncTableExpr).As = newNode.(TableIdent) +} + +func replaceTableValuedFuncTableExprFuncExpr(newNode, parent SQLNode) { + parent.(*TableValuedFuncTableExpr).FuncExpr = newNode.(Expr) +} + +func replaceTimestampFuncExprExpr1(newNode, parent SQLNode) { + parent.(*TimestampFuncExpr).Expr1 = newNode.(Expr) +} + +func replaceTimestampFuncExprExpr2(newNode, parent SQLNode) { + parent.(*TimestampFuncExpr).Expr2 = newNode.(Expr) +} + +func replaceUnaryCastConcatamerExprExpr(newNode, parent SQLNode) { + parent.(*UnaryCastConcatamerExpr).Expr = newNode.(Expr) +} + +func replaceUnaryExprExpr(newNode, parent SQLNode) { + parent.(*UnaryExpr).Expr = newNode.(Expr) +} + +func replaceUnionFirstStatement(newNode, parent SQLNode) { + parent.(*Union).FirstStatement = newNode.(SelectStatement) +} + +func replaceUnionLimit(newNode, parent SQLNode) { + parent.(*Union).Limit = newNode.(*Limit) +} + +func replaceUnionOrderBy(newNode, parent SQLNode) { + parent.(*Union).OrderBy = newNode.(OrderBy) +} + +type replaceUnionUnionSelects int + +func (r *replaceUnionUnionSelects) replace(newNode, container SQLNode) { + container.(*Union).UnionSelects[int(*r)] = newNode.(*UnionSelect) +} + +func (r *replaceUnionUnionSelects) inc() { + *r++ +} + +func replaceUnionSelectStatement(newNode, parent SQLNode) { + parent.(*UnionSelect).Statement = newNode.(SelectStatement) +} + +func replaceUpdateComments(newNode, parent SQLNode) { + parent.(*Update).Comments = newNode.(Comments) +} + +func replaceUpdateExprs(newNode, parent SQLNode) { + parent.(*Update).Exprs = newNode.(UpdateExprs) +} + +func replaceUpdateFrom(newNode, parent SQLNode) { + parent.(*Update).From = newNode.(TableExprs) +} + +func replaceUpdateLimit(newNode, parent SQLNode) { + parent.(*Update).Limit = newNode.(*Limit) +} + +func replaceUpdateOrderBy(newNode, parent SQLNode) { + parent.(*Update).OrderBy = newNode.(OrderBy) +} + +func replaceUpdateSelectExprs(newNode, parent SQLNode) { + parent.(*Update).SelectExprs = newNode.(SelectExprs) +} + +func replaceUpdateTableExprs(newNode, parent SQLNode) { + parent.(*Update).TableExprs = newNode.(TableExprs) +} + +func replaceUpdateWhere(newNode, parent SQLNode) { + parent.(*Update).Where = newNode.(*Where) +} + +func replaceUpdateExprExpr(newNode, parent SQLNode) { + parent.(*UpdateExpr).Expr = newNode.(Expr) +} + +func replaceUpdateExprName(newNode, parent SQLNode) { + parent.(*UpdateExpr).Name = newNode.(*ColName) +} + +type replaceUpdateExprsItems int + +func (r *replaceUpdateExprsItems) replace(newNode, container SQLNode) { + container.(UpdateExprs)[int(*r)] = newNode.(*UpdateExpr) +} + +func (r *replaceUpdateExprsItems) inc() { + *r++ +} + +func replaceUseDBName(newNode, parent SQLNode) { + parent.(*Use).DBName = newNode.(TableIdent) +} + +type replaceValTupleItems int + +func (r *replaceValTupleItems) replace(newNode, container SQLNode) { + container.(ValTuple)[int(*r)] = newNode.(Expr) +} + +func (r *replaceValTupleItems) inc() { + *r++ +} + +type replaceValuesItems int + +func (r *replaceValuesItems) replace(newNode, container SQLNode) { + container.(Values)[int(*r)] = newNode.(ValTuple) +} + +func (r *replaceValuesItems) inc() { + *r++ +} + +func replaceValuesFuncExprName(newNode, parent SQLNode) { + parent.(*ValuesFuncExpr).Name = newNode.(*ColName) +} + +func replaceVindexParamKey(newNode, parent SQLNode) { + tmp := parent.(VindexParam) + tmp.Key = newNode.(ColIdent) +} + +func replaceVindexSpecName(newNode, parent SQLNode) { + parent.(*VindexSpec).Name = newNode.(ColIdent) +} + +type replaceVindexSpecParams int + +func (r *replaceVindexSpecParams) replace(newNode, container SQLNode) { + container.(*VindexSpec).Params[int(*r)] = newNode.(VindexParam) +} + +func (r *replaceVindexSpecParams) inc() { + *r++ +} + +func replaceVindexSpecType(newNode, parent SQLNode) { + parent.(*VindexSpec).Type = newNode.(ColIdent) +} + +func replaceWhenCond(newNode, parent SQLNode) { + parent.(*When).Cond = newNode.(Expr) +} + +func replaceWhenVal(newNode, parent SQLNode) { + parent.(*When).Val = newNode.(Expr) +} + +func replaceWhereExpr(newNode, parent SQLNode) { + parent.(*Where).Expr = newNode.(Expr) +} + +func replaceXorExprLeft(newNode, parent SQLNode) { + parent.(*XorExpr).Left = newNode.(Expr) +} + +func replaceXorExprRight(newNode, parent SQLNode) { + parent.(*XorExpr).Right = newNode.(Expr) +} + +// apply is where the visiting happens. Here is where we keep the big switch-case that will be used +// to do the actual visiting of SQLNodes +func (a *application) apply(parent, node SQLNode, replacer replacerFunc) { + if node == nil || isNilValue(node) { + return + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.replacer = replacer + a.cursor.node = node + a.cursor.parent = parent + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases is alphabetical) + switch n := node.(type) { + case nil: + case *AccessMode: + + case *AliasedExpr: + a.apply(node, n.As, replaceAliasedExprAs) + a.apply(node, n.Expr, replaceAliasedExprExpr) + + case *AliasedTableExpr: + a.apply(node, n.As, replaceAliasedTableExprAs) + a.apply(node, n.Expr, replaceAliasedTableExprExpr) + a.apply(node, n.Hints, replaceAliasedTableExprHints) + a.apply(node, n.Partitions, replaceAliasedTableExprPartitions) + + case *AndExpr: + a.apply(node, n.Left, replaceAndExprLeft) + a.apply(node, n.Right, replaceAndExprRight) + + case *Auth: + a.apply(node, n.SessionAuth, replaceAuthSessionAuth) + + case *AuthRevoke: + a.apply(node, n.SessionAuth, replaceAuthRevokeSessionAuth) + + case *AutoIncSpec: + a.apply(node, n.Column, replaceAutoIncSpecColumn) + a.apply(node, n.Sequence, replaceAutoIncSpecSequence) + + case *Begin: + + case *BinaryExpr: + a.apply(node, n.Left, replaceBinaryExprLeft) + a.apply(node, n.Right, replaceBinaryExprRight) + + case BoolVal: + + case *CaseExpr: + a.apply(node, n.Else, replaceCaseExprElse) + a.apply(node, n.Expr, replaceCaseExprExpr) + replacerWhens := replaceCaseExprWhens(0) + replacerWhensB := &replacerWhens + for _, item := range n.Whens { + a.apply(node, item, replacerWhensB.replace) + replacerWhensB.inc() + } + + case ColIdent: + + case *ColName: + a.apply(node, n.Name, replaceColNameName) + a.apply(node, n.Qualifier, replaceColNameQualifier) + + case *CollateExpr: + a.apply(node, n.Expr, replaceCollateExprExpr) + + case *ColumnDefinition: + a.apply(node, n.Name, replaceColumnDefinitionName) + + case *ColumnType: + a.apply(node, n.Autoincrement, replaceColumnTypeAutoincrement) + a.apply(node, n.Comment, replaceColumnTypeComment) + a.apply(node, n.Default, replaceColumnTypeDefault) + a.apply(node, n.Length, replaceColumnTypeLength) + a.apply(node, n.NotNull, replaceColumnTypeNotNull) + a.apply(node, n.OnUpdate, replaceColumnTypeOnUpdate) + a.apply(node, n.Scale, replaceColumnTypeScale) + a.apply(node, n.Unsigned, replaceColumnTypeUnsigned) + a.apply(node, n.Zerofill, replaceColumnTypeZerofill) + + case Columns: + replacer := replaceColumnsItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case Comments: + + case *Commit: + + case *ComparisonExpr: + a.apply(node, n.Escape, replaceComparisonExprEscape) + a.apply(node, n.Left, replaceComparisonExprLeft) + a.apply(node, n.Right, replaceComparisonExprRight) + + case *ConstraintDefinition: + a.apply(node, n.Details, replaceConstraintDefinitionDetails) + + case *ConvertExpr: + a.apply(node, n.Expr, replaceConvertExprExpr) + a.apply(node, n.Type, replaceConvertExprType) + + case *ConvertType: + a.apply(node, n.Length, replaceConvertTypeLength) + a.apply(node, n.Scale, replaceConvertTypeScale) + + case *ConvertUsingExpr: + a.apply(node, n.Expr, replaceConvertUsingExprExpr) + + case *CurTimeFuncExpr: + a.apply(node, n.Fsp, replaceCurTimeFuncExprFsp) + a.apply(node, n.Name, replaceCurTimeFuncExprName) + + case *DBDDL: + + case *DDL: + a.apply(node, n.AutoIncSpec, replaceDDLAutoIncSpec) + a.apply(node, n.FromTables, replaceDDLFromTables) + a.apply(node, n.OptLike, replaceDDLOptLike) + a.apply(node, n.PartitionSpec, replaceDDLPartitionSpec) + a.apply(node, n.SelectStatement, replaceDDLSelectStatement) + a.apply(node, n.Table, replaceDDLTable) + a.apply(node, n.TableSpec, replaceDDLTableSpec) + a.apply(node, n.ToTables, replaceDDLToTables) + replacerVindexCols := replaceDDLVindexCols(0) + replacerVindexColsB := &replacerVindexCols + for _, item := range n.VindexCols { + a.apply(node, item, replacerVindexColsB.replace) + replacerVindexColsB.inc() + } + a.apply(node, n.VindexSpec, replaceDDLVindexSpec) + + case *Default: + + case *Delete: + a.apply(node, n.Comments, replaceDeleteComments) + a.apply(node, n.Limit, replaceDeleteLimit) + a.apply(node, n.OrderBy, replaceDeleteOrderBy) + a.apply(node, n.Partitions, replaceDeletePartitions) + a.apply(node, n.SelectExprs, replaceDeleteSelectExprs) + a.apply(node, n.TableExprs, replaceDeleteTableExprs) + a.apply(node, n.Targets, replaceDeleteTargets) + a.apply(node, n.Where, replaceDeleteWhere) + + case *DescribeTable: + a.apply(node, n.Table, replaceDescribeTableTable) + + case *Exec: + a.apply(node, n.Comments, replaceExecComments) + a.apply(node, n.MethodName, replaceExecMethodName) + + case *ExecSubquery: + a.apply(node, n.Exec, replaceExecSubqueryExec) + + case *ExistsExpr: + a.apply(node, n.Subquery, replaceExistsExprSubquery) + + case *Explain: + a.apply(node, n.Statement, replaceExplainStatement) + + case Exprs: + replacer := replaceExprsItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case *ForeignKeyDefinition: + a.apply(node, n.OnDelete, replaceForeignKeyDefinitionOnDelete) + a.apply(node, n.OnUpdate, replaceForeignKeyDefinitionOnUpdate) + a.apply(node, n.ReferencedColumns, replaceForeignKeyDefinitionReferencedColumns) + a.apply(node, n.ReferencedTable, replaceForeignKeyDefinitionReferencedTable) + a.apply(node, n.Source, replaceForeignKeyDefinitionSource) + + case *FuncExpr: + a.apply(node, n.Exprs, replaceFuncExprExprs) + a.apply(node, n.Name, replaceFuncExprName) + a.apply(node, n.Qualifier, replaceFuncExprQualifier) + + case GroupBy: + replacer := replaceGroupByItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case *GroupConcatExpr: + a.apply(node, n.Exprs, replaceGroupConcatExprExprs) + a.apply(node, n.Limit, replaceGroupConcatExprLimit) + a.apply(node, n.OrderBy, replaceGroupConcatExprOrderBy) + + case *IndexDefinition: + a.apply(node, n.Info, replaceIndexDefinitionInfo) + + case *IndexHints: + replacerIndexes := replaceIndexHintsIndexes(0) + replacerIndexesB := &replacerIndexes + for _, item := range n.Indexes { + a.apply(node, item, replacerIndexesB.replace) + replacerIndexesB.inc() + } + + case *IndexInfo: + a.apply(node, n.Name, replaceIndexInfoName) + + case *Insert: + a.apply(node, n.Columns, replaceInsertColumns) + a.apply(node, n.Comments, replaceInsertComments) + a.apply(node, n.OnDup, replaceInsertOnDup) + a.apply(node, n.Partitions, replaceInsertPartitions) + a.apply(node, n.Rows, replaceInsertRows) + a.apply(node, n.SelectExprs, replaceInsertSelectExprs) + a.apply(node, n.Table, replaceInsertTable) + + case *IntervalExpr: + a.apply(node, n.Expr, replaceIntervalExprExpr) + + case *IsExpr: + a.apply(node, n.Expr, replaceIsExprExpr) + + case *IsolationLevel: + + case JoinCondition: + a.apply(node, n.On, replaceJoinConditionOn) + a.apply(node, n.Using, replaceJoinConditionUsing) + + case *JoinTableExpr: + a.apply(node, n.Condition, replaceJoinTableExprCondition) + a.apply(node, n.LeftExpr, replaceJoinTableExprLeftExpr) + a.apply(node, n.RightExpr, replaceJoinTableExprRightExpr) + + case *Limit: + a.apply(node, n.Offset, replaceLimitOffset) + a.apply(node, n.Rowcount, replaceLimitRowcount) + + case ListArg: + + case *MatchExpr: + a.apply(node, n.Columns, replaceMatchExprColumns) + a.apply(node, n.Expr, replaceMatchExprExpr) + + case *NativeQuery: + a.apply(node, n.Comments, replaceNativeQueryComments) + + case Nextval: + a.apply(node, n.Expr, replaceNextvalExpr) + + case *NotExpr: + a.apply(node, n.Expr, replaceNotExprExpr) + + case *NullVal: + + case OnDup: + replacer := replaceOnDupItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case *OptLike: + a.apply(node, n.LikeTable, replaceOptLikeLikeTable) + + case *OrExpr: + a.apply(node, n.Left, replaceOrExprLeft) + a.apply(node, n.Right, replaceOrExprRight) + + case *Order: + a.apply(node, n.Expr, replaceOrderExpr) + + case OrderBy: + replacer := replaceOrderByItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case *OtherAdmin: + + case *OtherRead: + + case *ParenSelect: + a.apply(node, n.Select, replaceParenSelectSelect) + + case *ParenTableExpr: + a.apply(node, n.Exprs, replaceParenTableExprExprs) + + case *PartitionDefinition: + a.apply(node, n.Limit, replacePartitionDefinitionLimit) + a.apply(node, n.Name, replacePartitionDefinitionName) + + case *PartitionSpec: + replacerDefinitions := replacePartitionSpecDefinitions(0) + replacerDefinitionsB := &replacerDefinitions + for _, item := range n.Definitions { + a.apply(node, item, replacerDefinitionsB.replace) + replacerDefinitionsB.inc() + } + a.apply(node, n.Name, replacePartitionSpecName) + + case Partitions: + replacer := replacePartitionsItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case *Purge: + a.apply(node, n.Comments, replacePurgeComments) + a.apply(node, n.Target, replacePurgeTarget) + + case *RangeCond: + a.apply(node, n.From, replaceRangeCondFrom) + a.apply(node, n.Left, replaceRangeCondLeft) + a.apply(node, n.To, replaceRangeCondTo) + + case ReferenceAction: + + case *RefreshMaterializedView: + a.apply(node, n.ImplicitSelect, replaceRefreshMaterializedViewImplicitSelect) + a.apply(node, n.ViewName, replaceRefreshMaterializedViewViewName) + + case *Registry: + + case *Release: + a.apply(node, n.Name, replaceReleaseName) + + case *Rollback: + + case *SQLVal: + + case *SRollback: + a.apply(node, n.Name, replaceSRollbackName) + + case *Savepoint: + a.apply(node, n.Name, replaceSavepointName) + + case *Select: + a.apply(node, n.Comments, replaceSelectComments) + a.apply(node, n.From, replaceSelectFrom) + a.apply(node, n.GroupBy, replaceSelectGroupBy) + a.apply(node, n.Having, replaceSelectHaving) + a.apply(node, n.Limit, replaceSelectLimit) + a.apply(node, n.OrderBy, replaceSelectOrderBy) + a.apply(node, n.SelectExprs, replaceSelectSelectExprs) + a.apply(node, n.Where, replaceSelectWhere) + + case SelectExprs: + replacer := replaceSelectExprsItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case *Set: + a.apply(node, n.Comments, replaceSetComments) + a.apply(node, n.Exprs, replaceSetExprs) + + case *SetExpr: + a.apply(node, n.Expr, replaceSetExprExpr) + a.apply(node, n.Name, replaceSetExprName) + + case SetExprs: + replacer := replaceSetExprsItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case *SetTransaction: + replacerCharacteristics := replaceSetTransactionCharacteristics(0) + replacerCharacteristicsB := &replacerCharacteristics + for _, item := range n.Characteristics { + a.apply(node, item, replacerCharacteristicsB.replace) + replacerCharacteristicsB.inc() + } + a.apply(node, n.Comments, replaceSetTransactionComments) + + case *Show: + a.apply(node, n.Columns, replaceShowColumns) + a.apply(node, n.Comments, replaceShowComments) + a.apply(node, n.OnTable, replaceShowOnTable) + a.apply(node, n.ShowCollationFilterOpt, replaceShowShowCollationFilterOpt) + a.apply(node, n.Table, replaceShowTable) + + case *ShowFilter: + a.apply(node, n.Filter, replaceShowFilterFilter) + + case *Sleep: + a.apply(node, n.Duration, replaceSleepDuration) + + case *StarExpr: + a.apply(node, n.TableName, replaceStarExprTableName) + + case *Stream: + a.apply(node, n.Comments, replaceStreamComments) + a.apply(node, n.SelectExpr, replaceStreamSelectExpr) + a.apply(node, n.Table, replaceStreamTable) + + case *Subquery: + a.apply(node, n.Select, replaceSubquerySelect) + + case *SubstrExpr: + a.apply(node, n.From, replaceSubstrExprFrom) + a.apply(node, n.Name, replaceSubstrExprName) + a.apply(node, n.StrVal, replaceSubstrExprStrVal) + a.apply(node, n.To, replaceSubstrExprTo) + + case TableExprs: + replacer := replaceTableExprsItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case TableIdent: + + case TableName: + a.apply(node, n.Name, replaceTableNameName) + a.apply(node, n.Qualifier, replaceTableNameQualifier) + a.apply(node, n.QualifierSecond, replaceTableNameQualifierSecond) + a.apply(node, n.QualifierThird, replaceTableNameQualifierThird) + + case TableNames: + replacer := replaceTableNamesItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case *TableSpec: + replacerColumns := replaceTableSpecColumns(0) + replacerColumnsB := &replacerColumns + for _, item := range n.Columns { + a.apply(node, item, replacerColumnsB.replace) + replacerColumnsB.inc() + } + replacerConstraints := replaceTableSpecConstraints(0) + replacerConstraintsB := &replacerConstraints + for _, item := range n.Constraints { + a.apply(node, item, replacerConstraintsB.replace) + replacerConstraintsB.inc() + } + replacerIndexes := replaceTableSpecIndexes(0) + replacerIndexesB := &replacerIndexes + for _, item := range n.Indexes { + a.apply(node, item, replacerIndexesB.replace) + replacerIndexesB.inc() + } + + case *TableValuedFuncTableExpr: + a.apply(node, n.As, replaceTableValuedFuncTableExprAs) + a.apply(node, n.FuncExpr, replaceTableValuedFuncTableExprFuncExpr) + + case *TimestampFuncExpr: + a.apply(node, n.Expr1, replaceTimestampFuncExprExpr1) + a.apply(node, n.Expr2, replaceTimestampFuncExprExpr2) + + case *UnaryCastConcatamerExpr: + a.apply(node, n.Expr, replaceUnaryCastConcatamerExprExpr) + + case *UnaryExpr: + a.apply(node, n.Expr, replaceUnaryExprExpr) + + case *Union: + a.apply(node, n.FirstStatement, replaceUnionFirstStatement) + a.apply(node, n.Limit, replaceUnionLimit) + a.apply(node, n.OrderBy, replaceUnionOrderBy) + replacerUnionSelects := replaceUnionUnionSelects(0) + replacerUnionSelectsB := &replacerUnionSelects + for _, item := range n.UnionSelects { + a.apply(node, item, replacerUnionSelectsB.replace) + replacerUnionSelectsB.inc() + } + + case *UnionSelect: + a.apply(node, n.Statement, replaceUnionSelectStatement) + + case *Update: + a.apply(node, n.Comments, replaceUpdateComments) + a.apply(node, n.Exprs, replaceUpdateExprs) + a.apply(node, n.From, replaceUpdateFrom) + a.apply(node, n.Limit, replaceUpdateLimit) + a.apply(node, n.OrderBy, replaceUpdateOrderBy) + a.apply(node, n.SelectExprs, replaceUpdateSelectExprs) + a.apply(node, n.TableExprs, replaceUpdateTableExprs) + a.apply(node, n.Where, replaceUpdateWhere) + + case *UpdateExpr: + a.apply(node, n.Expr, replaceUpdateExprExpr) + a.apply(node, n.Name, replaceUpdateExprName) + + case UpdateExprs: + replacer := replaceUpdateExprsItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case *Use: + a.apply(node, n.DBName, replaceUseDBName) + + case ValTuple: + replacer := replaceValTupleItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case Values: + replacer := replaceValuesItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + } + + case *ValuesFuncExpr: + a.apply(node, n.Name, replaceValuesFuncExprName) + + case VindexParam: + a.apply(node, n.Key, replaceVindexParamKey) + + case *VindexSpec: + a.apply(node, n.Name, replaceVindexSpecName) + replacerParams := replaceVindexSpecParams(0) + replacerParamsB := &replacerParams + for _, item := range n.Params { + a.apply(node, item, replacerParamsB.replace) + replacerParamsB.inc() + } + a.apply(node, n.Type, replaceVindexSpecType) + + case *When: + a.apply(node, n.Cond, replaceWhenCond) + a.apply(node, n.Val, replaceWhenVal) + + case *Where: + a.apply(node, n.Expr, replaceWhereExpr) + + case *XorExpr: + a.apply(node, n.Left, replaceXorExprLeft) + a.apply(node, n.Right, replaceXorExprRight) + + default: + panic("unknown ast type " + reflect.TypeOf(node).String()) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +func isNilValue(i interface{}) bool { + valueOf := reflect.ValueOf(i) + kind := valueOf.Kind() + isNullable := kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice + return isNullable && valueOf.IsNil() +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/rewriter_api.go b/internal/stackql-parser-fork/go/vt/sqlparser/rewriter_api.go new file mode 100644 index 00000000..c5732d1a --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/rewriter_api.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +// The rewriter was heavily inspired by https://github.com/golang/tools/blob/master/go/ast/astutil/rewrite.go + +// Rewrite traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Rewrite returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., fields of basic types (strings, []byte, etc.) are ignored. +// +func Rewrite(node SQLNode, pre, post ApplyFunc) (result SQLNode) { + parent := &struct{ SQLNode }{node} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.SQLNode + }() + + a := &application{ + pre: pre, + post: post, + cursor: Cursor{}, + } + + // this is the root-replacer, used when the user replaces the root of the ast + replacer := func(newNode SQLNode, _ SQLNode) { + parent.SQLNode = newNode + } + + a.apply(parent, node, replacer) + + return parent.SQLNode +} + +// An ApplyFunc is invoked by Rewrite for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Rewrite for details. +type ApplyFunc func(*Cursor) bool + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node and Parent methods. +type Cursor struct { + parent SQLNode + replacer replacerFunc + node SQLNode +} + +// Node returns the current Node. +func (c *Cursor) Node() SQLNode { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() SQLNode { return c.parent } + +// Replace replaces the current node in the parent field with this new object. The use needs to make sure to not +// replace the object with something of the wrong type, or the visitor will panic. +func (c *Cursor) Replace(newNode SQLNode) { + c.replacer(newNode, c.parent) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/set_normalizer.go b/internal/stackql-parser-fork/go/vt/sqlparser/set_normalizer.go new file mode 100644 index 00000000..cd48d20d --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/set_normalizer.go @@ -0,0 +1,81 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "strings" + + "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" + "github.com/stackql/stackql-parser/go/vt/vterrors" +) + +type setNormalizer struct { + err error +} + +func (n *setNormalizer) rewriteSetComingUp(cursor *Cursor) bool { + set, ok := cursor.node.(*Set) + if ok { + for i, expr := range set.Exprs { + exp, err := n.normalizeSetExpr(expr) + if err != nil { + n.err = err + return false + } + set.Exprs[i] = exp + } + } + return true +} + +func (n *setNormalizer) normalizeSetExpr(in *SetExpr) (*SetExpr, error) { + switch in.Name.at { // using switch so we can use break + case DoubleAt: + if in.Scope != "" { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cannot use scope and @@") + } + switch { + case strings.HasPrefix(in.Name.Lowered(), "session."): + in.Name = NewColIdent(in.Name.Lowered()[8:]) + in.Scope = SessionStr + case strings.HasPrefix(in.Name.Lowered(), "global."): + in.Name = NewColIdent(in.Name.Lowered()[7:]) + in.Scope = GlobalStr + case strings.HasPrefix(in.Name.Lowered(), "vitess_metadata."): + in.Name = NewColIdent(in.Name.Lowered()[16:]) + in.Scope = VitessMetadataStr + default: + in.Name.at = NoAt + in.Scope = SessionStr + } + return in, nil + case SingleAt: + if in.Scope != "" { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cannot mix scope and user defined variables") + } + return in, nil + case NoAt: + switch in.Scope { + case "": + in.Scope = SessionStr + case "local": + in.Scope = SessionStr + } + return in, nil + } + panic("this should never happen") +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/set_normalizer_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/set_normalizer_test.go new file mode 100644 index 00000000..394ff4f6 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/set_normalizer_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNormalizeSetExpr(t *testing.T) { + tests := []struct { + in, expected, err string + }{{ + in: "@@foo = 42", + expected: "session foo = 42", + }, { + in: "@@session.foo = 42", + expected: "session foo = 42", + }, { + in: "session foo = 42", + }, { + in: "global foo = 42", + }, { + in: "@@global.foo = 42", + expected: "global foo = 42", + }, { + in: "global @foo = 42", + err: "cannot mix scope and user defined variables", + }, { + in: "global @@foo = 42", + err: "cannot use scope and @@", + }, { + in: "session @@foo = 42", + err: "cannot use scope and @@", + }, { + in: "foo = 42", + expected: "session foo = 42", + }, { + in: "@@vitess_metadata.foo = 42", + expected: "vitess_metadata foo = 42", + }, { + in: "@@x.foo = 42", + expected: "session `x.foo` = 42", + }, { + in: "@@session.x.foo = 42", + expected: "session `x.foo` = 42", + //}, { TODO: we should support local scope as well + // in: "local foo = 42", + // expected: "session foo = 42", + }} + for _, tt := range tests { + t.Run(tt.in, func(t *testing.T) { + if tt.expected == "" { + tt.expected = tt.in + } + + statement, err := Parse("set " + tt.in) + require.NoError(t, err) + rewriter := setNormalizer{} + out, err := rewriter.normalizeSetExpr(statement.(*Set).Exprs[0]) + if tt.err != "" { + require.EqualError(t, err, tt.err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expected, String(out)) + } + }) + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/sql.go b/internal/stackql-parser-fork/go/vt/sqlparser/sql.go new file mode 100644 index 00000000..bb9b7a88 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/sql.go @@ -0,0 +1,9094 @@ +// Code generated by goyacc -o /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.go /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y. DO NOT EDIT. + +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:18 +package sqlparser + +import __yyfmt__ "fmt" + +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:18 + +import "strings" + +func setParseTree(yylex interface{}, stmt Statement) { + yylex.(*Tokenizer).ParseTree = stmt +} + +func setAllowComments(yylex interface{}, allow bool) { + yylex.(*Tokenizer).AllowComments = allow +} + +func setDDL(yylex interface{}, ddl *DDL) { + yylex.(*Tokenizer).partialDDL = ddl +} + +func incNesting(yylex interface{}) bool { + yylex.(*Tokenizer).nesting++ + if yylex.(*Tokenizer).nesting == 200 { + return true + } + return false +} + +func decNesting(yylex interface{}) { + yylex.(*Tokenizer).nesting-- +} + +// skipToEnd forces the lexer to end prematurely. Not all SQL statements +// are supported by the Parser, thus calling skipToEnd will make the lexer +// return EOF early. +func skipToEnd(yylex interface{}) { + yylex.(*Tokenizer).SkipToEnd = true +} + +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:55 +type yySymType struct { + yys int + empty struct{} + statement Statement + selStmt SelectStatement + ddl *DDL + ins *Insert + byt byte + bytes []byte + bytes2 [][]byte + str string + strs []string + selectExprs SelectExprs + selectExpr SelectExpr + columns Columns + partitions Partitions + colName *ColName + tableExprs TableExprs + tableExpr TableExpr + joinCondition JoinCondition + tableName TableName + tableNames TableNames + indexHints *IndexHints + expr Expr + exprs Exprs + boolVal BoolVal + sqlVal *SQLVal + colTuple ColTuple + values Values + valTuple ValTuple + subquery *Subquery + execsubquery *ExecSubquery + whens []*When + when *When + orderBy OrderBy + order *Order + limit *Limit + updateExprs UpdateExprs + setExprs SetExprs + updateExpr *UpdateExpr + setExpr *SetExpr + characteristic Characteristic + characteristics []Characteristic + colIdent ColIdent + tableIdent TableIdent + convertType *ConvertType + aliasedTableName *AliasedTableExpr + TableSpec *TableSpec + columnType ColumnType + colKeyOpt ColumnKeyOption + optVal Expr + LengthScaleOption LengthScaleOption + columnDefinition *ColumnDefinition + indexDefinition *IndexDefinition + indexInfo *IndexInfo + indexOption *IndexOption + indexOptions []*IndexOption + indexColumn *IndexColumn + indexColumns []*IndexColumn + constraintDefinition *ConstraintDefinition + constraintInfo ConstraintInfo + ReferenceAction ReferenceAction + partDefs []*PartitionDefinition + partDef *PartitionDefinition + partSpec *PartitionSpec + vindexParam VindexParam + vindexParams []VindexParam + showFilter *ShowFilter + optLike *OptLike + execVarDef ExecVarDef + execVarDefOpt *ExecVarDef + execVarDefs []ExecVarDef + listArgsConcat []ListArg + overClause *OverClause + windowSpec *WindowSpec + frameClause *FrameClause + framePoint *FramePoint + withClause *With + cteList []*CommonTableExpr + cte *CommonTableExpr +} + +const LEX_ERROR = 57346 +const UNION = 57347 +const SELECT = 57348 +const STREAM = 57349 +const INSERT = 57350 +const UPDATE = 57351 +const DELETE = 57352 +const FROM = 57353 +const WHERE = 57354 +const GROUP = 57355 +const HAVING = 57356 +const ORDER = 57357 +const BY = 57358 +const LIMIT = 57359 +const OFFSET = 57360 +const FOR = 57361 +const ALL = 57362 +const DISTINCT = 57363 +const AS = 57364 +const EXISTS = 57365 +const ASC = 57366 +const DESC = 57367 +const INTO = 57368 +const DUPLICATE = 57369 +const KEY = 57370 +const DEFAULT = 57371 +const SET = 57372 +const LOCK = 57373 +const UNLOCK = 57374 +const KEYS = 57375 +const DO = 57376 +const DISTINCTROW = 57377 +const VALUES = 57378 +const LAST_INSERT_ID = 57379 +const NEXT = 57380 +const VALUE = 57381 +const SHARE = 57382 +const MODE = 57383 +const SQL_NO_CACHE = 57384 +const SQL_CACHE = 57385 +const SQL_CALC_FOUND_ROWS = 57386 +const JOIN = 57387 +const STRAIGHT_JOIN = 57388 +const LEFT = 57389 +const RIGHT = 57390 +const INNER = 57391 +const OUTER = 57392 +const CROSS = 57393 +const NATURAL = 57394 +const USE = 57395 +const FORCE = 57396 +const ON = 57397 +const USING = 57398 +const ID = 57399 +const AT_ID = 57400 +const AT_AT_ID = 57401 +const HEX = 57402 +const STRING = 57403 +const INTEGRAL = 57404 +const FLOAT = 57405 +const HEXNUM = 57406 +const VALUE_ARG = 57407 +const LIST_ARG = 57408 +const COMMENT = 57409 +const COMMENT_KEYWORD = 57410 +const BIT_LITERAL = 57411 +const NULL = 57412 +const TRUE = 57413 +const FALSE = 57414 +const OFF = 57415 +const OR = 57416 +const XOR = 57417 +const AND = 57418 +const NOT = 57419 +const BETWEEN = 57420 +const CASE = 57421 +const WHEN = 57422 +const THEN = 57423 +const ELSE = 57424 +const END = 57425 +const LE = 57426 +const GE = 57427 +const NE = 57428 +const NULL_SAFE_EQUAL = 57429 +const IS = 57430 +const LIKE = 57431 +const REGEXP = 57432 +const IN = 57433 +const SHIFT_LEFT = 57434 +const SHIFT_RIGHT = 57435 +const DIV = 57436 +const MOD = 57437 +const UNARY = 57438 +const COLLATE = 57439 +const BINARY = 57440 +const UNDERSCORE_BINARY = 57441 +const UNDERSCORE_UTF8MB4 = 57442 +const UNDERSCORE_UTF8 = 57443 +const UNDERSCORE_LATIN1 = 57444 +const INTERVAL = 57445 +const JSON_EXTRACT_OP = 57446 +const JSON_UNQUOTE_EXTRACT_OP = 57447 +const CREATE = 57448 +const ALTER = 57449 +const DROP = 57450 +const RENAME = 57451 +const ANALYZE = 57452 +const ADD = 57453 +const FLUSH = 57454 +const SCHEMA = 57455 +const TABLE = 57456 +const INDEX = 57457 +const VIEW = 57458 +const TO = 57459 +const IGNORE = 57460 +const IF = 57461 +const UNIQUE = 57462 +const PRIMARY = 57463 +const COLUMN = 57464 +const SPATIAL = 57465 +const FULLTEXT = 57466 +const KEY_BLOCK_SIZE = 57467 +const CHECK = 57468 +const INDEXES = 57469 +const ACTION = 57470 +const CASCADE = 57471 +const CONSTRAINT = 57472 +const FOREIGN = 57473 +const NO = 57474 +const REFERENCES = 57475 +const RESTRICT = 57476 +const SHOW = 57477 +const DESCRIBE = 57478 +const EXPLAIN = 57479 +const DATE = 57480 +const ESCAPE = 57481 +const REPAIR = 57482 +const OPTIMIZE = 57483 +const TRUNCATE = 57484 +const MAXVALUE = 57485 +const PARTITION = 57486 +const REORGANIZE = 57487 +const LESS = 57488 +const THAN = 57489 +const PROCEDURE = 57490 +const TRIGGER = 57491 +const VINDEX = 57492 +const VINDEXES = 57493 +const STATUS = 57494 +const VARIABLES = 57495 +const WARNINGS = 57496 +const SEQUENCE = 57497 +const BEGIN = 57498 +const START = 57499 +const TRANSACTION = 57500 +const COMMIT = 57501 +const ROLLBACK = 57502 +const SAVEPOINT = 57503 +const RELEASE = 57504 +const WORK = 57505 +const BIT = 57506 +const TINYINT = 57507 +const SMALLINT = 57508 +const MEDIUMINT = 57509 +const INT = 57510 +const INTEGER = 57511 +const BIGINT = 57512 +const INTNUM = 57513 +const REAL = 57514 +const DOUBLE = 57515 +const FLOAT_TYPE = 57516 +const DECIMAL = 57517 +const NUMERIC = 57518 +const TIME = 57519 +const TIMESTAMP = 57520 +const DATETIME = 57521 +const YEAR = 57522 +const CHAR = 57523 +const VARCHAR = 57524 +const BOOL = 57525 +const CHARACTER = 57526 +const VARBINARY = 57527 +const NCHAR = 57528 +const TEXT = 57529 +const TINYTEXT = 57530 +const MEDIUMTEXT = 57531 +const LONGTEXT = 57532 +const BLOB = 57533 +const TINYBLOB = 57534 +const MEDIUMBLOB = 57535 +const LONGBLOB = 57536 +const JSON = 57537 +const ENUM = 57538 +const GEOMETRY = 57539 +const POINT = 57540 +const LINESTRING = 57541 +const POLYGON = 57542 +const GEOMETRYCOLLECTION = 57543 +const MULTIPOINT = 57544 +const MULTILINESTRING = 57545 +const MULTIPOLYGON = 57546 +const NULLX = 57547 +const AUTO_INCREMENT = 57548 +const APPROXNUM = 57549 +const SIGNED = 57550 +const UNSIGNED = 57551 +const ZEROFILL = 57552 +const COLLATION = 57553 +const DATABASES = 57554 +const TABLES = 57555 +const VITESS_METADATA = 57556 +const VSCHEMA = 57557 +const FULL = 57558 +const PROCESSLIST = 57559 +const COLUMNS = 57560 +const FIELDS = 57561 +const ENGINES = 57562 +const PLUGINS = 57563 +const EXTENDED = 57564 +const NAMES = 57565 +const CHARSET = 57566 +const GLOBAL = 57567 +const SESSION = 57568 +const ISOLATION = 57569 +const LEVEL = 57570 +const READ = 57571 +const WRITE = 57572 +const ONLY = 57573 +const REPEATABLE = 57574 +const COMMITTED = 57575 +const UNCOMMITTED = 57576 +const SERIALIZABLE = 57577 +const CURRENT_TIMESTAMP = 57578 +const DATABASE = 57579 +const CURRENT_DATE = 57580 +const CURRENT_TIME = 57581 +const LOCALTIME = 57582 +const LOCALTIMESTAMP = 57583 +const UTC_DATE = 57584 +const UTC_TIME = 57585 +const UTC_TIMESTAMP = 57586 +const REPLACE = 57587 +const CONVERT = 57588 +const CAST = 57589 +const SUBSTR = 57590 +const SUBSTRING = 57591 +const GROUP_CONCAT = 57592 +const SEPARATOR = 57593 +const TIMESTAMPADD = 57594 +const TIMESTAMPDIFF = 57595 +const MATCH = 57596 +const AGAINST = 57597 +const BOOLEAN = 57598 +const LANGUAGE = 57599 +const WITH = 57600 +const QUERY = 57601 +const EXPANSION = 57602 +const OVER = 57603 +const ROWS = 57604 +const RANGE = 57605 +const UNBOUNDED = 57606 +const PRECEDING = 57607 +const FOLLOWING = 57608 +const CURRENT = 57609 +const ROW = 57610 +const RECURSIVE = 57611 +const UNUSED = 57612 +const ARRAY = 57613 +const CUME_DIST = 57614 +const DESCRIPTION = 57615 +const DENSE_RANK = 57616 +const EMPTY = 57617 +const EXCEPT = 57618 +const FIRST_VALUE = 57619 +const GROUPING = 57620 +const GROUPS = 57621 +const JSON_TABLE = 57622 +const LAG = 57623 +const LAST_VALUE = 57624 +const LATERAL = 57625 +const LEAD = 57626 +const MEMBER = 57627 +const NTH_VALUE = 57628 +const NTILE = 57629 +const OF = 57630 +const PERCENT_RANK = 57631 +const RANK = 57632 +const ROW_NUMBER = 57633 +const SYSTEM = 57634 +const WINDOW = 57635 +const ACTIVE = 57636 +const ADMIN = 57637 +const BUCKETS = 57638 +const CLONE = 57639 +const COMPONENT = 57640 +const DEFINITION = 57641 +const ENFORCED = 57642 +const EXCLUDE = 57643 +const GEOMCOLLECTION = 57644 +const GET_MASTER_PUBLIC_KEY = 57645 +const HISTOGRAM = 57646 +const HISTORY = 57647 +const INACTIVE = 57648 +const INVISIBLE = 57649 +const LOCKED = 57650 +const MASTER_COMPRESSION_ALGORITHMS = 57651 +const MASTER_PUBLIC_KEY_PATH = 57652 +const MASTER_TLS_CIPHERSUITES = 57653 +const MASTER_ZSTD_COMPRESSION_LEVEL = 57654 +const NESTED = 57655 +const NETWORK_NAMESPACE = 57656 +const NOWAIT = 57657 +const NULLS = 57658 +const OJ = 57659 +const OLD = 57660 +const OPTIONAL = 57661 +const ORDINALITY = 57662 +const ORGANIZATION = 57663 +const OTHERS = 57664 +const PATH = 57665 +const PERSIST = 57666 +const PERSIST_ONLY = 57667 +const PRIVILEGE_CHECKS_USER = 57668 +const PROCESS = 57669 +const RANDOM = 57670 +const REFERENCE = 57671 +const REQUIRE_ROW_FORMAT = 57672 +const RESOURCE = 57673 +const RESPECT = 57674 +const RESTART = 57675 +const RETAIN = 57676 +const REUSE = 57677 +const ROLE = 57678 +const SECONDARY = 57679 +const SECONDARY_ENGINE = 57680 +const SECONDARY_LOAD = 57681 +const SECONDARY_UNLOAD = 57682 +const SKIP = 57683 +const SRID = 57684 +const THREAD_PRIORITY = 57685 +const TIES = 57686 +const VCPU = 57687 +const VISIBLE = 57688 +const FORMAT = 57689 +const TREE = 57690 +const VITESS = 57691 +const TRADITIONAL = 57692 +const AUTH = 57693 +const INTERACTIVE = 57694 +const LOGIN = 57695 +const REVOKE = 57696 +const SA = 57697 +const SERVICEACCOUNT = 57698 +const SLEEP = 57699 +const MATERIALIZED = 57700 +const TEMP = 57701 +const TEMPORARY = 57702 +const REFRESH = 57703 +const JSON_ARRAY_ELEMENTS_TEXT = 57704 +const JSON_EACH = 57705 +const UNNEST = 57706 +const REGISTRY = 57707 +const PULL = 57708 +const LIST = 57709 +const EXEC = 57710 +const PURGE = 57711 +const NATIVEQUERY = 57712 +const STACKQL = 57713 +const RETURNING = 57714 + +var yyToknames = [...]string{ + "$end", + "error", + "$unk", + "LEX_ERROR", + "UNION", + "SELECT", + "STREAM", + "INSERT", + "UPDATE", + "DELETE", + "FROM", + "WHERE", + "GROUP", + "HAVING", + "ORDER", + "BY", + "LIMIT", + "OFFSET", + "FOR", + "ALL", + "DISTINCT", + "AS", + "EXISTS", + "ASC", + "DESC", + "INTO", + "DUPLICATE", + "KEY", + "DEFAULT", + "SET", + "LOCK", + "UNLOCK", + "KEYS", + "DO", + "DISTINCTROW", + "VALUES", + "LAST_INSERT_ID", + "NEXT", + "VALUE", + "SHARE", + "MODE", + "SQL_NO_CACHE", + "SQL_CACHE", + "SQL_CALC_FOUND_ROWS", + "JOIN", + "STRAIGHT_JOIN", + "LEFT", + "RIGHT", + "INNER", + "OUTER", + "CROSS", + "NATURAL", + "USE", + "FORCE", + "ON", + "USING", + "'('", + "','", + "')'", + "ID", + "AT_ID", + "AT_AT_ID", + "HEX", + "STRING", + "INTEGRAL", + "FLOAT", + "HEXNUM", + "VALUE_ARG", + "LIST_ARG", + "COMMENT", + "COMMENT_KEYWORD", + "BIT_LITERAL", + "NULL", + "TRUE", + "FALSE", + "OFF", + "OR", + "XOR", + "AND", + "NOT", + "'!'", + "BETWEEN", + "CASE", + "WHEN", + "THEN", + "ELSE", + "END", + "'='", + "'<'", + "'>'", + "LE", + "GE", + "NE", + "NULL_SAFE_EQUAL", + "IS", + "LIKE", + "REGEXP", + "IN", + "'|'", + "'&'", + "SHIFT_LEFT", + "SHIFT_RIGHT", + "'+'", + "'-'", + "'*'", + "'/'", + "DIV", + "'%'", + "MOD", + "'^'", + "'~'", + "UNARY", + "COLLATE", + "BINARY", + "UNDERSCORE_BINARY", + "UNDERSCORE_UTF8MB4", + "UNDERSCORE_UTF8", + "UNDERSCORE_LATIN1", + "INTERVAL", + "'.'", + "JSON_EXTRACT_OP", + "JSON_UNQUOTE_EXTRACT_OP", + "CREATE", + "ALTER", + "DROP", + "RENAME", + "ANALYZE", + "ADD", + "FLUSH", + "SCHEMA", + "TABLE", + "INDEX", + "VIEW", + "TO", + "IGNORE", + "IF", + "UNIQUE", + "PRIMARY", + "COLUMN", + "SPATIAL", + "FULLTEXT", + "KEY_BLOCK_SIZE", + "CHECK", + "INDEXES", + "ACTION", + "CASCADE", + "CONSTRAINT", + "FOREIGN", + "NO", + "REFERENCES", + "RESTRICT", + "SHOW", + "DESCRIBE", + "EXPLAIN", + "DATE", + "ESCAPE", + "REPAIR", + "OPTIMIZE", + "TRUNCATE", + "MAXVALUE", + "PARTITION", + "REORGANIZE", + "LESS", + "THAN", + "PROCEDURE", + "TRIGGER", + "VINDEX", + "VINDEXES", + "STATUS", + "VARIABLES", + "WARNINGS", + "SEQUENCE", + "BEGIN", + "START", + "TRANSACTION", + "COMMIT", + "ROLLBACK", + "SAVEPOINT", + "RELEASE", + "WORK", + "BIT", + "TINYINT", + "SMALLINT", + "MEDIUMINT", + "INT", + "INTEGER", + "BIGINT", + "INTNUM", + "REAL", + "DOUBLE", + "FLOAT_TYPE", + "DECIMAL", + "NUMERIC", + "TIME", + "TIMESTAMP", + "DATETIME", + "YEAR", + "CHAR", + "VARCHAR", + "BOOL", + "CHARACTER", + "VARBINARY", + "NCHAR", + "TEXT", + "TINYTEXT", + "MEDIUMTEXT", + "LONGTEXT", + "BLOB", + "TINYBLOB", + "MEDIUMBLOB", + "LONGBLOB", + "JSON", + "ENUM", + "GEOMETRY", + "POINT", + "LINESTRING", + "POLYGON", + "GEOMETRYCOLLECTION", + "MULTIPOINT", + "MULTILINESTRING", + "MULTIPOLYGON", + "NULLX", + "AUTO_INCREMENT", + "APPROXNUM", + "SIGNED", + "UNSIGNED", + "ZEROFILL", + "COLLATION", + "DATABASES", + "TABLES", + "VITESS_METADATA", + "VSCHEMA", + "FULL", + "PROCESSLIST", + "COLUMNS", + "FIELDS", + "ENGINES", + "PLUGINS", + "EXTENDED", + "NAMES", + "CHARSET", + "GLOBAL", + "SESSION", + "ISOLATION", + "LEVEL", + "READ", + "WRITE", + "ONLY", + "REPEATABLE", + "COMMITTED", + "UNCOMMITTED", + "SERIALIZABLE", + "CURRENT_TIMESTAMP", + "DATABASE", + "CURRENT_DATE", + "CURRENT_TIME", + "LOCALTIME", + "LOCALTIMESTAMP", + "UTC_DATE", + "UTC_TIME", + "UTC_TIMESTAMP", + "REPLACE", + "CONVERT", + "CAST", + "SUBSTR", + "SUBSTRING", + "GROUP_CONCAT", + "SEPARATOR", + "TIMESTAMPADD", + "TIMESTAMPDIFF", + "MATCH", + "AGAINST", + "BOOLEAN", + "LANGUAGE", + "WITH", + "QUERY", + "EXPANSION", + "OVER", + "ROWS", + "RANGE", + "UNBOUNDED", + "PRECEDING", + "FOLLOWING", + "CURRENT", + "ROW", + "RECURSIVE", + "UNUSED", + "ARRAY", + "CUME_DIST", + "DESCRIPTION", + "DENSE_RANK", + "EMPTY", + "EXCEPT", + "FIRST_VALUE", + "GROUPING", + "GROUPS", + "JSON_TABLE", + "LAG", + "LAST_VALUE", + "LATERAL", + "LEAD", + "MEMBER", + "NTH_VALUE", + "NTILE", + "OF", + "PERCENT_RANK", + "RANK", + "ROW_NUMBER", + "SYSTEM", + "WINDOW", + "ACTIVE", + "ADMIN", + "BUCKETS", + "CLONE", + "COMPONENT", + "DEFINITION", + "ENFORCED", + "EXCLUDE", + "GEOMCOLLECTION", + "GET_MASTER_PUBLIC_KEY", + "HISTOGRAM", + "HISTORY", + "INACTIVE", + "INVISIBLE", + "LOCKED", + "MASTER_COMPRESSION_ALGORITHMS", + "MASTER_PUBLIC_KEY_PATH", + "MASTER_TLS_CIPHERSUITES", + "MASTER_ZSTD_COMPRESSION_LEVEL", + "NESTED", + "NETWORK_NAMESPACE", + "NOWAIT", + "NULLS", + "OJ", + "OLD", + "OPTIONAL", + "ORDINALITY", + "ORGANIZATION", + "OTHERS", + "PATH", + "PERSIST", + "PERSIST_ONLY", + "PRIVILEGE_CHECKS_USER", + "PROCESS", + "RANDOM", + "REFERENCE", + "REQUIRE_ROW_FORMAT", + "RESOURCE", + "RESPECT", + "RESTART", + "RETAIN", + "REUSE", + "ROLE", + "SECONDARY", + "SECONDARY_ENGINE", + "SECONDARY_LOAD", + "SECONDARY_UNLOAD", + "SKIP", + "SRID", + "THREAD_PRIORITY", + "TIES", + "VCPU", + "VISIBLE", + "FORMAT", + "TREE", + "VITESS", + "TRADITIONAL", + "AUTH", + "INTERACTIVE", + "LOGIN", + "REVOKE", + "SA", + "SERVICEACCOUNT", + "SLEEP", + "MATERIALIZED", + "TEMP", + "TEMPORARY", + "REFRESH", + "JSON_ARRAY_ELEMENTS_TEXT", + "JSON_EACH", + "UNNEST", + "REGISTRY", + "PULL", + "LIST", + "EXEC", + "PURGE", + "NATIVEQUERY", + "STACKQL", + "RETURNING", + "';'", +} + +var yyStatenames = [...]string{} + +const yyEofCode = 1 +const yyErrCode = 2 +const yyInitialStackSize = 16 + +//line yacctab:1 +var yyExca = [...]int{ + -1, 0, + 368, 100, + 382, 100, + -2, 35, + -1, 1, + 1, -1, + -2, 0, + -1, 49, + 169, 360, + 170, 360, + 230, 348, + 234, 348, + 235, 348, + 236, 348, + -2, 346, + -1, 54, + 134, 370, + -2, 368, + -1, 57, + 6, 376, + 8, 376, + 9, 376, + 10, 376, + 57, 376, + 262, 376, + -2, 348, + -1, 86, + 38, 406, + -2, 414, + -1, 107, + 133, 89, + -2, 996, + -1, 108, + 132, 1075, + -2, 87, + -1, 109, + 132, 1076, + -2, 88, + -1, 367, + 80, 945, + 82, 945, + 88, 945, + 89, 945, + 90, 945, + 91, 945, + 92, 945, + 93, 945, + 94, 945, + 96, 945, + 97, 945, + 98, 945, + 99, 945, + 100, 945, + 101, 945, + 102, 945, + 103, 945, + 104, 945, + 105, 945, + 106, 945, + 107, 945, + 108, 945, + 109, 945, + 110, 945, + 113, 945, + 120, 945, + 121, 945, + 122, 945, + -2, 508, + -1, 427, + 120, 784, + -2, 780, + -1, 428, + 120, 785, + -2, 781, + -1, 452, + 38, 407, + -2, 419, + -1, 453, + 38, 408, + -2, 420, + -1, 476, + 88, 1063, + -2, 85, + -1, 477, + 88, 966, + -2, 86, + -1, 482, + 88, 933, + 134, 933, + -2, 746, + -1, 484, + 88, 1004, + 134, 1004, + -2, 748, + -1, 809, + 1, 428, + 5, 428, + 11, 428, + 12, 428, + 13, 428, + 14, 428, + 15, 428, + 17, 428, + 19, 428, + 31, 428, + 55, 428, + 58, 428, + 59, 428, + 268, 428, + 389, 428, + 390, 428, + -2, 505, + -1, 830, + 57, 581, + -2, 978, + -1, 831, + 57, 582, + -2, 979, + -1, 832, + 56, 67, + 58, 67, + -2, 71, + -1, 1020, + 120, 787, + -2, 783, +} + +const yyPrivate = 57344 + +const yyLast = 21890 + +var yyAct = [...]int{ + 427, 1821, 1811, 1565, 1777, 1428, 1752, 1612, 1330, 371, + 1653, 740, 1492, 1696, 1352, 386, 1069, 1528, 1501, 1537, + 1148, 400, 1502, 793, 1144, 1116, 1446, 1086, 369, 1319, + 1331, 481, 1191, 1177, 1157, 1147, 1454, 651, 1507, 1513, + 1529, 640, 451, 357, 110, 936, 1253, 1061, 318, 1007, + 1014, 318, 81, 1405, 85, 3, 110, 958, 1161, 672, + 845, 1117, 1112, 1101, 83, 1394, 821, 828, 446, 454, + 373, 318, 803, 820, 798, 362, 1041, 984, 816, 606, + 470, 1187, 811, 440, 844, 86, 1094, 34, 475, 467, + 438, 613, 318, 110, 628, 949, 607, 318, 100, 318, + 834, 691, 436, 358, 80, 755, 361, 1499, 1500, 779, + 780, 435, 909, 777, 778, 316, 969, 429, 356, 331, + 325, 7, 6, 88, 89, 90, 91, 92, 754, 328, + 478, 430, 313, 308, 309, 310, 1292, 335, 449, 1294, + 1293, 5, 1214, 1795, 1447, 311, 1793, 1794, 1758, 1759, + 1814, 431, 1786, 433, 434, 36, 1213, 36, 443, 469, + 1809, 460, 1764, 1804, 608, 1566, 610, 443, 1785, 441, + 1763, 333, 1472, 1602, 611, 36, 1366, 340, 846, 1365, + 847, 342, 1367, 1532, 1533, 1682, 1531, 112, 113, 114, + 1136, 1137, 1135, 965, 615, 616, 674, 343, 1212, 36, + 360, 72, 73, 40, 671, 1326, 71, 326, 71, 71, + 359, 701, 700, 710, 711, 703, 704, 705, 706, 707, + 708, 709, 702, 666, 346, 712, 71, 667, 664, 665, + 1385, 1170, 1642, 1178, 337, 329, 1430, 338, 339, 346, + 112, 113, 114, 330, 332, 343, 1593, 327, 345, 344, + 71, 1209, 1206, 1207, 304, 1205, 312, 302, 1591, 306, + 112, 113, 114, 350, 967, 352, 670, 348, 924, 970, + 971, 972, 1724, 701, 700, 710, 711, 703, 704, 705, + 706, 707, 708, 709, 702, 659, 660, 712, 1216, 1219, + 669, 923, 1431, 1432, 1806, 921, 1017, 1801, 1023, 1753, + 1427, 1095, 1825, 318, 620, 621, 1746, 1162, 318, 925, + 630, 1829, 629, 614, 306, 848, 318, 1353, 1355, 1433, + 928, 675, 1198, 784, 1697, 318, 638, 922, 1211, 644, + 110, 646, 653, 648, 110, 632, 110, 612, 1164, 1699, + 1523, 412, 673, 418, 419, 416, 417, 415, 414, 413, + 1210, 1522, 1164, 110, 314, 355, 305, 420, 421, 618, + 1171, 849, 1521, 609, 617, 645, 647, 625, 324, 1704, + 619, 307, 1734, 1424, 341, 627, 654, 314, 303, 1426, + 1622, 1178, 1573, 635, 112, 113, 114, 724, 725, 1362, + 1215, 1762, 637, 1324, 1796, 1797, 462, 1282, 1261, 1354, + 1662, 656, 1232, 684, 685, 631, 1455, 633, 634, 679, + 1698, 840, 1226, 815, 1217, 1225, 1272, 738, 1823, 636, + 1269, 1824, 1145, 1822, 70, 1074, 70, 70, 112, 113, + 114, 649, 318, 712, 318, 622, 70, 623, 318, 722, + 624, 1500, 1163, 702, 70, 1131, 712, 1457, 959, 1725, + 963, 112, 113, 114, 643, 74, 1163, 642, 1744, 1164, + 363, 1160, 1158, 110, 1159, 318, 318, 318, 70, 301, + 1713, 1156, 1162, 677, 110, 1415, 682, 680, 1705, 1703, + 110, 1460, 95, 1425, 1459, 1423, 1464, 605, 1458, 741, + 1456, 112, 113, 114, 950, 1462, 681, 791, 1511, 781, + 655, 782, 1482, 1302, 1461, 790, 683, 1411, 1412, 1413, + 692, 478, 657, 112, 113, 114, 1474, 1463, 1465, 758, + 760, 96, 764, 766, 804, 769, 724, 725, 819, 792, + 724, 725, 818, 800, 832, 960, 690, 441, 1079, 1080, + 787, 641, 757, 759, 761, 763, 765, 767, 768, 705, + 706, 707, 708, 709, 702, 1042, 833, 712, 1167, 464, + 465, 913, 843, 1163, 809, 1168, 1042, 1550, 1279, 838, + 703, 704, 705, 706, 707, 708, 709, 702, 1803, 1414, + 712, 951, 991, 692, 1419, 1416, 1407, 1417, 1410, 1802, + 1406, 688, 689, 687, 1408, 1409, 989, 990, 988, 1830, + 950, 642, 1267, 1383, 1266, 1748, 318, 807, 1418, 690, + 907, 982, 1770, 318, 912, 692, 914, 318, 71, 318, + 110, 110, 110, 688, 689, 687, 318, 1648, 1647, 318, + 987, 1076, 318, 1398, 934, 935, 1397, 318, 1386, 110, + 432, 690, 1772, 1831, 110, 110, 110, 318, 110, 110, + 710, 711, 703, 704, 705, 706, 707, 708, 709, 702, + 110, 110, 712, 1745, 1268, 978, 980, 981, 938, 1672, + 1645, 1075, 979, 857, 1246, 1247, 1248, 1063, 318, 692, + 911, 940, 692, 1606, 915, 641, 917, 951, 689, 687, + 688, 689, 687, 926, 1480, 953, 469, 1023, 1395, 932, + 1240, 941, 692, 783, 1710, 690, 1306, 1805, 690, 916, + 910, 692, 687, 1008, 946, 692, 1709, 985, 929, 688, + 689, 687, 1010, 688, 689, 687, 1297, 933, 690, 82, + 1301, 1476, 112, 113, 114, 1546, 110, 690, 1774, 1023, + 954, 690, 692, 318, 1165, 968, 1306, 1756, 802, 952, + 688, 689, 687, 1011, 1012, 1306, 1023, 1030, 1033, 1306, + 1736, 1088, 693, 1043, 1320, 658, 986, 661, 690, 84, + 1020, 110, 110, 112, 113, 114, 1018, 1009, 673, 673, + 1057, 1058, 1067, 1023, 1019, 318, 1097, 318, 1306, 1701, + 110, 71, 1021, 112, 113, 114, 1510, 1369, 363, 112, + 113, 114, 110, 1296, 1306, 1658, 1025, 752, 1087, 318, + 1098, 741, 1087, 110, 1098, 112, 113, 114, 318, 1059, + 1638, 1637, 1624, 1023, 1051, 1052, 1088, 318, 318, 318, + 1621, 1023, 1556, 1555, 786, 318, 318, 1055, 1056, 318, + 318, 318, 110, 836, 796, 799, 785, 1020, 1552, 1553, + 1552, 1551, 1064, 1018, 1121, 836, 110, 607, 1125, 1071, + 835, 1093, 1087, 1023, 686, 1081, 1098, 1023, 686, 1023, + 1618, 1065, 938, 1510, 1089, 1712, 1090, 1066, 1087, 478, + 1554, 1114, 856, 855, 1098, 1096, 1370, 1134, 837, 1285, + 839, 1284, 835, 1149, 1077, 1091, 927, 1126, 786, 841, + 837, 1128, 835, 1127, 1179, 1180, 1181, 1788, 336, 1655, + 318, 1172, 1629, 110, 788, 110, 1192, 318, 1218, 1124, + 1129, 1542, 318, 318, 318, 318, 318, 1133, 318, 318, + 1132, 428, 318, 110, 318, 1440, 1152, 1514, 1515, 1193, + 1373, 1188, 389, 388, 391, 392, 393, 394, 1183, 71, + 318, 390, 395, 318, 1182, 318, 1429, 1656, 318, 1195, + 318, 318, 1816, 1196, 1812, 318, 353, 110, 1544, 110, + 1517, 1399, 964, 931, 1520, 111, 1342, 1197, 1340, 319, + 1519, 1343, 319, 1341, 1203, 1189, 1190, 111, 1339, 1220, + 1221, 1222, 1223, 1224, 1338, 1227, 1228, 1799, 1784, 1229, + 1237, 1231, 319, 1489, 1309, 1103, 1106, 1107, 1108, 1104, + 985, 1105, 1109, 455, 801, 1514, 1515, 1233, 1790, 1318, + 1234, 1317, 818, 319, 111, 1236, 1390, 456, 319, 1038, + 319, 854, 1241, 1235, 805, 806, 458, 639, 457, 942, + 1026, 1027, 794, 1039, 1032, 1035, 1036, 1344, 1382, 1107, + 1108, 918, 919, 920, 795, 1750, 1651, 1749, 1295, 986, + 1680, 1380, 1375, 1616, 1201, 961, 1249, 930, 1559, 1050, + 939, 1488, 1053, 1054, 1401, 943, 944, 945, 1199, 947, + 948, 1111, 908, 973, 974, 975, 976, 444, 445, 318, + 1316, 955, 956, 447, 1263, 1308, 1718, 1614, 1315, 318, + 318, 318, 318, 318, 448, 1262, 84, 1332, 1303, 441, + 1613, 318, 1493, 1320, 668, 1273, 318, 1278, 1270, 318, + 1818, 1817, 82, 1305, 957, 808, 318, 701, 700, 710, + 711, 703, 704, 705, 706, 707, 708, 709, 702, 1818, + 1732, 712, 1028, 1029, 1643, 1073, 1368, 87, 110, 1327, + 1313, 1322, 1312, 1304, 1358, 79, 1360, 1374, 1361, 1323, + 1321, 673, 673, 1113, 29, 1, 1334, 1335, 1371, 1337, + 1350, 1661, 1333, 1345, 1357, 1336, 1757, 1659, 1060, 1298, + 1351, 1299, 1810, 1300, 1254, 1149, 1062, 1567, 1359, 1652, + 1208, 1751, 1695, 1536, 1155, 1363, 110, 1146, 94, 318, + 604, 93, 1389, 1743, 1391, 1392, 1393, 1387, 1388, 1103, + 1106, 1107, 1108, 1104, 652, 1105, 1109, 1154, 1376, 1377, + 1378, 1379, 1153, 1702, 1641, 1166, 1173, 1174, 1175, 1176, + 110, 1384, 110, 318, 319, 1169, 1543, 1381, 1747, 319, + 862, 860, 1184, 1185, 1186, 1140, 1143, 319, 1404, 1396, + 861, 859, 864, 863, 1403, 858, 319, 334, 455, 473, + 1420, 111, 110, 966, 938, 111, 1402, 111, 1008, 349, + 1110, 850, 456, 1194, 810, 97, 1438, 1422, 1436, 452, + 453, 458, 1421, 457, 111, 1204, 962, 1453, 1046, 662, + 663, 1437, 720, 1314, 1441, 104, 1364, 1442, 479, 1451, + 472, 1496, 1498, 1439, 1505, 1467, 1078, 110, 797, 1277, + 751, 1450, 1258, 1259, 1020, 318, 1040, 1466, 825, 372, + 1477, 977, 827, 387, 384, 110, 385, 1452, 1019, 110, + 110, 1483, 1082, 1325, 1276, 694, 370, 1490, 364, 824, + 1102, 1473, 1100, 1099, 1200, 1481, 1202, 1485, 1503, 468, + 1516, 1512, 823, 822, 829, 1291, 1601, 110, 1509, 1723, + 450, 1037, 1451, 319, 1230, 319, 1497, 57, 1243, 319, + 354, 110, 39, 110, 110, 38, 1518, 673, 673, 1530, + 459, 64, 1491, 32, 31, 28, 30, 27, 22, 1527, + 21, 1549, 20, 1535, 111, 1524, 319, 319, 319, 19, + 318, 18, 24, 449, 17, 111, 1534, 16, 1149, 1539, + 1149, 111, 1547, 1548, 15, 626, 42, 33, 26, 25, + 14, 13, 318, 1526, 12, 11, 10, 9, 110, 8, + 1568, 110, 110, 110, 318, 4, 1540, 1541, 678, 1280, + 23, 110, 739, 2, 0, 0, 0, 0, 110, 1560, + 0, 0, 0, 0, 0, 0, 1558, 1576, 0, 0, + 0, 0, 0, 0, 1561, 0, 1563, 1557, 0, 0, + 0, 0, 1310, 1311, 799, 1581, 1582, 0, 0, 1574, + 0, 1575, 1023, 1577, 0, 0, 0, 0, 0, 1562, + 0, 0, 1589, 0, 0, 0, 0, 0, 0, 0, + 0, 1572, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1607, 1608, 0, 1615, 0, 0, 0, 0, 0, + 1332, 1584, 701, 700, 710, 711, 703, 704, 705, 706, + 707, 708, 709, 702, 0, 110, 712, 319, 0, 1625, + 0, 1626, 0, 110, 319, 1636, 0, 0, 319, 0, + 319, 111, 111, 111, 0, 1371, 0, 319, 110, 1640, + 319, 0, 0, 319, 0, 0, 0, 110, 319, 0, + 111, 0, 1149, 0, 318, 111, 111, 111, 319, 111, + 111, 0, 0, 0, 0, 1644, 0, 1646, 1665, 0, + 0, 111, 111, 0, 0, 1635, 0, 0, 0, 0, + 1484, 0, 0, 0, 1654, 938, 0, 0, 1663, 319, + 0, 0, 0, 0, 1650, 0, 0, 0, 0, 1657, + 0, 1664, 110, 110, 0, 110, 1679, 0, 0, 0, + 110, 0, 110, 110, 110, 318, 0, 1676, 1675, 110, + 1503, 1681, 0, 0, 0, 0, 1688, 0, 1689, 1691, + 1692, 0, 0, 1694, 0, 0, 110, 318, 1700, 1706, + 1693, 1434, 0, 1435, 0, 1677, 0, 111, 0, 1707, + 1671, 1708, 1714, 0, 319, 1683, 1475, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1729, 0, + 1687, 0, 0, 0, 0, 0, 0, 0, 1742, 0, + 0, 1733, 111, 111, 1503, 0, 1730, 0, 1740, 1741, + 0, 0, 0, 110, 110, 0, 319, 1494, 319, 0, + 0, 111, 0, 1754, 1715, 0, 1717, 0, 0, 0, + 1760, 0, 0, 111, 1755, 110, 0, 1765, 0, 0, + 319, 1767, 1332, 1766, 111, 0, 0, 318, 0, 319, + 1654, 1149, 0, 1769, 110, 0, 1735, 0, 319, 319, + 319, 0, 1776, 0, 1782, 879, 319, 319, 0, 0, + 319, 319, 319, 111, 1787, 0, 0, 0, 0, 0, + 1791, 1789, 1609, 1792, 0, 0, 0, 111, 1798, 110, + 0, 0, 0, 0, 0, 0, 0, 1617, 0, 0, + 0, 401, 35, 0, 0, 1800, 0, 1807, 1808, 0, + 0, 0, 0, 1815, 1771, 0, 1586, 1587, 0, 1588, + 1826, 0, 0, 1590, 0, 1592, 700, 710, 711, 703, + 704, 705, 706, 707, 708, 709, 702, 35, 0, 712, + 0, 319, 0, 0, 111, 0, 111, 0, 319, 867, + 0, 0, 0, 319, 319, 319, 319, 319, 0, 319, + 319, 0, 0, 319, 111, 319, 0, 0, 0, 0, + 0, 0, 0, 1603, 0, 0, 0, 0, 0, 0, + 0, 319, 0, 442, 319, 0, 319, 0, 0, 319, + 880, 319, 319, 0, 0, 0, 319, 366, 111, 1639, + 111, 363, 0, 0, 0, 0, 0, 0, 1627, 0, + 0, 1628, 1599, 0, 1630, 0, 893, 896, 897, 898, + 899, 900, 901, 0, 902, 903, 904, 905, 906, 881, + 882, 883, 884, 865, 866, 894, 0, 868, 0, 869, + 870, 871, 872, 873, 874, 875, 876, 877, 878, 885, + 886, 887, 888, 889, 890, 891, 892, 696, 0, 699, + 0, 0, 0, 0, 0, 713, 714, 715, 716, 717, + 718, 719, 0, 697, 698, 695, 701, 700, 710, 711, + 703, 704, 705, 706, 707, 708, 709, 702, 0, 1649, + 712, 0, 701, 700, 710, 711, 703, 704, 705, 706, + 707, 708, 709, 702, 0, 0, 712, 0, 895, 0, + 1678, 363, 0, 0, 0, 0, 1598, 1605, 0, 0, + 319, 0, 0, 0, 0, 0, 0, 0, 1022, 1024, + 319, 319, 319, 319, 319, 0, 0, 0, 0, 0, + 1604, 0, 319, 0, 0, 0, 0, 319, 0, 0, + 319, 0, 0, 0, 0, 0, 0, 319, 701, 700, + 710, 711, 703, 704, 705, 706, 707, 708, 709, 702, + 0, 0, 712, 0, 0, 0, 0, 0, 0, 111, + 1072, 701, 700, 710, 711, 703, 704, 705, 706, 707, + 708, 709, 702, 1597, 0, 712, 701, 700, 710, 711, + 703, 704, 705, 706, 707, 708, 709, 702, 0, 0, + 712, 0, 0, 0, 0, 363, 0, 0, 1443, 0, + 0, 0, 0, 0, 0, 0, 0, 111, 0, 0, + 319, 650, 0, 0, 0, 650, 0, 650, 701, 700, + 710, 711, 703, 704, 705, 706, 707, 708, 709, 702, + 0, 0, 712, 0, 0, 0, 35, 0, 0, 0, + 0, 111, 0, 111, 319, 0, 0, 0, 0, 721, + 723, 0, 0, 701, 700, 710, 711, 703, 704, 705, + 706, 707, 708, 709, 702, 0, 0, 712, 0, 0, + 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, + 737, 0, 0, 0, 743, 744, 745, 746, 747, 748, + 749, 750, 0, 753, 756, 756, 756, 762, 756, 756, + 762, 756, 770, 771, 772, 773, 774, 775, 776, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 111, 0, + 0, 789, 0, 0, 35, 0, 319, 0, 0, 0, + 0, 442, 0, 0, 0, 0, 111, 0, 0, 0, + 111, 111, 0, 0, 0, 0, 0, 826, 0, 0, + 0, 0, 726, 727, 728, 729, 730, 731, 732, 733, + 734, 735, 0, 0, 0, 0, 0, 0, 111, 398, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1256, + 0, 0, 111, 1257, 111, 111, 1596, 0, 0, 0, + 0, 0, 0, 0, 0, 1264, 1265, 0, 0, 0, + 0, 1271, 0, 0, 1274, 1275, 0, 0, 0, 0, + 0, 319, 1281, 106, 0, 0, 1283, 0, 0, 1286, + 1287, 1288, 1289, 1290, 0, 351, 0, 0, 0, 0, + 0, 0, 0, 319, 0, 0, 0, 0, 1307, 111, + 0, 0, 111, 111, 111, 319, 0, 0, 0, 0, + 0, 0, 111, 0, 0, 0, 0, 0, 0, 111, + 0, 0, 480, 0, 1255, 0, 701, 700, 710, 711, + 703, 704, 705, 706, 707, 708, 709, 702, 0, 0, + 712, 1347, 1348, 1349, 701, 700, 710, 711, 703, 704, + 705, 706, 707, 708, 709, 702, 0, 0, 712, 0, + 0, 650, 650, 650, 701, 700, 710, 711, 703, 704, + 705, 706, 707, 708, 709, 702, 0, 0, 712, 0, + 650, 0, 0, 0, 0, 650, 650, 650, 0, 650, + 650, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 650, 650, 0, 0, 0, 111, 0, 0, 0, + 0, 0, 0, 0, 111, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 111, + 0, 0, 0, 0, 0, 0, 0, 0, 111, 0, + 0, 0, 0, 0, 0, 319, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1448, 1449, 0, + 0, 0, 0, 111, 111, 0, 111, 0, 0, 0, + 0, 111, 0, 111, 111, 111, 319, 0, 0, 0, + 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 111, 319, 0, + 1068, 0, 983, 0, 1486, 992, 993, 994, 995, 996, + 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 480, + 0, 0, 0, 480, 0, 480, 0, 0, 1115, 0, + 0, 1123, 0, 0, 0, 0, 826, 0, 0, 1349, + 826, 0, 676, 0, 111, 111, 1525, 0, 0, 0, + 0, 0, 0, 0, 1047, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 111, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 319, 0, + 0, 0, 0, 0, 0, 111, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 35, 0, 0, 0, 650, 0, 650, 0, 0, 0, + 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 650, 0, 0, 0, 0, 1583, + 0, 0, 0, 1585, 0, 0, 0, 0, 0, 0, + 0, 0, 813, 0, 0, 1594, 1595, 0, 0, 0, + 0, 0, 0, 480, 0, 0, 0, 0, 0, 851, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1611, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1619, 1620, 0, 1623, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1634, 0, 0, 0, 0, 0, 0, + 1260, 0, 0, 442, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1660, 0, + 35, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1250, 1251, 1252, + 826, 0, 0, 0, 0, 0, 1328, 1329, 0, 0, + 826, 826, 826, 826, 826, 0, 0, 0, 0, 480, + 480, 480, 0, 0, 0, 0, 0, 1115, 0, 0, + 1690, 0, 0, 0, 1356, 0, 0, 826, 480, 0, + 0, 0, 0, 480, 480, 480, 0, 480, 480, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1716, 480, + 480, 0, 0, 0, 0, 1719, 1720, 1721, 1722, 0, + 1726, 0, 1727, 1728, 0, 0, 1731, 36, 37, 72, + 73, 40, 0, 0, 0, 0, 0, 0, 1737, 0, + 1738, 1739, 0, 0, 0, 0, 77, 0, 0, 0, + 0, 41, 60, 61, 0, 63, 0, 0, 0, 0, + 0, 35, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1761, 0, 50, 0, 0, 0, 71, 0, + 0, 0, 0, 0, 0, 1013, 0, 0, 480, 0, + 0, 650, 0, 650, 0, 0, 0, 0, 0, 0, + 1773, 0, 0, 1044, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1048, 1049, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1070, + 0, 0, 0, 0, 43, 44, 46, 45, 48, 0, + 62, 1083, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 813, 0, 0, 480, 0, 0, 1827, 1828, + 0, 0, 0, 49, 76, 75, 0, 0, 58, 59, + 47, 0, 0, 0, 0, 0, 0, 0, 1504, 0, + 35, 480, 0, 0, 51, 52, 0, 53, 54, 55, + 56, 0, 0, 0, 0, 480, 0, 0, 0, 0, + 0, 0, 1444, 1445, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1468, 1469, 0, + 1470, 1471, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1478, 1479, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 480, 35, 480, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 399, 0, 480, 74, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 70, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1579, + 0, 0, 0, 0, 0, 0, 1242, 0, 1244, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 317, 0, + 0, 347, 0, 0, 1600, 0, 0, 0, 0, 0, + 1545, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1610, 439, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 463, + 0, 0, 471, 0, 0, 0, 0, 317, 0, 317, + 0, 0, 0, 1631, 1632, 1633, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 66, 0, 0, 0, 69, + 0, 1578, 0, 0, 0, 0, 65, 67, 68, 78, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 650, + 0, 35, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1044, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 35, 0, 0, 0, 0, 0, 0, 0, + 1504, 0, 35, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 480, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1711, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1504, 1400, 0, 0, 0, 0, + 0, 0, 0, 0, 1666, 1667, 1668, 1669, 1670, 0, + 0, 0, 1673, 1674, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 480, + 0, 480, 0, 317, 0, 0, 0, 0, 317, 0, + 0, 0, 0, 0, 0, 0, 317, 0, 0, 0, + 0, 0, 0, 0, 0, 317, 0, 0, 0, 0, + 0, 480, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 480, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1487, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1813, 0, + 0, 0, 0, 0, 480, 0, 0, 0, 1506, 1508, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1508, 0, 0, 0, + 0, 0, 317, 0, 317, 0, 1781, 1781, 439, 0, + 480, 0, 480, 1538, 0, 0, 0, 0, 0, 0, + 0, 0, 463, 0, 0, 0, 1781, 0, 0, 0, + 0, 1781, 0, 0, 0, 317, 317, 317, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1781, 1781, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1819, 0, 0, 0, 0, 0, 1564, 0, 0, + 1569, 1570, 1571, 0, 0, 0, 0, 0, 0, 0, + 1070, 0, 0, 0, 0, 0, 0, 1580, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1044, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 480, 0, 317, 0, 0, 0, + 0, 0, 1070, 317, 0, 0, 0, 317, 0, 317, + 0, 0, 0, 0, 0, 0, 317, 480, 0, 317, + 0, 0, 317, 0, 0, 0, 480, 937, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 317, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 317, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1684, 1685, 0, 1686, 0, 0, 0, 0, 1070, + 0, 1070, 1070, 1070, 0, 0, 0, 0, 1538, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1070, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 463, 937, + 0, 0, 0, 439, 463, 463, 0, 0, 463, 463, + 463, 0, 0, 0, 1045, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 463, 463, 463, 463, 463, 0, 0, + 0, 0, 480, 480, 0, 317, 0, 439, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1044, 0, 1768, 0, 0, 0, 0, 317, + 0, 0, 0, 0, 0, 0, 937, 0, 317, 0, + 0, 0, 0, 1775, 0, 0, 0, 317, 1119, 1119, + 0, 0, 0, 0, 0, 317, 317, 0, 0, 317, + 1130, 937, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1070, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 317, 0, 0, 0, 0, 0, 0, 317, 0, 0, + 0, 0, 317, 317, 317, 317, 317, 0, 317, 317, + 0, 0, 317, 0, 317, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 317, 0, 0, 317, 0, 317, 0, 0, 317, 0, + 1238, 1239, 0, 0, 0, 317, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 463, 463, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 463, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 463, 317, + 0, 0, 0, 0, 0, 0, 0, 0, 1045, 317, + 317, 317, 317, 317, 0, 0, 0, 0, 0, 0, + 0, 1346, 0, 0, 0, 0, 317, 0, 0, 1119, + 0, 0, 0, 0, 463, 0, 317, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 317, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 937, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 463, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 937, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 463, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 317, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 317, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 317, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 317, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 463, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 463, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1045, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 937, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1119, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 317, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 590, 578, 0, 535, 593, 507, 525, 601, 526, + 529, 565, 493, 547, 209, 523, 0, 511, 488, 519, + 489, 509, 537, 146, 177, 142, 580, 550, 592, 179, + 0, 512, 599, 181, 556, 0, 261, 196, 0, 0, + 0, 539, 582, 545, 575, 534, 566, 498, 555, 594, + 524, 563, 595, 0, 0, 0, 112, 113, 114, 0, + 1150, 1151, 0, 1045, 0, 0, 0, 135, 0, 560, + 589, 521, 562, 564, 603, 487, 557, 317, 491, 494, + 600, 585, 515, 516, 1372, 0, 0, 0, 0, 0, + 0, 538, 546, 571, 532, 0, 0, 0, 0, 0, + 0, 0, 0, 513, 0, 554, 0, 0, 0, 495, + 492, 0, 0, 0, 0, 536, 0, 0, 0, 497, + 0, 514, 572, 0, 485, 155, 577, 584, 533, 323, + 588, 531, 530, 591, 228, 0, 266, 158, 178, 131, + 172, 116, 127, 0, 157, 206, 237, 242, 581, 510, + 520, 139, 517, 240, 216, 282, 553, 218, 239, 182, + 272, 229, 281, 291, 292, 269, 289, 296, 257, 120, + 268, 280, 136, 251, 0, 0, 0, 122, 278, 264, + 194, 168, 169, 121, 0, 235, 145, 153, 141, 208, + 275, 276, 140, 299, 128, 288, 124, 129, 287, 202, + 271, 279, 195, 187, 123, 277, 193, 186, 174, 149, + 160, 226, 184, 227, 161, 198, 197, 199, 0, 490, + 0, 262, 285, 300, 133, 506, 270, 294, 295, 0, + 230, 134, 154, 148, 225, 152, 200, 130, 163, 259, + 173, 183, 234, 298, 215, 241, 137, 284, 260, 502, + 505, 500, 501, 548, 549, 596, 597, 598, 573, 496, + 0, 503, 504, 0, 579, 586, 587, 552, 115, 125, + 180, 297, 232, 151, 567, 0, 0, 283, 222, 156, + 0, 0, 570, 286, 486, 499, 144, 508, 0, 0, + 522, 527, 528, 540, 541, 542, 543, 544, 551, 558, + 559, 561, 568, 569, 576, 583, 602, 117, 118, 126, + 132, 138, 143, 147, 150, 159, 162, 164, 165, 166, + 171, 185, 189, 190, 191, 192, 203, 204, 205, 207, + 210, 211, 212, 213, 214, 217, 219, 220, 221, 223, + 224, 233, 236, 243, 244, 245, 246, 247, 248, 250, + 253, 254, 255, 256, 263, 267, 273, 274, 290, 293, + 0, 0, 0, 0, 119, 170, 188, 249, 252, 258, + 265, 320, 321, 322, 238, 175, 176, 0, 0, 0, + 0, 518, 231, 201, 167, 574, 590, 578, 0, 535, + 593, 507, 525, 601, 526, 529, 565, 493, 547, 209, + 523, 0, 511, 488, 519, 489, 509, 537, 146, 177, + 142, 580, 550, 592, 179, 0, 512, 599, 181, 556, + 0, 261, 196, 0, 0, 0, 539, 582, 545, 575, + 534, 566, 498, 555, 594, 524, 563, 595, 0, 0, + 0, 112, 113, 114, 0, 1150, 1151, 0, 0, 0, + 0, 0, 135, 0, 560, 589, 521, 562, 564, 603, + 487, 557, 0, 491, 494, 600, 585, 515, 516, 0, + 0, 0, 0, 0, 0, 0, 538, 546, 571, 532, + 0, 0, 0, 0, 0, 0, 0, 0, 513, 0, + 554, 0, 0, 0, 495, 492, 0, 0, 0, 0, + 536, 0, 0, 0, 497, 0, 514, 572, 0, 485, + 155, 577, 584, 533, 323, 588, 531, 530, 591, 228, + 0, 266, 158, 178, 131, 172, 116, 127, 0, 157, + 206, 237, 242, 581, 510, 520, 139, 517, 240, 216, + 282, 553, 218, 239, 182, 272, 229, 281, 291, 292, + 269, 289, 296, 257, 120, 268, 280, 136, 251, 0, + 0, 0, 122, 278, 264, 194, 168, 169, 121, 0, + 235, 145, 153, 141, 208, 275, 276, 140, 299, 128, + 288, 124, 129, 287, 202, 271, 279, 195, 187, 123, + 277, 193, 186, 174, 149, 160, 226, 184, 227, 161, + 198, 197, 199, 0, 490, 0, 262, 285, 300, 133, + 506, 270, 294, 295, 0, 230, 134, 154, 148, 225, + 152, 200, 130, 163, 259, 173, 183, 234, 298, 215, + 241, 137, 284, 260, 502, 505, 500, 501, 548, 549, + 596, 597, 598, 573, 496, 0, 503, 504, 0, 579, + 586, 587, 552, 115, 125, 180, 297, 232, 151, 567, + 0, 0, 283, 222, 156, 0, 0, 570, 286, 486, + 499, 144, 508, 0, 0, 522, 527, 528, 540, 541, + 542, 543, 544, 551, 558, 559, 561, 568, 569, 576, + 583, 602, 117, 118, 126, 132, 138, 143, 147, 150, + 159, 162, 164, 165, 166, 171, 185, 189, 190, 191, + 192, 203, 204, 205, 207, 210, 211, 212, 213, 214, + 217, 219, 220, 221, 223, 224, 233, 236, 243, 244, + 245, 246, 247, 248, 250, 253, 254, 255, 256, 263, + 267, 273, 274, 290, 293, 0, 0, 0, 0, 119, + 170, 188, 249, 252, 258, 265, 320, 321, 322, 238, + 175, 176, 0, 0, 0, 0, 518, 231, 201, 167, + 574, 590, 578, 0, 535, 593, 507, 525, 601, 526, + 529, 565, 493, 547, 209, 523, 0, 511, 488, 519, + 489, 509, 537, 146, 177, 142, 580, 550, 592, 179, + 0, 512, 599, 181, 556, 0, 261, 196, 0, 0, + 0, 539, 582, 545, 575, 534, 566, 498, 555, 594, + 524, 563, 595, 71, 0, 0, 112, 113, 114, 0, + 0, 0, 0, 0, 0, 0, 0, 135, 0, 560, + 589, 521, 562, 564, 603, 487, 557, 0, 491, 494, + 600, 585, 515, 516, 0, 0, 0, 0, 0, 0, + 0, 538, 546, 571, 532, 0, 0, 0, 0, 0, + 0, 0, 0, 513, 0, 554, 0, 0, 0, 495, + 492, 0, 0, 0, 0, 536, 0, 0, 0, 497, + 0, 514, 572, 0, 485, 155, 577, 584, 533, 323, + 588, 531, 530, 591, 228, 0, 266, 158, 178, 131, + 172, 116, 127, 0, 157, 206, 237, 242, 581, 510, + 520, 139, 517, 240, 216, 282, 553, 218, 239, 182, + 272, 229, 281, 291, 292, 269, 289, 296, 257, 120, + 268, 280, 136, 251, 0, 0, 0, 122, 278, 264, + 194, 168, 169, 121, 0, 235, 145, 153, 141, 208, + 275, 276, 140, 299, 128, 288, 124, 129, 287, 202, + 271, 279, 195, 187, 123, 277, 193, 186, 174, 149, + 160, 226, 184, 227, 161, 198, 197, 199, 0, 490, + 0, 262, 285, 300, 133, 506, 270, 294, 295, 0, + 230, 134, 154, 148, 225, 152, 200, 130, 163, 259, + 173, 183, 234, 298, 215, 241, 137, 284, 260, 502, + 505, 500, 501, 548, 549, 596, 597, 598, 573, 496, + 0, 503, 504, 0, 579, 586, 587, 552, 115, 125, + 180, 297, 232, 151, 567, 0, 0, 283, 222, 156, + 0, 0, 570, 286, 486, 499, 144, 508, 0, 0, + 522, 527, 528, 540, 541, 542, 543, 544, 551, 558, + 559, 561, 568, 569, 576, 583, 602, 117, 118, 126, + 132, 138, 143, 147, 150, 159, 162, 164, 165, 166, + 171, 185, 189, 190, 191, 192, 203, 204, 205, 207, + 210, 211, 212, 213, 214, 217, 219, 220, 221, 223, + 224, 233, 236, 243, 244, 245, 246, 247, 248, 250, + 253, 254, 255, 256, 263, 267, 273, 274, 290, 293, + 0, 0, 0, 0, 119, 170, 188, 249, 252, 258, + 265, 320, 321, 322, 238, 175, 176, 0, 0, 0, + 0, 518, 231, 201, 167, 574, 590, 578, 0, 535, + 593, 507, 525, 601, 526, 529, 565, 493, 547, 209, + 523, 0, 511, 488, 519, 489, 509, 537, 146, 177, + 142, 580, 550, 592, 179, 0, 512, 599, 181, 556, + 0, 261, 196, 0, 0, 0, 539, 582, 545, 575, + 534, 566, 498, 555, 594, 524, 563, 595, 0, 0, + 0, 112, 113, 114, 0, 0, 0, 0, 0, 0, + 0, 0, 135, 0, 560, 589, 521, 562, 564, 603, + 487, 557, 0, 491, 494, 600, 585, 515, 516, 0, + 0, 0, 0, 0, 0, 0, 538, 546, 571, 532, + 0, 0, 0, 0, 0, 0, 1495, 0, 513, 0, + 554, 0, 0, 0, 495, 492, 0, 0, 0, 0, + 536, 0, 0, 0, 497, 0, 514, 572, 0, 485, + 155, 577, 584, 533, 323, 588, 531, 530, 591, 228, + 0, 266, 158, 178, 131, 172, 116, 127, 0, 157, + 206, 237, 242, 581, 510, 520, 139, 517, 240, 216, + 282, 553, 218, 239, 182, 272, 229, 281, 291, 292, + 269, 289, 296, 257, 120, 268, 280, 136, 251, 0, + 0, 0, 122, 278, 264, 194, 168, 169, 121, 0, + 235, 145, 153, 141, 208, 275, 276, 140, 299, 128, + 288, 124, 129, 287, 202, 271, 279, 195, 187, 123, + 277, 193, 186, 174, 149, 160, 226, 184, 227, 161, + 198, 197, 199, 0, 490, 0, 262, 285, 300, 133, + 506, 270, 294, 295, 0, 230, 134, 154, 148, 225, + 152, 200, 130, 163, 259, 173, 183, 234, 298, 215, + 241, 137, 284, 260, 502, 505, 500, 501, 548, 549, + 596, 597, 598, 573, 496, 0, 503, 504, 0, 579, + 586, 587, 552, 115, 125, 180, 297, 232, 151, 567, + 0, 0, 283, 222, 156, 0, 0, 570, 286, 486, + 499, 144, 508, 0, 0, 522, 527, 528, 540, 541, + 542, 543, 544, 551, 558, 559, 561, 568, 569, 576, + 583, 602, 117, 118, 126, 132, 138, 143, 147, 150, + 159, 162, 164, 165, 166, 171, 185, 189, 190, 191, + 192, 203, 204, 205, 207, 210, 211, 212, 213, 214, + 217, 219, 220, 221, 223, 224, 233, 236, 243, 244, + 245, 246, 247, 248, 250, 253, 254, 255, 256, 263, + 267, 273, 274, 290, 293, 0, 0, 0, 0, 119, + 170, 188, 249, 252, 258, 265, 320, 321, 322, 238, + 175, 176, 0, 0, 0, 0, 518, 231, 201, 167, + 574, 590, 578, 0, 535, 593, 507, 525, 601, 526, + 529, 565, 493, 547, 209, 523, 0, 511, 488, 519, + 489, 509, 537, 146, 177, 142, 580, 550, 592, 179, + 0, 512, 599, 181, 556, 0, 261, 196, 0, 0, + 0, 539, 582, 545, 575, 534, 566, 498, 555, 594, + 524, 563, 595, 0, 0, 0, 112, 113, 114, 0, + 0, 0, 0, 0, 0, 0, 0, 135, 0, 560, + 589, 521, 562, 564, 603, 487, 557, 0, 491, 494, + 600, 585, 515, 516, 0, 0, 0, 0, 0, 0, + 0, 538, 546, 571, 532, 0, 0, 0, 0, 0, + 0, 1131, 0, 513, 0, 554, 0, 0, 0, 495, + 492, 0, 0, 0, 0, 536, 0, 0, 0, 497, + 0, 514, 572, 0, 485, 155, 577, 584, 533, 323, + 588, 531, 530, 591, 228, 0, 266, 158, 178, 131, + 172, 116, 127, 0, 157, 206, 237, 242, 581, 510, + 520, 139, 517, 240, 216, 282, 553, 218, 239, 182, + 272, 229, 281, 291, 292, 269, 289, 296, 257, 120, + 268, 280, 136, 251, 0, 0, 0, 122, 278, 264, + 194, 168, 169, 121, 0, 235, 145, 153, 141, 208, + 275, 276, 140, 299, 128, 288, 124, 129, 287, 202, + 271, 279, 195, 187, 123, 277, 193, 186, 174, 149, + 160, 226, 184, 227, 161, 198, 197, 199, 0, 490, + 0, 262, 285, 300, 133, 506, 270, 294, 295, 0, + 230, 134, 154, 148, 225, 152, 200, 130, 163, 259, + 173, 183, 234, 298, 215, 241, 137, 284, 260, 502, + 505, 500, 501, 548, 549, 596, 597, 598, 573, 496, + 0, 503, 504, 0, 579, 586, 587, 552, 115, 125, + 180, 297, 232, 151, 567, 0, 0, 283, 222, 156, + 0, 0, 570, 286, 486, 499, 144, 508, 0, 0, + 522, 527, 528, 540, 541, 542, 543, 544, 551, 558, + 559, 561, 568, 569, 576, 583, 602, 117, 118, 126, + 132, 138, 143, 147, 150, 159, 162, 164, 165, 166, + 171, 185, 189, 190, 191, 192, 203, 204, 205, 207, + 210, 211, 212, 213, 214, 217, 219, 220, 221, 223, + 224, 233, 236, 243, 244, 245, 246, 247, 248, 250, + 253, 254, 255, 256, 263, 267, 273, 274, 290, 293, + 0, 0, 0, 0, 119, 170, 188, 249, 252, 258, + 265, 320, 321, 322, 238, 175, 176, 0, 0, 0, + 0, 518, 231, 201, 167, 574, 590, 578, 0, 535, + 593, 507, 525, 601, 526, 529, 565, 493, 547, 209, + 523, 0, 511, 488, 519, 489, 509, 537, 146, 177, + 142, 580, 550, 592, 179, 0, 512, 599, 181, 556, + 0, 261, 196, 0, 0, 0, 539, 582, 545, 575, + 534, 566, 498, 555, 594, 524, 563, 595, 0, 0, + 0, 112, 113, 114, 0, 0, 0, 0, 0, 0, + 0, 0, 135, 0, 560, 589, 521, 562, 564, 603, + 487, 557, 0, 491, 494, 600, 585, 515, 516, 0, + 0, 0, 0, 0, 0, 0, 538, 546, 571, 532, + 0, 0, 0, 0, 0, 0, 1092, 0, 513, 0, + 554, 0, 0, 0, 495, 492, 0, 0, 0, 0, + 536, 0, 0, 0, 497, 0, 514, 572, 0, 485, + 155, 577, 584, 533, 323, 588, 531, 530, 591, 228, + 0, 266, 158, 178, 131, 172, 116, 127, 0, 157, + 206, 237, 242, 581, 510, 520, 139, 517, 240, 216, + 282, 553, 218, 239, 182, 272, 229, 281, 291, 292, + 269, 289, 296, 257, 120, 268, 280, 136, 251, 0, + 0, 0, 122, 278, 264, 194, 168, 169, 121, 0, + 235, 145, 153, 141, 208, 275, 276, 140, 299, 128, + 288, 124, 129, 287, 202, 271, 279, 195, 187, 123, + 277, 193, 186, 174, 149, 160, 226, 184, 227, 161, + 198, 197, 199, 0, 490, 0, 262, 285, 300, 133, + 506, 270, 294, 295, 0, 230, 134, 154, 148, 225, + 152, 200, 130, 163, 259, 173, 183, 234, 298, 215, + 241, 137, 284, 260, 502, 505, 500, 501, 548, 549, + 596, 597, 598, 573, 496, 0, 503, 504, 0, 579, + 586, 587, 552, 115, 125, 180, 297, 232, 151, 567, + 0, 0, 283, 222, 156, 0, 0, 570, 286, 486, + 499, 144, 508, 0, 0, 522, 527, 528, 540, 541, + 542, 543, 544, 551, 558, 559, 561, 568, 569, 576, + 583, 602, 117, 118, 126, 132, 138, 143, 147, 150, + 159, 162, 164, 165, 166, 171, 185, 189, 190, 191, + 192, 203, 204, 205, 207, 210, 211, 212, 213, 214, + 217, 219, 220, 221, 223, 224, 233, 236, 243, 244, + 245, 246, 247, 248, 250, 253, 254, 255, 256, 263, + 267, 273, 274, 290, 293, 0, 0, 0, 0, 119, + 170, 188, 249, 252, 258, 265, 320, 321, 322, 238, + 175, 176, 0, 0, 0, 0, 518, 231, 201, 167, + 574, 590, 578, 0, 535, 593, 507, 525, 601, 526, + 529, 565, 493, 547, 209, 523, 0, 511, 488, 519, + 489, 509, 537, 146, 177, 142, 580, 550, 592, 179, + 0, 512, 599, 181, 556, 0, 261, 196, 0, 0, + 0, 539, 582, 545, 575, 534, 566, 498, 555, 594, + 524, 563, 595, 0, 0, 0, 112, 113, 114, 0, + 0, 0, 0, 0, 0, 0, 0, 135, 0, 560, + 589, 521, 562, 564, 603, 487, 557, 0, 491, 494, + 600, 585, 515, 516, 0, 0, 0, 0, 0, 0, + 0, 538, 546, 571, 532, 0, 0, 0, 0, 0, + 0, 0, 0, 513, 0, 554, 0, 0, 0, 495, + 492, 0, 0, 0, 0, 536, 0, 0, 0, 497, + 0, 514, 572, 0, 485, 155, 577, 584, 533, 323, + 588, 531, 530, 591, 228, 0, 266, 158, 178, 131, + 172, 116, 127, 0, 157, 206, 237, 242, 581, 510, + 520, 139, 517, 240, 216, 282, 553, 218, 239, 182, + 272, 229, 281, 291, 292, 269, 289, 296, 257, 120, + 268, 280, 136, 251, 0, 0, 0, 122, 278, 264, + 194, 168, 169, 121, 0, 235, 145, 153, 141, 208, + 275, 276, 140, 299, 128, 288, 124, 129, 287, 202, + 271, 279, 195, 187, 123, 277, 193, 186, 174, 149, + 160, 226, 184, 227, 161, 198, 197, 199, 0, 490, + 0, 262, 285, 300, 133, 506, 270, 294, 295, 0, + 230, 134, 154, 148, 225, 152, 200, 130, 163, 259, + 173, 183, 234, 298, 215, 241, 137, 284, 260, 502, + 505, 500, 501, 548, 549, 596, 597, 598, 573, 496, + 0, 503, 504, 0, 579, 586, 587, 552, 115, 125, + 180, 297, 232, 151, 567, 0, 0, 283, 222, 156, + 0, 0, 570, 286, 486, 499, 144, 508, 0, 0, + 522, 527, 528, 540, 541, 542, 543, 544, 551, 558, + 559, 561, 568, 569, 576, 583, 602, 117, 118, 126, + 132, 138, 143, 147, 150, 159, 162, 164, 165, 166, + 171, 185, 189, 190, 191, 192, 203, 204, 205, 207, + 210, 211, 212, 213, 214, 217, 219, 220, 221, 223, + 224, 233, 236, 243, 244, 245, 246, 247, 248, 250, + 253, 254, 255, 256, 263, 267, 273, 274, 290, 293, + 0, 0, 0, 0, 119, 170, 188, 249, 252, 258, + 265, 320, 321, 322, 238, 175, 176, 0, 0, 0, + 0, 518, 231, 201, 167, 574, 590, 578, 0, 535, + 593, 507, 525, 601, 526, 529, 565, 493, 547, 209, + 523, 0, 511, 488, 519, 489, 509, 537, 146, 177, + 142, 580, 550, 592, 179, 0, 512, 599, 181, 556, + 0, 261, 196, 0, 0, 0, 539, 582, 545, 575, + 534, 566, 498, 555, 594, 524, 563, 595, 0, 0, + 0, 112, 113, 114, 0, 0, 0, 0, 0, 0, + 0, 0, 135, 0, 560, 589, 521, 562, 564, 603, + 487, 557, 0, 491, 494, 600, 585, 515, 516, 0, + 0, 0, 0, 0, 0, 0, 538, 546, 571, 532, + 0, 0, 0, 0, 0, 0, 0, 0, 513, 0, + 554, 0, 0, 0, 495, 492, 0, 0, 0, 0, + 536, 0, 0, 0, 497, 0, 514, 572, 0, 485, + 155, 577, 584, 533, 323, 588, 531, 530, 591, 228, + 0, 266, 158, 178, 131, 172, 116, 127, 0, 157, + 206, 237, 242, 581, 510, 520, 139, 517, 240, 216, + 282, 553, 218, 239, 182, 272, 229, 281, 291, 292, + 269, 289, 296, 257, 120, 268, 280, 136, 251, 0, + 0, 0, 122, 278, 264, 194, 168, 169, 121, 0, + 235, 145, 153, 141, 208, 275, 276, 140, 299, 128, + 288, 124, 483, 287, 202, 271, 279, 195, 187, 123, + 277, 193, 186, 174, 149, 160, 226, 184, 227, 161, + 198, 197, 199, 0, 490, 0, 262, 285, 300, 133, + 506, 270, 294, 295, 0, 230, 134, 154, 148, 225, + 152, 484, 482, 163, 259, 173, 183, 234, 298, 215, + 241, 137, 284, 260, 502, 505, 500, 501, 548, 549, + 596, 597, 598, 573, 496, 0, 503, 504, 0, 579, + 586, 587, 552, 115, 125, 180, 297, 232, 151, 567, + 0, 0, 283, 222, 156, 0, 0, 570, 286, 486, + 499, 144, 508, 0, 0, 522, 527, 528, 540, 541, + 542, 543, 544, 551, 558, 559, 561, 568, 569, 576, + 583, 602, 117, 118, 126, 132, 138, 143, 147, 150, + 159, 162, 164, 165, 166, 171, 185, 189, 190, 191, + 192, 203, 204, 205, 207, 210, 211, 212, 213, 214, + 217, 219, 220, 221, 223, 224, 233, 236, 243, 244, + 245, 246, 247, 248, 250, 253, 254, 255, 256, 263, + 267, 273, 274, 290, 293, 0, 0, 0, 0, 119, + 170, 188, 249, 252, 258, 265, 320, 321, 322, 238, + 175, 176, 0, 0, 0, 0, 518, 231, 201, 167, + 574, 590, 578, 0, 535, 593, 507, 525, 601, 526, + 529, 565, 493, 547, 209, 523, 0, 511, 488, 519, + 489, 509, 537, 146, 177, 142, 580, 550, 592, 179, + 0, 512, 599, 181, 556, 0, 261, 196, 0, 0, + 0, 539, 582, 545, 575, 534, 566, 498, 555, 594, + 524, 563, 595, 0, 0, 0, 112, 113, 114, 0, + 0, 0, 0, 0, 0, 0, 0, 135, 0, 560, + 589, 521, 562, 564, 603, 487, 557, 0, 491, 494, + 600, 585, 515, 516, 0, 0, 0, 0, 0, 0, + 0, 538, 546, 571, 532, 0, 0, 0, 0, 0, + 0, 0, 0, 513, 0, 554, 0, 0, 0, 495, + 492, 0, 0, 0, 0, 536, 0, 0, 0, 497, + 0, 514, 572, 0, 485, 155, 577, 584, 533, 323, + 588, 531, 530, 591, 228, 0, 266, 158, 178, 131, + 172, 116, 127, 0, 157, 206, 237, 242, 581, 510, + 520, 139, 517, 240, 216, 282, 553, 218, 239, 182, + 272, 229, 281, 291, 292, 269, 289, 296, 257, 120, + 268, 842, 136, 251, 0, 0, 0, 122, 278, 264, + 194, 168, 169, 121, 0, 235, 145, 153, 141, 208, + 275, 276, 140, 299, 128, 288, 124, 483, 287, 202, + 271, 279, 195, 187, 123, 277, 193, 186, 174, 149, + 160, 226, 184, 227, 161, 198, 197, 199, 0, 490, + 0, 262, 285, 300, 133, 506, 270, 294, 295, 0, + 230, 134, 154, 148, 225, 152, 484, 482, 163, 259, + 173, 183, 234, 298, 215, 241, 137, 284, 260, 502, + 505, 500, 501, 548, 549, 596, 597, 598, 573, 496, + 0, 503, 504, 0, 579, 586, 587, 552, 115, 125, + 180, 297, 232, 151, 567, 0, 0, 283, 222, 156, + 0, 0, 570, 286, 486, 499, 144, 508, 0, 0, + 522, 527, 528, 540, 541, 542, 543, 544, 551, 558, + 559, 561, 568, 569, 576, 583, 602, 117, 118, 126, + 132, 138, 143, 147, 150, 159, 162, 164, 165, 166, + 171, 185, 189, 190, 191, 192, 203, 204, 205, 207, + 210, 211, 212, 213, 214, 217, 219, 220, 221, 223, + 224, 233, 236, 243, 244, 245, 246, 247, 248, 250, + 253, 254, 255, 256, 263, 267, 273, 274, 290, 293, + 0, 0, 0, 0, 119, 170, 188, 249, 252, 258, + 265, 320, 321, 322, 238, 175, 176, 0, 0, 0, + 0, 518, 231, 201, 167, 574, 590, 578, 0, 535, + 593, 507, 525, 601, 526, 529, 565, 493, 547, 209, + 523, 0, 511, 488, 519, 489, 509, 537, 146, 177, + 142, 580, 550, 592, 179, 0, 512, 599, 181, 556, + 0, 261, 196, 0, 0, 0, 539, 582, 545, 575, + 534, 566, 498, 555, 594, 524, 563, 595, 0, 0, + 0, 112, 113, 114, 0, 0, 0, 0, 0, 0, + 0, 0, 135, 0, 560, 589, 521, 562, 564, 603, + 487, 557, 0, 491, 494, 600, 585, 515, 516, 0, + 0, 0, 0, 0, 0, 0, 538, 546, 571, 532, + 0, 0, 0, 0, 0, 0, 0, 0, 513, 0, + 554, 0, 0, 0, 495, 492, 0, 0, 0, 0, + 536, 0, 0, 0, 497, 0, 514, 572, 0, 485, + 155, 577, 584, 533, 323, 588, 531, 530, 591, 228, + 0, 266, 158, 178, 131, 172, 116, 127, 0, 157, + 206, 237, 242, 581, 510, 520, 139, 517, 240, 216, + 282, 553, 218, 239, 182, 272, 229, 281, 291, 292, + 269, 289, 296, 257, 120, 268, 474, 136, 251, 0, + 0, 0, 122, 278, 264, 194, 168, 169, 121, 0, + 235, 145, 153, 141, 208, 275, 276, 140, 299, 128, + 288, 124, 483, 287, 202, 271, 279, 195, 187, 123, + 277, 193, 186, 174, 149, 160, 226, 184, 227, 161, + 198, 197, 199, 0, 490, 0, 262, 285, 300, 133, + 506, 270, 294, 295, 0, 230, 134, 154, 148, 225, + 152, 484, 482, 477, 476, 173, 183, 234, 298, 215, + 241, 137, 284, 260, 502, 505, 500, 501, 548, 549, + 596, 597, 598, 573, 496, 0, 503, 504, 0, 579, + 586, 587, 552, 115, 125, 180, 297, 232, 151, 567, + 0, 0, 283, 222, 156, 0, 0, 570, 286, 486, + 499, 144, 508, 0, 0, 522, 527, 528, 540, 541, + 542, 543, 544, 551, 558, 559, 561, 568, 569, 576, + 583, 602, 117, 118, 126, 132, 138, 143, 147, 150, + 159, 162, 164, 165, 166, 171, 185, 189, 190, 191, + 192, 203, 204, 205, 207, 210, 211, 212, 213, 214, + 217, 219, 220, 221, 223, 224, 233, 236, 243, 244, + 245, 246, 247, 248, 250, 253, 254, 255, 256, 263, + 267, 273, 274, 290, 293, 0, 0, 0, 0, 119, + 170, 188, 249, 252, 258, 265, 320, 321, 322, 238, + 175, 176, 0, 0, 0, 0, 518, 231, 201, 167, + 574, 209, 0, 0, 1015, 0, 368, 0, 0, 0, + 146, 177, 367, 0, 0, 0, 179, 0, 1016, 411, + 181, 0, 0, 261, 196, 0, 0, 0, 0, 0, + 402, 403, 0, 0, 0, 0, 0, 0, 0, 0, + 71, 0, 0, 112, 113, 114, 389, 388, 391, 392, + 393, 394, 0, 0, 135, 390, 395, 396, 397, 0, + 0, 0, 0, 365, 382, 0, 410, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 379, 380, 461, 0, + 0, 0, 425, 0, 381, 0, 0, 374, 375, 377, + 376, 378, 383, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 155, 424, 0, 0, 323, 0, 0, 422, + 0, 228, 0, 266, 158, 178, 131, 172, 116, 127, + 0, 157, 206, 237, 242, 0, 0, 0, 139, 0, + 240, 216, 282, 0, 218, 239, 182, 272, 229, 281, + 291, 292, 269, 289, 296, 257, 120, 268, 280, 136, + 251, 0, 0, 0, 122, 278, 264, 194, 168, 169, + 121, 0, 235, 145, 153, 141, 208, 275, 276, 140, + 299, 128, 288, 124, 129, 287, 202, 271, 279, 195, + 187, 123, 277, 193, 186, 174, 149, 160, 226, 184, + 227, 161, 198, 197, 199, 0, 0, 0, 262, 285, + 300, 133, 0, 270, 294, 295, 0, 230, 134, 154, + 148, 225, 152, 200, 130, 163, 259, 173, 183, 234, + 298, 215, 241, 137, 284, 260, 412, 423, 418, 419, + 416, 417, 415, 414, 413, 426, 404, 405, 406, 407, + 409, 0, 420, 421, 408, 115, 125, 180, 297, 232, + 151, 0, 0, 0, 283, 222, 156, 0, 0, 0, + 286, 0, 0, 144, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 117, 118, 126, 132, 138, 143, + 147, 150, 159, 162, 164, 165, 166, 171, 185, 189, + 190, 191, 192, 203, 204, 205, 207, 210, 211, 212, + 213, 214, 217, 219, 220, 221, 223, 224, 233, 236, + 243, 244, 245, 246, 247, 248, 250, 253, 254, 255, + 256, 263, 267, 273, 274, 290, 293, 0, 0, 0, + 0, 119, 170, 188, 249, 252, 258, 265, 320, 321, + 322, 238, 175, 176, 0, 0, 0, 209, 0, 231, + 201, 167, 368, 0, 0, 0, 146, 177, 367, 0, + 0, 0, 179, 0, 0, 411, 181, 0, 0, 261, + 196, 0, 0, 0, 0, 0, 402, 403, 0, 0, + 0, 0, 0, 0, 1141, 0, 71, 0, 0, 112, + 113, 114, 389, 388, 391, 392, 393, 394, 0, 0, + 135, 390, 395, 396, 397, 1142, 0, 0, 0, 365, + 382, 0, 410, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 379, 380, 0, 0, 0, 0, 425, 0, + 381, 0, 0, 374, 375, 377, 376, 378, 383, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 155, 424, + 0, 0, 323, 0, 0, 422, 0, 228, 0, 266, + 158, 178, 131, 172, 116, 127, 0, 157, 206, 237, + 242, 0, 0, 0, 139, 0, 240, 216, 282, 0, + 218, 239, 182, 272, 229, 281, 291, 292, 269, 289, + 296, 257, 120, 268, 280, 136, 251, 0, 0, 0, + 122, 278, 264, 194, 168, 169, 121, 0, 235, 145, + 153, 141, 208, 275, 276, 140, 299, 128, 288, 124, + 129, 287, 202, 271, 279, 195, 187, 123, 277, 193, + 186, 174, 149, 160, 226, 184, 227, 161, 198, 197, + 199, 0, 0, 0, 262, 285, 300, 133, 0, 270, + 294, 295, 0, 230, 134, 154, 148, 225, 152, 200, + 130, 163, 259, 173, 183, 234, 298, 215, 241, 137, + 284, 260, 412, 423, 418, 419, 416, 417, 415, 414, + 413, 426, 404, 405, 406, 407, 409, 0, 420, 421, + 408, 115, 125, 180, 297, 232, 151, 0, 0, 0, + 283, 222, 156, 0, 0, 0, 286, 0, 0, 144, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 117, 118, 126, 132, 138, 143, 147, 150, 159, 162, + 164, 165, 166, 171, 185, 189, 190, 191, 192, 203, + 204, 205, 207, 210, 211, 212, 213, 214, 217, 219, + 220, 221, 223, 224, 233, 236, 243, 244, 245, 246, + 247, 248, 250, 253, 254, 255, 256, 263, 267, 273, + 274, 290, 293, 0, 0, 0, 0, 119, 170, 188, + 249, 252, 258, 265, 320, 321, 322, 238, 175, 176, + 0, 0, 0, 209, 0, 231, 201, 167, 368, 0, + 0, 0, 146, 177, 367, 0, 0, 0, 179, 0, + 0, 411, 181, 0, 0, 261, 196, 0, 0, 0, + 0, 0, 402, 403, 0, 0, 0, 0, 0, 0, + 1138, 0, 71, 0, 0, 112, 113, 114, 389, 388, + 391, 392, 393, 394, 0, 0, 135, 390, 395, 396, + 397, 1139, 0, 0, 0, 365, 382, 0, 410, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 379, 380, + 0, 0, 0, 0, 425, 0, 381, 0, 0, 374, + 375, 377, 376, 378, 383, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 155, 424, 0, 0, 323, 0, + 0, 422, 0, 228, 0, 266, 158, 178, 131, 172, + 116, 127, 0, 157, 206, 237, 242, 0, 0, 0, + 139, 0, 240, 216, 282, 0, 218, 239, 182, 272, + 229, 281, 291, 292, 269, 289, 296, 257, 120, 268, + 280, 136, 251, 0, 0, 0, 122, 278, 264, 194, + 168, 169, 121, 0, 235, 145, 153, 141, 208, 275, + 276, 140, 299, 128, 288, 124, 129, 287, 202, 271, + 279, 195, 187, 123, 277, 193, 186, 174, 149, 160, + 226, 184, 227, 161, 198, 197, 199, 0, 0, 0, + 262, 285, 300, 133, 0, 270, 294, 295, 0, 230, + 134, 154, 148, 225, 152, 200, 130, 163, 259, 173, + 183, 234, 298, 215, 241, 137, 284, 260, 412, 423, + 418, 419, 416, 417, 415, 414, 413, 426, 404, 405, + 406, 407, 409, 0, 420, 421, 408, 115, 125, 180, + 297, 232, 151, 0, 0, 0, 283, 222, 156, 0, + 0, 0, 286, 0, 0, 144, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 117, 118, 126, 132, + 138, 143, 147, 150, 159, 162, 164, 165, 166, 171, + 185, 189, 190, 191, 192, 203, 204, 205, 207, 210, + 211, 212, 213, 214, 217, 219, 220, 221, 223, 224, + 233, 236, 243, 244, 245, 246, 247, 248, 250, 253, + 254, 255, 256, 263, 267, 273, 274, 290, 293, 0, + 0, 0, 0, 119, 170, 188, 249, 252, 258, 265, + 320, 321, 322, 238, 175, 176, 0, 0, 0, 209, + 0, 231, 201, 167, 368, 0, 0, 0, 146, 177, + 367, 0, 0, 0, 179, 0, 0, 411, 181, 0, + 0, 261, 196, 0, 0, 0, 0, 0, 402, 403, + 0, 0, 0, 0, 0, 0, 0, 0, 71, 0, + 1023, 112, 113, 114, 389, 388, 391, 392, 393, 394, + 0, 0, 135, 390, 395, 396, 397, 0, 0, 0, + 0, 365, 382, 0, 410, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 379, 380, 0, 0, 0, 0, + 425, 0, 381, 0, 0, 374, 375, 377, 376, 378, + 383, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 155, 424, 0, 0, 323, 0, 0, 422, 0, 228, + 0, 266, 158, 178, 131, 172, 116, 127, 0, 157, + 206, 237, 242, 0, 0, 0, 139, 0, 240, 216, + 282, 0, 218, 239, 182, 272, 229, 281, 291, 292, + 269, 289, 296, 257, 120, 268, 280, 136, 251, 0, + 0, 0, 122, 278, 264, 194, 168, 169, 121, 0, + 235, 145, 153, 141, 208, 275, 276, 140, 299, 128, + 288, 124, 129, 287, 202, 271, 279, 195, 187, 123, + 277, 193, 186, 174, 149, 160, 226, 184, 227, 161, + 198, 197, 199, 0, 0, 0, 262, 285, 300, 133, + 0, 270, 294, 295, 0, 230, 134, 154, 148, 225, + 152, 200, 130, 163, 259, 173, 183, 234, 298, 215, + 241, 137, 284, 260, 412, 423, 418, 419, 416, 417, + 415, 414, 413, 426, 404, 405, 406, 407, 409, 0, + 420, 421, 408, 115, 125, 180, 297, 232, 151, 0, + 0, 0, 283, 222, 156, 0, 0, 0, 286, 0, + 0, 144, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 117, 118, 126, 132, 138, 143, 147, 150, + 159, 162, 164, 165, 166, 171, 185, 189, 190, 191, + 192, 203, 204, 205, 207, 210, 211, 212, 213, 214, + 217, 219, 220, 221, 223, 224, 233, 236, 243, 244, + 245, 246, 247, 248, 250, 253, 254, 255, 256, 263, + 267, 273, 274, 290, 293, 0, 0, 0, 0, 119, + 170, 188, 249, 252, 258, 265, 320, 321, 322, 238, + 175, 176, 0, 0, 0, 209, 0, 231, 201, 167, + 368, 0, 0, 0, 146, 177, 367, 0, 0, 0, + 179, 0, 0, 411, 181, 0, 0, 261, 196, 0, + 0, 0, 0, 0, 402, 403, 0, 0, 0, 0, + 0, 0, 0, 0, 71, 0, 0, 112, 113, 114, + 389, 388, 391, 392, 393, 394, 0, 0, 135, 390, + 395, 396, 397, 0, 0, 0, 0, 365, 382, 0, + 410, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 379, 380, 461, 0, 0, 0, 425, 0, 381, 0, + 0, 374, 375, 377, 376, 378, 383, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 155, 424, 0, 0, + 323, 0, 0, 422, 0, 228, 0, 266, 158, 178, + 131, 172, 116, 127, 0, 157, 206, 237, 242, 0, + 0, 0, 139, 0, 240, 216, 282, 0, 218, 239, + 182, 272, 229, 281, 291, 292, 269, 289, 296, 257, + 120, 268, 280, 136, 251, 0, 0, 0, 122, 278, + 264, 194, 168, 169, 121, 0, 235, 145, 153, 141, + 208, 275, 276, 140, 299, 128, 288, 124, 129, 287, + 202, 271, 279, 195, 187, 123, 277, 193, 186, 174, + 149, 160, 226, 184, 227, 161, 198, 197, 199, 0, + 0, 0, 262, 285, 300, 133, 0, 270, 294, 295, + 0, 230, 134, 154, 148, 225, 152, 200, 130, 163, + 259, 173, 183, 234, 298, 215, 241, 137, 284, 260, + 412, 423, 418, 419, 416, 417, 415, 414, 413, 426, + 404, 405, 406, 407, 409, 0, 420, 421, 408, 115, + 125, 180, 297, 232, 151, 0, 0, 0, 283, 222, + 156, 0, 0, 0, 286, 0, 0, 144, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 117, 118, + 126, 132, 138, 143, 147, 150, 159, 162, 164, 165, + 166, 171, 185, 189, 190, 191, 192, 203, 204, 205, + 207, 210, 211, 212, 213, 214, 217, 219, 220, 221, + 223, 224, 233, 236, 243, 244, 245, 246, 247, 248, + 250, 253, 254, 255, 256, 263, 267, 273, 274, 290, + 293, 0, 0, 0, 0, 119, 170, 188, 249, 252, + 258, 265, 320, 321, 322, 238, 175, 176, 0, 0, + 0, 209, 0, 231, 201, 167, 368, 0, 0, 0, + 146, 177, 367, 0, 0, 0, 179, 0, 0, 411, + 181, 0, 0, 261, 196, 0, 0, 0, 0, 0, + 402, 403, 0, 0, 0, 0, 0, 0, 0, 0, + 71, 0, 0, 112, 113, 114, 389, 1034, 391, 392, + 393, 394, 0, 0, 135, 390, 395, 396, 397, 0, + 0, 0, 0, 365, 382, 0, 410, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 379, 380, 461, 0, + 0, 0, 425, 0, 381, 0, 0, 374, 375, 377, + 376, 378, 383, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 155, 424, 0, 0, 323, 0, 0, 422, + 0, 228, 0, 266, 158, 178, 131, 172, 116, 127, + 0, 157, 206, 237, 242, 0, 0, 0, 139, 0, + 240, 216, 282, 0, 218, 239, 182, 272, 229, 281, + 291, 292, 269, 289, 296, 257, 120, 268, 280, 136, + 251, 0, 0, 0, 122, 278, 264, 194, 168, 169, + 121, 0, 235, 145, 153, 141, 208, 275, 276, 140, + 299, 128, 288, 124, 129, 287, 202, 271, 279, 195, + 187, 123, 277, 193, 186, 174, 149, 160, 226, 184, + 227, 161, 198, 197, 199, 0, 0, 0, 262, 285, + 300, 133, 0, 270, 294, 295, 0, 230, 134, 154, + 148, 225, 152, 200, 130, 163, 259, 173, 183, 234, + 298, 215, 241, 137, 284, 260, 412, 423, 418, 419, + 416, 417, 415, 414, 413, 426, 404, 405, 406, 407, + 409, 0, 420, 421, 408, 115, 125, 180, 297, 232, + 151, 0, 0, 0, 283, 222, 156, 0, 0, 0, + 286, 0, 0, 144, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 117, 118, 126, 132, 138, 143, + 147, 150, 159, 162, 164, 165, 166, 171, 185, 189, + 190, 191, 192, 203, 204, 205, 207, 210, 211, 212, + 213, 214, 217, 219, 220, 221, 223, 224, 233, 236, + 243, 244, 245, 246, 247, 248, 250, 253, 254, 255, + 256, 263, 267, 273, 274, 290, 293, 0, 0, 0, + 0, 119, 170, 188, 249, 252, 258, 265, 320, 321, + 322, 238, 175, 176, 0, 0, 0, 209, 0, 231, + 201, 167, 368, 0, 0, 0, 146, 177, 367, 0, + 0, 0, 179, 0, 0, 411, 181, 0, 0, 261, + 196, 0, 0, 0, 0, 0, 402, 403, 0, 0, + 0, 0, 0, 0, 0, 0, 71, 0, 0, 112, + 113, 114, 389, 1031, 391, 392, 393, 394, 0, 0, + 135, 390, 395, 396, 397, 0, 0, 0, 0, 365, + 382, 0, 410, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 379, 380, 461, 0, 0, 0, 425, 0, + 381, 0, 0, 374, 375, 377, 376, 378, 383, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 155, 424, + 0, 0, 323, 0, 0, 422, 0, 228, 0, 266, + 158, 178, 131, 172, 116, 127, 0, 157, 206, 237, + 242, 0, 0, 0, 139, 0, 240, 216, 282, 0, + 218, 239, 182, 272, 229, 281, 291, 292, 269, 289, + 296, 257, 120, 268, 280, 136, 251, 0, 0, 0, + 122, 278, 264, 194, 168, 169, 121, 0, 235, 145, + 153, 141, 208, 275, 276, 140, 299, 128, 288, 124, + 129, 287, 202, 271, 279, 195, 187, 123, 277, 193, + 186, 174, 149, 160, 226, 184, 227, 161, 198, 197, + 199, 0, 0, 0, 262, 285, 300, 133, 0, 270, + 294, 295, 0, 230, 134, 154, 148, 225, 152, 200, + 130, 163, 259, 173, 183, 234, 298, 215, 241, 137, + 284, 260, 412, 423, 418, 419, 416, 417, 415, 414, + 413, 426, 404, 405, 406, 407, 409, 0, 420, 421, + 408, 115, 125, 180, 297, 232, 151, 0, 0, 0, + 283, 222, 156, 0, 0, 0, 286, 0, 0, 144, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 117, 118, 126, 132, 138, 143, 147, 150, 159, 162, + 164, 165, 166, 171, 185, 189, 190, 191, 192, 203, + 204, 205, 207, 210, 211, 212, 213, 214, 217, 219, + 220, 221, 223, 224, 233, 236, 243, 244, 245, 246, + 247, 248, 250, 253, 254, 255, 256, 263, 267, 273, + 274, 290, 293, 0, 0, 0, 0, 119, 170, 188, + 249, 252, 258, 265, 320, 321, 322, 238, 175, 176, + 443, 0, 0, 0, 0, 231, 201, 167, 0, 0, + 0, 0, 209, 0, 0, 0, 0, 368, 0, 0, + 0, 146, 177, 367, 0, 0, 0, 179, 0, 0, + 411, 181, 0, 0, 261, 196, 0, 0, 0, 0, + 0, 402, 403, 0, 0, 0, 0, 0, 0, 0, + 0, 71, 0, 0, 112, 113, 114, 389, 388, 391, + 392, 393, 394, 0, 0, 135, 390, 395, 396, 397, + 0, 0, 0, 0, 365, 382, 0, 410, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 379, 380, 0, + 0, 0, 0, 425, 0, 381, 0, 0, 374, 375, + 377, 376, 378, 383, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 155, 424, 0, 0, 323, 0, 0, + 422, 0, 228, 0, 266, 158, 178, 131, 172, 116, + 127, 0, 157, 206, 237, 242, 0, 0, 0, 139, + 0, 240, 216, 282, 0, 218, 239, 182, 272, 229, + 281, 291, 292, 269, 289, 296, 257, 120, 268, 280, + 136, 251, 0, 0, 0, 122, 278, 264, 194, 168, + 169, 121, 0, 235, 145, 153, 141, 208, 275, 276, + 140, 299, 128, 288, 124, 129, 287, 202, 271, 279, + 195, 187, 123, 277, 193, 186, 174, 149, 160, 226, + 184, 227, 161, 198, 197, 199, 0, 0, 0, 262, + 285, 300, 133, 0, 270, 294, 295, 0, 230, 134, + 154, 148, 225, 152, 200, 130, 163, 259, 173, 183, + 234, 298, 215, 241, 137, 284, 260, 412, 423, 418, + 419, 416, 417, 415, 414, 413, 426, 404, 405, 406, + 407, 409, 0, 420, 421, 408, 115, 125, 180, 742, + 232, 151, 0, 0, 0, 283, 222, 156, 0, 0, + 0, 286, 0, 0, 144, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 117, 118, 126, 132, 138, + 143, 147, 150, 159, 162, 164, 165, 166, 171, 185, + 189, 190, 191, 192, 203, 204, 205, 207, 210, 211, + 212, 213, 214, 217, 219, 220, 221, 223, 224, 233, + 236, 243, 244, 245, 246, 247, 248, 250, 253, 254, + 255, 256, 263, 267, 273, 274, 290, 293, 0, 0, + 0, 0, 119, 170, 188, 249, 252, 258, 265, 320, + 321, 322, 238, 175, 176, 209, 0, 0, 0, 0, + 231, 201, 167, 0, 146, 177, 142, 0, 0, 0, + 179, 0, 0, 411, 181, 0, 0, 261, 196, 0, + 0, 0, 0, 0, 402, 403, 0, 0, 0, 0, + 0, 0, 0, 0, 71, 0, 0, 112, 113, 114, + 389, 388, 391, 392, 393, 394, 0, 0, 135, 390, + 395, 396, 397, 0, 0, 0, 0, 0, 382, 1783, + 410, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 379, 380, 0, 0, 0, 0, 425, 0, 381, 0, + 0, 374, 375, 377, 376, 378, 383, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 155, 424, 0, 0, + 323, 0, 0, 422, 0, 228, 0, 266, 158, 178, + 131, 172, 116, 127, 0, 157, 206, 237, 242, 0, + 0, 0, 139, 0, 240, 216, 282, 0, 218, 239, + 182, 272, 229, 281, 291, 292, 269, 289, 296, 257, + 120, 268, 280, 136, 251, 0, 0, 0, 122, 278, + 264, 194, 168, 169, 121, 0, 235, 145, 153, 141, + 208, 275, 276, 140, 299, 128, 288, 124, 129, 287, + 202, 271, 279, 195, 187, 123, 277, 193, 186, 174, + 149, 160, 226, 184, 227, 161, 198, 197, 199, 0, + 0, 0, 262, 285, 300, 133, 0, 270, 294, 295, + 0, 230, 134, 154, 148, 225, 152, 200, 130, 163, + 259, 173, 183, 234, 298, 215, 241, 137, 284, 260, + 412, 423, 418, 419, 416, 417, 415, 414, 413, 426, + 404, 405, 406, 407, 409, 0, 420, 421, 408, 115, + 125, 180, 297, 232, 151, 0, 0, 0, 1779, 222, + 156, 1780, 0, 0, 286, 0, 0, 144, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 117, 118, + 126, 132, 138, 143, 147, 150, 159, 162, 164, 165, + 166, 171, 185, 189, 190, 191, 192, 203, 204, 205, + 207, 210, 211, 212, 213, 214, 217, 219, 220, 221, + 223, 224, 233, 236, 243, 244, 245, 246, 247, 248, + 250, 253, 254, 255, 256, 263, 267, 273, 274, 290, + 293, 0, 0, 0, 0, 119, 170, 188, 249, 252, + 258, 265, 320, 321, 322, 238, 175, 176, 209, 0, + 0, 0, 0, 231, 201, 167, 0, 146, 177, 142, + 0, 0, 0, 179, 0, 0, 411, 181, 0, 0, + 261, 196, 0, 0, 0, 0, 0, 402, 403, 0, + 0, 0, 0, 0, 0, 0, 0, 71, 0, 0, + 112, 113, 114, 389, 388, 391, 392, 393, 394, 0, + 0, 135, 390, 395, 396, 397, 0, 0, 0, 0, + 0, 382, 1778, 410, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 379, 380, 0, 0, 0, 0, 425, + 0, 381, 0, 0, 374, 375, 377, 376, 378, 383, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 155, + 424, 0, 0, 323, 0, 0, 422, 0, 228, 0, + 266, 158, 178, 131, 172, 116, 127, 0, 157, 206, + 237, 242, 0, 0, 0, 139, 0, 240, 216, 282, + 0, 218, 239, 182, 272, 229, 281, 291, 292, 269, + 289, 296, 257, 120, 268, 280, 136, 251, 0, 0, + 0, 122, 278, 264, 194, 168, 169, 121, 0, 235, + 145, 153, 141, 208, 275, 276, 140, 299, 128, 288, + 124, 129, 287, 202, 271, 279, 195, 187, 123, 277, + 193, 186, 174, 149, 160, 226, 184, 227, 161, 198, + 197, 199, 0, 0, 0, 262, 285, 300, 133, 0, + 270, 294, 295, 0, 230, 134, 154, 148, 225, 152, + 200, 130, 163, 259, 173, 183, 234, 298, 215, 241, + 137, 284, 260, 412, 423, 418, 419, 416, 417, 415, + 414, 413, 426, 404, 405, 406, 407, 409, 0, 420, + 421, 408, 115, 125, 180, 297, 232, 151, 0, 0, + 0, 1779, 222, 156, 1780, 0, 0, 286, 0, 0, + 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 117, 118, 126, 132, 138, 143, 147, 150, 159, + 162, 164, 165, 166, 171, 185, 189, 190, 191, 192, + 203, 204, 205, 207, 210, 211, 212, 213, 214, 217, + 219, 220, 221, 223, 224, 233, 236, 243, 244, 245, + 246, 247, 248, 250, 253, 254, 255, 256, 263, 267, + 273, 274, 290, 293, 0, 0, 0, 0, 119, 170, + 188, 249, 252, 258, 265, 320, 321, 322, 238, 175, + 176, 0, 0, 0, 209, 0, 231, 201, 167, 368, + 0, 0, 0, 146, 177, 367, 0, 0, 0, 179, + 0, 0, 411, 181, 0, 0, 261, 196, 0, 0, + 0, 0, 0, 402, 403, 0, 0, 0, 0, 0, + 0, 0, 0, 71, 0, 0, 112, 113, 114, 389, + 388, 391, 392, 393, 394, 0, 0, 135, 390, 395, + 396, 397, 0, 0, 0, 0, 365, 382, 0, 410, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 379, + 380, 0, 0, 0, 0, 425, 0, 381, 0, 0, + 374, 375, 377, 376, 378, 383, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 155, 424, 0, 0, 323, + 0, 0, 422, 0, 228, 0, 266, 158, 178, 131, + 172, 116, 127, 0, 157, 206, 237, 242, 0, 0, + 0, 139, 0, 240, 216, 282, 0, 218, 239, 182, + 272, 229, 281, 291, 292, 269, 289, 296, 257, 120, + 268, 280, 136, 251, 0, 0, 0, 122, 278, 264, + 194, 168, 169, 121, 0, 235, 145, 153, 141, 208, + 275, 276, 140, 299, 128, 288, 124, 129, 287, 202, + 271, 279, 195, 187, 123, 277, 193, 186, 174, 149, + 160, 226, 184, 227, 161, 198, 197, 199, 0, 0, + 0, 262, 285, 300, 133, 0, 270, 294, 295, 0, + 230, 134, 154, 148, 225, 152, 200, 130, 163, 259, + 173, 183, 234, 298, 215, 241, 137, 284, 260, 412, + 423, 418, 419, 416, 417, 415, 414, 413, 426, 404, + 405, 406, 407, 409, 0, 420, 421, 408, 115, 125, + 180, 297, 232, 151, 0, 0, 0, 283, 222, 156, + 0, 0, 0, 286, 0, 0, 144, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 117, 118, 126, + 132, 138, 143, 147, 150, 159, 162, 164, 165, 166, + 171, 185, 189, 190, 191, 192, 203, 204, 205, 207, + 210, 211, 212, 213, 214, 217, 219, 220, 221, 223, + 224, 233, 236, 243, 244, 245, 246, 247, 248, 250, + 253, 254, 255, 256, 263, 267, 273, 274, 290, 293, + 0, 0, 0, 0, 119, 170, 188, 249, 252, 258, + 265, 320, 321, 322, 238, 175, 176, 209, 0, 0, + 0, 0, 231, 201, 167, 0, 146, 177, 142, 0, + 0, 0, 179, 0, 0, 411, 181, 0, 0, 261, + 196, 0, 0, 0, 0, 0, 402, 403, 0, 0, + 0, 0, 0, 0, 0, 0, 71, 0, 0, 112, + 113, 114, 389, 388, 391, 392, 393, 394, 0, 0, + 135, 390, 395, 396, 397, 0, 0, 0, 0, 0, + 382, 0, 410, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 379, 380, 0, 0, 0, 0, 425, 0, + 381, 0, 0, 374, 375, 377, 376, 378, 383, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 155, 424, + 0, 0, 323, 0, 0, 422, 0, 228, 0, 266, + 158, 178, 131, 172, 116, 127, 0, 157, 206, 237, + 242, 0, 0, 0, 139, 0, 240, 216, 282, 1820, + 218, 239, 182, 272, 229, 281, 291, 292, 269, 289, + 296, 257, 120, 268, 280, 136, 251, 0, 0, 0, + 122, 278, 264, 194, 168, 169, 121, 0, 235, 145, + 153, 141, 208, 275, 276, 140, 299, 128, 288, 124, + 129, 287, 202, 271, 279, 195, 187, 123, 277, 193, + 186, 174, 149, 160, 226, 184, 227, 161, 198, 197, + 199, 0, 0, 0, 262, 285, 300, 133, 0, 270, + 294, 295, 0, 230, 134, 154, 148, 225, 152, 200, + 130, 163, 259, 173, 183, 234, 298, 215, 241, 137, + 284, 260, 412, 423, 418, 419, 416, 417, 415, 414, + 413, 426, 404, 405, 406, 407, 409, 0, 420, 421, + 408, 115, 125, 180, 297, 232, 151, 0, 0, 0, + 283, 222, 156, 0, 0, 0, 286, 0, 0, 144, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 117, 118, 126, 132, 138, 143, 147, 150, 159, 162, + 164, 165, 166, 171, 185, 189, 190, 191, 192, 203, + 204, 205, 207, 210, 211, 212, 213, 214, 217, 219, + 220, 221, 223, 224, 233, 236, 243, 244, 245, 246, + 247, 248, 250, 253, 254, 255, 256, 263, 267, 273, + 274, 290, 293, 0, 0, 0, 0, 119, 170, 188, + 249, 252, 258, 265, 320, 321, 322, 238, 175, 176, + 209, 0, 0, 0, 0, 231, 201, 167, 0, 146, + 177, 142, 0, 0, 0, 179, 0, 0, 411, 181, + 0, 0, 261, 196, 0, 0, 0, 0, 0, 402, + 403, 0, 0, 0, 0, 0, 0, 0, 0, 71, + 0, 0, 112, 113, 114, 389, 388, 391, 392, 393, + 394, 0, 0, 135, 390, 395, 396, 397, 0, 0, + 0, 0, 0, 382, 0, 410, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 379, 380, 0, 0, 0, + 0, 425, 0, 381, 0, 0, 374, 375, 377, 376, + 378, 383, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 155, 424, 0, 0, 323, 0, 0, 422, 0, + 228, 0, 266, 158, 178, 131, 172, 116, 127, 0, + 157, 206, 237, 242, 0, 0, 0, 139, 0, 240, + 216, 282, 0, 218, 239, 182, 272, 229, 281, 291, + 292, 269, 289, 296, 257, 120, 268, 280, 136, 251, + 0, 0, 0, 122, 278, 264, 194, 168, 169, 121, + 0, 235, 145, 153, 141, 208, 275, 276, 140, 299, + 128, 288, 124, 129, 287, 202, 271, 279, 195, 187, + 123, 277, 193, 186, 174, 149, 160, 226, 184, 227, + 161, 198, 197, 199, 0, 0, 0, 262, 285, 300, + 133, 0, 270, 294, 295, 0, 230, 134, 154, 148, + 225, 152, 200, 130, 163, 259, 173, 183, 234, 298, + 215, 241, 137, 284, 260, 412, 423, 418, 419, 416, + 417, 415, 414, 413, 426, 404, 405, 406, 407, 409, + 0, 420, 421, 408, 115, 125, 180, 297, 232, 151, + 0, 0, 0, 1779, 222, 156, 1780, 0, 0, 286, + 0, 0, 144, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 117, 118, 126, 132, 138, 143, 147, + 150, 159, 162, 164, 165, 166, 171, 185, 189, 190, + 191, 192, 203, 204, 205, 207, 210, 211, 212, 213, + 214, 217, 219, 220, 221, 223, 224, 233, 236, 243, + 244, 245, 246, 247, 248, 250, 253, 254, 255, 256, + 263, 267, 273, 274, 290, 293, 0, 0, 0, 0, + 119, 170, 188, 249, 252, 258, 265, 320, 321, 322, + 238, 175, 176, 209, 0, 0, 0, 0, 231, 201, + 167, 0, 146, 177, 142, 0, 0, 0, 179, 0, + 0, 411, 181, 0, 0, 261, 196, 0, 0, 0, + 0, 0, 402, 403, 0, 0, 0, 0, 0, 0, + 0, 0, 71, 0, 1023, 112, 113, 114, 389, 388, + 391, 392, 393, 394, 0, 0, 135, 390, 395, 396, + 397, 0, 0, 0, 0, 0, 382, 0, 410, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 379, 380, + 0, 0, 0, 0, 425, 0, 381, 0, 0, 374, + 375, 377, 376, 378, 383, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 155, 424, 0, 0, 323, 0, + 0, 422, 0, 228, 0, 266, 158, 178, 131, 172, + 116, 127, 0, 157, 206, 237, 242, 0, 0, 0, + 139, 0, 240, 216, 282, 0, 218, 239, 182, 272, + 229, 281, 291, 292, 269, 289, 296, 257, 120, 268, + 280, 136, 251, 0, 0, 0, 122, 278, 264, 194, + 168, 169, 121, 0, 235, 145, 153, 141, 208, 275, + 276, 140, 299, 128, 288, 124, 129, 287, 202, 271, + 279, 195, 187, 123, 277, 193, 186, 174, 149, 160, + 226, 184, 227, 161, 198, 197, 199, 0, 0, 0, + 262, 285, 300, 133, 0, 270, 294, 295, 0, 230, + 134, 154, 148, 225, 152, 200, 130, 163, 259, 173, + 183, 234, 298, 215, 241, 137, 284, 260, 412, 423, + 418, 419, 416, 417, 415, 414, 413, 426, 404, 405, + 406, 407, 409, 0, 420, 421, 408, 115, 125, 180, + 297, 232, 151, 0, 0, 0, 283, 222, 156, 0, + 0, 0, 286, 0, 0, 144, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 117, 118, 126, 132, + 138, 143, 147, 150, 159, 162, 164, 165, 166, 171, + 185, 189, 190, 191, 192, 203, 204, 205, 207, 210, + 211, 212, 213, 214, 217, 219, 220, 221, 223, 224, + 233, 236, 243, 244, 245, 246, 247, 248, 250, 253, + 254, 255, 256, 263, 267, 273, 274, 290, 293, 0, + 0, 0, 0, 119, 170, 188, 249, 252, 258, 265, + 320, 321, 322, 238, 175, 176, 209, 0, 0, 0, + 0, 231, 201, 167, 0, 146, 177, 142, 0, 0, + 0, 179, 0, 0, 411, 181, 0, 0, 261, 196, + 0, 0, 0, 0, 0, 402, 403, 0, 0, 0, + 0, 0, 0, 0, 0, 71, 0, 0, 112, 113, + 114, 389, 388, 391, 392, 393, 394, 0, 0, 135, + 390, 395, 396, 397, 0, 0, 0, 0, 0, 382, + 0, 410, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 379, 380, 0, 0, 0, 0, 425, 0, 381, + 0, 0, 374, 375, 377, 376, 378, 383, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 155, 424, 0, + 0, 323, 0, 0, 422, 0, 228, 0, 266, 158, + 178, 131, 172, 116, 127, 0, 157, 206, 237, 242, + 0, 0, 0, 139, 0, 240, 216, 282, 0, 218, + 239, 182, 272, 229, 281, 291, 292, 269, 289, 296, + 257, 120, 268, 280, 136, 251, 0, 0, 0, 122, + 278, 264, 194, 168, 169, 121, 0, 235, 145, 153, + 141, 208, 275, 276, 140, 299, 128, 288, 124, 129, + 287, 202, 271, 279, 195, 187, 123, 277, 193, 186, + 174, 149, 160, 226, 184, 227, 161, 198, 197, 199, + 0, 0, 0, 262, 285, 300, 133, 0, 270, 294, + 295, 0, 230, 134, 154, 148, 225, 152, 200, 130, + 163, 259, 173, 183, 234, 298, 215, 241, 137, 284, + 260, 412, 423, 418, 419, 416, 417, 415, 414, 413, + 426, 404, 405, 406, 407, 409, 0, 420, 421, 408, + 115, 125, 180, 297, 232, 151, 0, 0, 0, 283, + 222, 156, 0, 0, 0, 286, 0, 0, 144, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 117, + 118, 126, 132, 138, 143, 147, 150, 159, 162, 164, + 165, 166, 171, 185, 189, 190, 191, 192, 203, 204, + 205, 207, 210, 211, 212, 213, 214, 217, 219, 220, + 221, 223, 224, 233, 236, 243, 244, 245, 246, 247, + 248, 250, 253, 254, 255, 256, 263, 267, 273, 274, + 290, 293, 0, 0, 0, 0, 119, 170, 188, 249, + 252, 258, 265, 320, 321, 322, 238, 175, 176, 209, + 0, 0, 0, 0, 231, 201, 167, 0, 146, 177, + 142, 0, 0, 0, 179, 0, 0, 411, 181, 0, + 0, 261, 196, 0, 0, 0, 0, 0, 402, 403, + 0, 0, 0, 0, 0, 0, 0, 0, 71, 0, + 0, 112, 113, 114, 389, 736, 391, 392, 393, 394, + 0, 0, 135, 390, 395, 396, 397, 0, 0, 0, + 0, 0, 382, 0, 410, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 379, 380, 0, 0, 0, 0, + 425, 0, 381, 0, 0, 374, 375, 377, 376, 378, + 383, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 155, 424, 0, 0, 323, 0, 0, 422, 0, 228, + 0, 266, 158, 178, 131, 172, 116, 127, 0, 157, + 206, 237, 242, 0, 0, 0, 139, 0, 240, 216, + 282, 0, 218, 239, 182, 272, 229, 281, 291, 292, + 269, 289, 296, 257, 120, 268, 280, 136, 251, 0, + 0, 0, 122, 278, 264, 194, 168, 169, 121, 0, + 235, 145, 153, 141, 208, 275, 276, 140, 299, 128, + 288, 124, 129, 287, 202, 271, 279, 195, 187, 123, + 277, 193, 186, 174, 149, 160, 226, 184, 227, 161, + 198, 197, 199, 0, 0, 0, 262, 285, 300, 133, + 0, 270, 294, 295, 0, 230, 134, 154, 148, 225, + 152, 200, 130, 163, 259, 173, 183, 234, 298, 215, + 241, 137, 284, 260, 412, 423, 418, 419, 416, 417, + 415, 414, 413, 426, 404, 405, 406, 407, 409, 0, + 420, 421, 408, 115, 125, 180, 297, 232, 151, 0, + 0, 0, 283, 222, 156, 0, 0, 0, 286, 0, + 0, 144, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 117, 118, 126, 132, 138, 143, 147, 150, + 159, 162, 164, 165, 166, 171, 185, 189, 190, 191, + 192, 203, 204, 205, 207, 210, 211, 212, 213, 214, + 217, 219, 220, 221, 223, 224, 233, 236, 243, 244, + 245, 246, 247, 248, 250, 253, 254, 255, 256, 263, + 267, 273, 274, 290, 293, 0, 0, 0, 0, 119, + 170, 188, 249, 252, 258, 265, 320, 321, 322, 238, + 175, 176, 209, 0, 0, 0, 0, 231, 201, 167, + 0, 146, 177, 142, 0, 0, 0, 179, 0, 0, + 0, 181, 0, 0, 261, 196, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 112, 113, 114, 0, 0, 0, + 0, 0, 0, 0, 0, 135, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 701, 700, 710, 711, 703, 704, 705, + 706, 707, 708, 709, 702, 0, 0, 712, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 155, 0, 0, 0, 323, 0, 0, + 0, 0, 228, 0, 266, 158, 178, 131, 172, 116, + 127, 0, 157, 206, 237, 242, 0, 0, 0, 139, + 0, 240, 216, 282, 0, 218, 239, 182, 272, 229, + 281, 291, 292, 269, 289, 296, 257, 120, 268, 280, + 136, 251, 0, 0, 0, 122, 278, 264, 194, 168, + 169, 121, 0, 235, 145, 153, 141, 208, 275, 276, + 140, 299, 128, 288, 124, 129, 287, 202, 271, 279, + 195, 187, 123, 277, 193, 186, 174, 149, 160, 226, + 184, 227, 161, 198, 197, 199, 0, 0, 0, 262, + 285, 300, 133, 0, 270, 294, 295, 0, 230, 134, + 154, 148, 225, 152, 200, 130, 163, 259, 173, 183, + 234, 298, 215, 241, 137, 284, 260, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 115, 125, 180, 297, + 232, 151, 0, 0, 0, 283, 222, 156, 0, 0, + 0, 286, 0, 0, 144, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 117, 118, 126, 132, 138, + 143, 147, 150, 159, 162, 164, 165, 166, 171, 185, + 189, 190, 191, 192, 203, 204, 205, 207, 210, 211, + 212, 213, 214, 217, 219, 220, 221, 223, 224, 233, + 236, 243, 244, 245, 246, 247, 248, 250, 253, 254, + 255, 256, 263, 267, 273, 274, 290, 293, 0, 0, + 0, 0, 119, 170, 188, 249, 252, 258, 265, 320, + 321, 322, 238, 175, 176, 209, 0, 0, 0, 812, + 231, 201, 167, 0, 146, 177, 142, 0, 0, 0, + 179, 0, 0, 0, 181, 0, 0, 261, 196, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 112, 113, 114, + 0, 814, 0, 0, 0, 0, 692, 0, 135, 0, + 0, 0, 0, 0, 688, 689, 687, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 690, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 155, 0, 0, 0, + 323, 0, 0, 0, 0, 228, 0, 266, 158, 178, + 131, 172, 116, 127, 0, 157, 206, 237, 242, 0, + 0, 0, 139, 0, 240, 216, 282, 0, 218, 239, + 182, 272, 229, 281, 291, 292, 269, 289, 296, 257, + 120, 268, 280, 136, 251, 0, 0, 0, 122, 278, + 264, 194, 168, 169, 121, 0, 235, 145, 153, 141, + 208, 275, 276, 140, 299, 128, 288, 124, 129, 287, + 202, 271, 279, 195, 187, 123, 277, 193, 186, 174, + 149, 160, 226, 184, 227, 161, 198, 197, 199, 0, + 0, 0, 262, 285, 300, 133, 0, 270, 294, 295, + 0, 230, 134, 154, 148, 225, 152, 200, 130, 163, + 259, 173, 183, 234, 298, 215, 241, 137, 284, 260, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 115, + 125, 180, 297, 232, 151, 0, 0, 0, 283, 222, + 156, 0, 0, 0, 286, 0, 0, 144, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 117, 118, + 126, 132, 138, 143, 147, 150, 159, 162, 164, 165, + 166, 171, 185, 189, 190, 191, 192, 203, 204, 205, + 207, 210, 211, 212, 213, 214, 217, 219, 220, 221, + 223, 224, 233, 236, 243, 244, 245, 246, 247, 248, + 250, 253, 254, 255, 256, 263, 267, 273, 274, 290, + 293, 0, 0, 0, 0, 119, 170, 188, 249, 252, + 258, 265, 320, 321, 322, 238, 175, 176, 209, 0, + 0, 0, 0, 231, 201, 167, 0, 146, 177, 142, + 0, 0, 0, 179, 0, 0, 0, 181, 0, 0, + 261, 196, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 112, 113, 114, 0, 0, 0, 0, 0, 0, 0, + 0, 135, 0, 0, 0, 0, 0, 99, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 155, + 102, 103, 0, 98, 0, 0, 0, 105, 228, 0, + 266, 158, 178, 131, 172, 116, 127, 0, 157, 206, + 237, 242, 0, 0, 0, 139, 0, 240, 216, 282, + 0, 218, 239, 182, 272, 229, 281, 291, 292, 269, + 289, 296, 257, 120, 268, 280, 136, 251, 0, 0, + 0, 122, 278, 264, 194, 168, 169, 121, 0, 235, + 145, 153, 141, 208, 275, 276, 140, 299, 128, 288, + 124, 129, 287, 202, 271, 279, 195, 187, 123, 277, + 193, 186, 174, 149, 160, 226, 184, 227, 161, 198, + 197, 199, 0, 0, 0, 262, 285, 300, 133, 0, + 270, 294, 295, 0, 230, 134, 154, 148, 225, 152, + 200, 130, 163, 259, 173, 183, 234, 298, 215, 241, + 137, 284, 260, 0, 101, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 115, 125, 180, 297, 232, 151, 0, 0, + 0, 283, 222, 156, 0, 0, 0, 286, 0, 0, + 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 117, 118, 126, 132, 138, 143, 147, 150, 159, + 162, 164, 165, 166, 171, 185, 189, 190, 191, 192, + 203, 204, 205, 207, 210, 211, 212, 213, 214, 217, + 219, 220, 221, 223, 224, 233, 236, 243, 244, 245, + 246, 247, 248, 250, 253, 254, 255, 256, 263, 267, + 273, 274, 290, 293, 0, 0, 0, 0, 119, 170, + 188, 249, 252, 258, 265, 107, 108, 109, 238, 175, + 176, 36, 0, 0, 0, 0, 231, 201, 167, 0, + 0, 0, 0, 209, 0, 0, 0, 0, 0, 0, + 0, 0, 146, 177, 142, 0, 0, 0, 179, 0, + 0, 0, 181, 0, 0, 261, 196, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 71, 0, 0, 112, 113, 114, 0, 0, + 0, 0, 0, 0, 0, 0, 135, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 155, 0, 0, 0, 323, 0, + 0, 0, 0, 228, 0, 266, 158, 178, 131, 172, + 116, 127, 0, 157, 206, 237, 242, 0, 0, 0, + 139, 0, 240, 216, 282, 0, 218, 239, 182, 272, + 229, 281, 291, 292, 269, 289, 296, 257, 120, 268, + 280, 136, 251, 0, 0, 0, 122, 278, 264, 194, + 168, 169, 121, 0, 235, 145, 153, 141, 208, 275, + 276, 140, 299, 128, 288, 124, 129, 287, 202, 271, + 279, 195, 187, 123, 277, 193, 186, 174, 149, 160, + 226, 184, 227, 161, 198, 197, 199, 0, 0, 0, + 262, 285, 300, 133, 0, 270, 294, 295, 0, 230, + 134, 154, 148, 225, 152, 200, 130, 163, 259, 173, + 183, 234, 298, 215, 241, 137, 284, 260, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 115, 125, 180, + 742, 232, 151, 0, 0, 0, 283, 222, 156, 0, + 0, 0, 286, 0, 0, 144, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 117, 118, 126, 132, + 138, 143, 147, 150, 159, 162, 164, 165, 166, 171, + 185, 189, 190, 191, 192, 203, 204, 205, 207, 210, + 211, 212, 213, 214, 217, 219, 220, 221, 223, 224, + 233, 236, 243, 244, 245, 246, 247, 248, 250, 253, + 254, 255, 256, 263, 267, 273, 274, 290, 293, 0, + 0, 0, 0, 119, 170, 188, 249, 252, 258, 265, + 320, 321, 322, 238, 830, 831, 0, 0, 0, 0, + 65, 231, 201, 167, 209, 0, 0, 0, 1118, 0, + 0, 0, 0, 146, 177, 142, 0, 0, 0, 179, + 0, 0, 0, 181, 0, 0, 261, 196, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 112, 113, 114, 0, + 1120, 0, 0, 0, 0, 0, 0, 135, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 155, 0, 0, 0, 323, + 0, 0, 0, 0, 228, 0, 266, 158, 178, 131, + 172, 116, 127, 0, 157, 206, 237, 242, 0, 0, + 0, 139, 0, 240, 216, 282, 0, 218, 239, 182, + 272, 229, 281, 291, 292, 269, 289, 296, 257, 120, + 268, 280, 136, 251, 0, 0, 0, 122, 278, 264, + 194, 168, 169, 121, 0, 235, 145, 153, 141, 208, + 275, 276, 140, 299, 128, 288, 124, 129, 287, 202, + 271, 279, 195, 187, 123, 277, 193, 186, 174, 149, + 160, 226, 184, 227, 161, 198, 197, 199, 0, 0, + 0, 262, 285, 300, 133, 0, 270, 294, 295, 0, + 230, 134, 154, 148, 225, 152, 200, 130, 163, 259, + 173, 183, 234, 298, 215, 241, 137, 284, 260, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 115, 125, + 180, 297, 232, 151, 0, 0, 0, 283, 222, 156, + 0, 0, 0, 286, 0, 0, 144, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 117, 118, 126, + 132, 138, 143, 147, 150, 159, 162, 164, 165, 166, + 171, 185, 189, 190, 191, 192, 203, 204, 205, 207, + 210, 211, 212, 213, 214, 217, 219, 220, 221, 223, + 224, 233, 236, 243, 244, 245, 246, 247, 248, 250, + 253, 254, 255, 256, 263, 267, 273, 274, 290, 293, + 0, 0, 0, 0, 119, 170, 188, 249, 252, 258, + 265, 320, 321, 322, 238, 175, 176, 36, 0, 0, + 0, 0, 231, 201, 167, 0, 0, 0, 0, 209, + 0, 0, 0, 0, 0, 0, 0, 0, 146, 177, + 142, 0, 0, 0, 179, 0, 0, 0, 181, 0, + 0, 261, 196, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 71, 0, + 0, 112, 113, 114, 0, 0, 0, 0, 0, 0, + 0, 0, 135, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 155, 0, 0, 0, 323, 0, 0, 0, 0, 228, + 0, 266, 158, 178, 131, 172, 116, 127, 0, 157, + 206, 237, 242, 0, 0, 0, 139, 0, 240, 216, + 282, 0, 218, 239, 182, 272, 229, 281, 291, 292, + 269, 289, 296, 257, 120, 268, 280, 136, 251, 0, + 0, 0, 122, 278, 264, 194, 168, 169, 121, 0, + 235, 145, 153, 141, 208, 275, 276, 140, 299, 128, + 288, 124, 129, 287, 202, 271, 279, 195, 187, 123, + 277, 193, 186, 174, 149, 160, 226, 184, 227, 161, + 198, 197, 199, 0, 0, 0, 262, 285, 300, 133, + 0, 270, 294, 295, 0, 230, 134, 154, 148, 225, + 152, 200, 130, 163, 259, 173, 183, 234, 298, 215, + 241, 137, 284, 260, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 115, 125, 180, 742, 232, 151, 0, + 0, 0, 283, 222, 156, 0, 0, 0, 286, 0, + 0, 144, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 117, 118, 126, 132, 138, 143, 147, 150, + 159, 162, 164, 165, 166, 171, 185, 189, 190, 191, + 192, 203, 204, 205, 207, 210, 211, 212, 213, 214, + 217, 219, 220, 221, 223, 224, 233, 236, 243, 244, + 245, 246, 247, 248, 250, 253, 254, 255, 256, 263, + 267, 273, 274, 290, 293, 0, 0, 0, 0, 119, + 170, 188, 249, 252, 258, 265, 320, 321, 322, 238, + 175, 176, 209, 0, 0, 0, 1118, 231, 201, 167, + 0, 146, 177, 142, 0, 0, 0, 179, 0, 0, + 0, 181, 0, 0, 261, 196, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 112, 113, 114, 0, 1120, 0, + 0, 0, 0, 0, 0, 135, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 155, 0, 0, 0, 323, 0, 0, + 0, 0, 228, 0, 266, 158, 178, 131, 172, 116, + 127, 0, 157, 206, 237, 242, 0, 0, 0, 139, + 0, 240, 216, 282, 0, 1122, 239, 182, 272, 229, + 281, 291, 292, 269, 289, 296, 257, 120, 268, 280, + 136, 251, 0, 0, 0, 122, 278, 264, 194, 168, + 169, 121, 0, 235, 145, 153, 141, 208, 275, 276, + 140, 299, 128, 288, 124, 129, 287, 202, 271, 279, + 195, 187, 123, 277, 193, 186, 174, 149, 160, 226, + 184, 227, 161, 198, 197, 199, 0, 0, 0, 262, + 285, 300, 133, 0, 270, 294, 295, 0, 230, 134, + 154, 148, 225, 152, 200, 130, 163, 259, 173, 183, + 234, 298, 215, 241, 137, 284, 260, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 115, 125, 180, 297, + 232, 151, 0, 0, 0, 283, 222, 156, 0, 0, + 0, 286, 0, 0, 144, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 117, 118, 126, 132, 138, + 143, 147, 150, 159, 162, 164, 165, 166, 171, 185, + 189, 190, 191, 192, 203, 204, 205, 207, 210, 211, + 212, 213, 214, 217, 219, 220, 221, 223, 224, 233, + 236, 243, 244, 245, 246, 247, 248, 250, 253, 254, + 255, 256, 263, 267, 273, 274, 290, 293, 0, 0, + 0, 0, 119, 170, 188, 249, 252, 258, 265, 320, + 321, 322, 238, 175, 176, 209, 0, 0, 0, 0, + 231, 201, 167, 0, 146, 177, 142, 0, 0, 0, + 179, 0, 0, 0, 181, 0, 0, 261, 196, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 112, 113, 114, + 0, 0, 1084, 0, 0, 1085, 0, 0, 135, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 155, 0, 0, 0, + 323, 0, 0, 0, 0, 228, 0, 266, 158, 178, + 131, 172, 116, 127, 0, 157, 206, 237, 242, 0, + 0, 0, 139, 0, 240, 216, 282, 0, 218, 239, + 182, 272, 229, 281, 291, 292, 269, 289, 296, 257, + 120, 268, 280, 136, 251, 0, 0, 0, 122, 278, + 264, 194, 168, 169, 121, 0, 235, 145, 153, 141, + 208, 275, 276, 140, 299, 128, 288, 124, 129, 287, + 202, 271, 279, 195, 187, 123, 277, 193, 186, 174, + 149, 160, 226, 184, 227, 161, 198, 197, 199, 0, + 0, 0, 262, 285, 300, 133, 0, 270, 294, 295, + 0, 230, 134, 154, 148, 225, 152, 200, 130, 163, + 259, 173, 183, 234, 298, 215, 241, 137, 284, 260, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 115, + 125, 180, 297, 232, 151, 0, 0, 0, 283, 222, + 156, 0, 0, 0, 286, 0, 0, 144, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 117, 118, + 126, 132, 138, 143, 147, 150, 159, 162, 164, 165, + 166, 171, 185, 189, 190, 191, 192, 203, 204, 205, + 207, 210, 211, 212, 213, 214, 217, 219, 220, 221, + 223, 224, 233, 236, 243, 244, 245, 246, 247, 248, + 250, 253, 254, 255, 256, 263, 267, 273, 274, 290, + 293, 0, 0, 0, 0, 119, 170, 188, 249, 252, + 258, 265, 320, 321, 322, 238, 175, 176, 209, 0, + 0, 0, 0, 231, 201, 167, 0, 146, 177, 142, + 0, 0, 0, 179, 0, 0, 0, 181, 0, 0, + 261, 196, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1023, + 112, 113, 114, 0, 0, 0, 0, 0, 0, 0, + 0, 135, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 155, + 0, 0, 0, 323, 0, 0, 0, 0, 228, 0, + 266, 158, 178, 131, 172, 116, 127, 0, 157, 206, + 237, 242, 0, 0, 0, 139, 0, 240, 216, 282, + 0, 218, 239, 182, 272, 229, 281, 291, 292, 269, + 289, 296, 257, 120, 268, 280, 136, 251, 0, 0, + 0, 122, 278, 264, 194, 168, 169, 121, 0, 235, + 145, 153, 141, 208, 275, 276, 140, 299, 128, 288, + 124, 129, 287, 202, 271, 279, 195, 187, 123, 277, + 193, 186, 174, 149, 160, 226, 184, 227, 161, 198, + 197, 199, 0, 0, 0, 262, 285, 300, 133, 0, + 270, 294, 295, 0, 230, 134, 154, 148, 225, 152, + 200, 130, 163, 259, 173, 183, 234, 298, 215, 241, + 137, 284, 260, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 115, 125, 180, 297, 232, 151, 0, 0, + 0, 283, 222, 156, 0, 0, 0, 286, 0, 0, + 144, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 117, 118, 126, 132, 138, 143, 147, 150, 159, + 162, 164, 165, 166, 171, 185, 189, 190, 191, 192, + 203, 204, 205, 207, 210, 211, 212, 213, 214, 217, + 219, 220, 221, 223, 224, 233, 236, 243, 244, 245, + 246, 247, 248, 250, 253, 254, 255, 256, 263, 267, + 273, 274, 290, 293, 0, 0, 0, 0, 119, 170, + 188, 249, 252, 258, 265, 320, 321, 322, 238, 175, + 176, 209, 0, 0, 0, 0, 231, 201, 167, 0, + 146, 177, 142, 0, 0, 0, 179, 0, 0, 0, + 181, 0, 0, 261, 196, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 71, 0, 0, 112, 113, 114, 0, 0, 0, 0, + 0, 0, 0, 0, 135, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 155, 0, 0, 0, 323, 0, 0, 0, + 0, 228, 0, 266, 158, 178, 131, 172, 116, 127, + 0, 157, 206, 237, 242, 0, 0, 0, 139, 0, + 240, 216, 282, 0, 218, 239, 182, 272, 229, 281, + 291, 292, 269, 289, 296, 257, 120, 268, 280, 136, + 251, 0, 0, 0, 122, 278, 264, 194, 168, 169, + 121, 0, 235, 145, 153, 141, 208, 275, 276, 140, + 299, 128, 288, 124, 129, 287, 202, 271, 279, 195, + 187, 123, 277, 193, 186, 174, 149, 160, 226, 184, + 227, 161, 198, 197, 199, 0, 0, 0, 262, 285, + 300, 133, 0, 270, 294, 295, 0, 230, 134, 154, + 148, 225, 152, 200, 130, 163, 259, 173, 183, 234, + 298, 215, 241, 137, 284, 260, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 115, 125, 180, 297, 232, + 151, 0, 0, 0, 283, 222, 156, 0, 0, 0, + 286, 0, 0, 144, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 117, 118, 126, 132, 138, 143, + 147, 150, 159, 162, 164, 165, 166, 171, 185, 189, + 190, 191, 192, 203, 204, 205, 207, 210, 211, 212, + 213, 214, 217, 219, 220, 221, 223, 224, 233, 236, + 243, 244, 245, 246, 247, 248, 250, 253, 254, 255, + 256, 263, 267, 273, 274, 290, 293, 0, 0, 0, + 0, 119, 170, 188, 249, 252, 258, 265, 320, 321, + 322, 238, 175, 176, 209, 0, 0, 0, 0, 231, + 201, 167, 0, 146, 177, 142, 0, 0, 0, 179, + 0, 0, 0, 181, 0, 0, 261, 196, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 71, 0, 0, 112, 113, 114, 0, + 0, 0, 0, 0, 0, 0, 0, 135, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 155, 0, 0, 0, 323, + 0, 0, 0, 0, 228, 0, 266, 158, 178, 131, + 172, 116, 127, 0, 157, 206, 237, 242, 0, 0, + 0, 139, 0, 240, 216, 282, 0, 218, 239, 182, + 272, 229, 281, 291, 292, 269, 289, 296, 257, 120, + 268, 280, 136, 251, 0, 0, 0, 122, 278, 264, + 194, 168, 169, 121, 0, 235, 145, 153, 141, 208, + 275, 276, 140, 299, 128, 288, 124, 129, 287, 202, + 271, 279, 195, 187, 123, 277, 193, 186, 174, 149, + 160, 226, 184, 227, 161, 198, 197, 199, 0, 0, + 0, 262, 285, 300, 133, 0, 270, 294, 295, 0, + 230, 134, 154, 148, 225, 152, 200, 130, 163, 259, + 173, 183, 234, 298, 215, 241, 137, 284, 260, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 115, 125, + 180, 297, 232, 151, 0, 0, 0, 283, 222, 156, + 0, 0, 0, 286, 0, 0, 144, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 117, 118, 126, + 132, 138, 143, 147, 150, 159, 162, 164, 165, 166, + 171, 185, 189, 190, 191, 192, 203, 204, 205, 207, + 210, 211, 212, 213, 214, 217, 219, 220, 221, 223, + 224, 233, 236, 243, 244, 245, 246, 247, 248, 250, + 253, 254, 255, 256, 263, 267, 273, 274, 290, 293, + 0, 0, 0, 0, 119, 170, 188, 249, 252, 258, + 265, 320, 321, 322, 238, 830, 831, 209, 0, 0, + 0, 0, 231, 201, 167, 0, 146, 177, 142, 0, + 0, 0, 179, 0, 0, 0, 181, 0, 0, 261, + 196, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 112, + 113, 114, 0, 1120, 0, 0, 0, 0, 0, 0, + 135, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 155, 0, + 0, 0, 323, 0, 0, 0, 0, 228, 0, 266, + 158, 178, 131, 172, 116, 127, 0, 157, 206, 237, + 242, 0, 0, 0, 139, 0, 240, 216, 282, 0, + 218, 239, 182, 272, 229, 281, 291, 292, 269, 289, + 296, 257, 120, 268, 280, 136, 251, 0, 0, 0, + 122, 278, 264, 194, 168, 169, 121, 0, 235, 145, + 153, 141, 208, 275, 276, 140, 299, 128, 288, 124, + 129, 287, 202, 271, 279, 195, 187, 123, 277, 193, + 186, 174, 149, 160, 226, 184, 227, 161, 198, 197, + 199, 0, 0, 0, 262, 285, 300, 133, 0, 270, + 294, 295, 0, 230, 134, 154, 148, 225, 152, 200, + 130, 163, 259, 173, 183, 234, 298, 215, 241, 137, + 284, 260, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 115, 125, 180, 297, 232, 151, 0, 0, 0, + 283, 222, 156, 0, 0, 0, 286, 0, 0, 144, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 117, 118, 126, 132, 138, 143, 147, 150, 159, 162, + 164, 165, 166, 171, 185, 189, 190, 191, 192, 203, + 204, 205, 207, 210, 211, 212, 213, 214, 217, 219, + 220, 221, 223, 224, 233, 236, 243, 244, 245, 246, + 247, 248, 250, 253, 254, 255, 256, 263, 267, 273, + 274, 290, 293, 0, 0, 0, 0, 119, 170, 188, + 249, 252, 258, 265, 320, 321, 322, 238, 175, 176, + 209, 0, 0, 0, 0, 231, 201, 167, 0, 146, + 177, 142, 0, 0, 0, 179, 0, 0, 0, 181, + 0, 0, 261, 196, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 112, 113, 114, 0, 1245, 0, 0, 0, + 0, 0, 0, 135, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 155, 0, 0, 0, 323, 0, 0, 0, 0, + 228, 0, 266, 158, 178, 131, 172, 116, 127, 0, + 157, 206, 237, 242, 0, 0, 0, 139, 0, 240, + 216, 282, 0, 218, 239, 182, 272, 229, 281, 291, + 292, 269, 289, 296, 257, 120, 268, 280, 136, 251, + 0, 0, 0, 122, 278, 264, 194, 168, 169, 121, + 0, 235, 145, 153, 141, 208, 275, 276, 140, 299, + 128, 288, 124, 129, 287, 202, 271, 279, 195, 187, + 123, 277, 193, 186, 174, 149, 160, 226, 184, 227, + 161, 198, 197, 199, 0, 0, 0, 262, 285, 300, + 133, 0, 270, 294, 295, 0, 230, 134, 154, 148, + 225, 152, 200, 130, 163, 259, 173, 183, 234, 298, + 215, 241, 137, 284, 260, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 115, 125, 180, 297, 232, 151, + 0, 0, 0, 283, 222, 156, 0, 0, 0, 286, + 0, 0, 144, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 117, 118, 126, 132, 138, 143, 147, + 150, 159, 162, 164, 165, 166, 171, 185, 189, 190, + 191, 192, 203, 204, 205, 207, 210, 211, 212, 213, + 214, 217, 219, 220, 221, 223, 224, 233, 236, 243, + 244, 245, 246, 247, 248, 250, 253, 254, 255, 256, + 263, 267, 273, 274, 290, 293, 0, 0, 0, 0, + 119, 170, 188, 249, 252, 258, 265, 320, 321, 322, + 238, 175, 176, 209, 0, 0, 0, 0, 231, 201, + 167, 817, 146, 177, 142, 0, 0, 0, 179, 0, + 0, 0, 181, 0, 0, 261, 196, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 112, 113, 114, 0, 0, + 0, 0, 0, 0, 0, 0, 135, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 155, 0, 0, 0, 323, 0, + 0, 0, 0, 228, 0, 266, 158, 178, 131, 172, + 116, 127, 0, 157, 206, 237, 242, 0, 0, 0, + 139, 0, 240, 216, 282, 0, 218, 239, 182, 272, + 229, 281, 291, 292, 269, 289, 296, 257, 120, 268, + 280, 136, 251, 0, 0, 0, 122, 278, 264, 194, + 168, 169, 121, 0, 235, 145, 153, 141, 208, 275, + 276, 140, 299, 128, 288, 124, 129, 287, 202, 271, + 279, 195, 187, 123, 277, 193, 186, 174, 149, 160, + 226, 184, 227, 161, 198, 197, 199, 0, 0, 0, + 262, 285, 300, 133, 0, 270, 294, 295, 0, 230, + 134, 154, 148, 225, 152, 200, 130, 163, 259, 173, + 183, 234, 298, 215, 241, 137, 284, 260, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 115, 125, 180, + 297, 232, 151, 0, 0, 0, 283, 222, 156, 0, + 0, 0, 286, 0, 0, 144, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 117, 118, 126, 132, + 138, 143, 147, 150, 159, 162, 164, 165, 166, 171, + 185, 189, 190, 191, 192, 203, 204, 205, 207, 210, + 211, 212, 213, 214, 217, 219, 220, 221, 223, 224, + 233, 236, 243, 244, 245, 246, 247, 248, 250, 253, + 254, 255, 256, 263, 267, 273, 274, 290, 293, 0, + 0, 0, 0, 119, 170, 188, 249, 252, 258, 265, + 320, 321, 322, 238, 175, 176, 209, 0, 0, 0, + 0, 231, 201, 167, 0, 146, 177, 142, 0, 0, + 0, 179, 0, 0, 0, 181, 0, 0, 261, 196, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 112, 113, + 114, 0, 814, 0, 0, 0, 0, 0, 0, 135, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 155, 0, 0, + 0, 323, 0, 0, 0, 0, 228, 0, 266, 158, + 178, 131, 172, 116, 127, 0, 157, 206, 237, 242, + 0, 0, 0, 139, 0, 240, 216, 282, 0, 218, + 239, 182, 272, 229, 281, 291, 292, 269, 289, 296, + 257, 120, 268, 280, 136, 251, 0, 0, 0, 122, + 278, 264, 194, 168, 169, 121, 0, 235, 145, 153, + 141, 208, 275, 276, 140, 299, 128, 288, 124, 129, + 287, 202, 271, 279, 195, 187, 123, 277, 193, 186, + 174, 149, 160, 226, 184, 227, 161, 198, 197, 199, + 0, 0, 0, 262, 285, 300, 133, 0, 270, 294, + 295, 0, 230, 134, 154, 148, 225, 152, 200, 130, + 163, 259, 173, 183, 234, 298, 215, 241, 137, 284, + 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 115, 125, 180, 297, 232, 151, 0, 0, 0, 283, + 222, 156, 0, 0, 0, 286, 0, 0, 144, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 117, + 118, 126, 132, 138, 143, 147, 150, 159, 162, 164, + 165, 166, 171, 185, 189, 190, 191, 192, 203, 204, + 205, 207, 210, 211, 212, 213, 214, 217, 219, 220, + 221, 223, 224, 233, 236, 243, 244, 245, 246, 247, + 248, 250, 253, 254, 255, 256, 263, 267, 273, 274, + 290, 293, 0, 0, 0, 0, 119, 170, 188, 249, + 252, 258, 265, 320, 321, 322, 238, 175, 176, 209, + 0, 0, 0, 0, 231, 201, 167, 0, 146, 177, + 142, 0, 0, 0, 179, 0, 0, 0, 181, 0, + 0, 261, 196, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 112, 113, 114, 0, 0, 0, 0, 0, 0, + 0, 0, 135, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 155, 0, 0, 0, 323, 0, 0, 0, 0, 228, + 0, 266, 158, 178, 131, 172, 116, 127, 0, 157, + 206, 237, 242, 0, 0, 0, 139, 0, 240, 216, + 282, 0, 218, 239, 182, 272, 229, 281, 291, 292, + 269, 289, 296, 257, 120, 268, 280, 136, 251, 0, + 0, 0, 122, 278, 264, 194, 168, 169, 121, 0, + 235, 145, 153, 141, 208, 275, 276, 140, 299, 128, + 288, 124, 129, 287, 202, 271, 279, 195, 187, 123, + 277, 193, 186, 174, 149, 160, 226, 184, 227, 161, + 198, 197, 199, 0, 0, 0, 262, 285, 300, 133, + 0, 270, 294, 295, 0, 230, 134, 154, 148, 225, + 152, 200, 130, 163, 259, 173, 183, 234, 298, 215, + 241, 137, 284, 260, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 115, 125, 180, 297, 232, 151, 0, + 0, 0, 283, 222, 156, 0, 0, 437, 286, 0, + 0, 144, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 117, 118, 126, 132, 138, 143, 147, 150, + 159, 162, 164, 165, 166, 171, 185, 189, 190, 191, + 192, 203, 204, 205, 207, 210, 211, 212, 213, 214, + 217, 219, 220, 221, 223, 224, 233, 236, 243, 244, + 245, 246, 247, 248, 250, 253, 254, 255, 256, 263, + 267, 273, 274, 290, 293, 0, 0, 0, 0, 119, + 170, 188, 249, 252, 258, 265, 320, 321, 322, 238, + 175, 176, 209, 0, 0, 0, 0, 231, 201, 167, + 0, 146, 177, 853, 0, 0, 0, 179, 0, 0, + 0, 181, 0, 0, 261, 196, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 112, 113, 114, 0, 852, 0, + 0, 0, 0, 0, 0, 135, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 155, 0, 0, 0, 323, 0, 0, + 0, 0, 228, 0, 266, 158, 178, 131, 172, 116, + 127, 0, 157, 206, 237, 242, 0, 0, 0, 139, + 0, 240, 216, 282, 0, 218, 239, 182, 272, 229, + 281, 291, 292, 269, 289, 296, 257, 120, 268, 280, + 136, 251, 0, 0, 0, 122, 278, 264, 194, 168, + 169, 121, 0, 235, 145, 153, 141, 208, 275, 276, + 140, 299, 128, 288, 124, 129, 287, 202, 271, 279, + 195, 187, 123, 277, 193, 186, 174, 149, 160, 226, + 184, 227, 161, 198, 197, 199, 0, 0, 0, 262, + 285, 300, 133, 0, 270, 294, 295, 0, 230, 134, + 154, 148, 225, 152, 200, 130, 163, 259, 173, 183, + 234, 298, 215, 241, 137, 284, 260, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 115, 125, 180, 297, + 232, 151, 0, 0, 0, 283, 222, 156, 0, 0, + 0, 286, 0, 0, 144, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 117, 118, 126, 132, 138, + 143, 147, 150, 159, 162, 164, 165, 166, 171, 185, + 189, 190, 191, 192, 203, 204, 205, 207, 210, 211, + 212, 213, 214, 217, 219, 220, 221, 223, 224, 233, + 236, 243, 244, 245, 246, 247, 248, 250, 253, 254, + 255, 256, 263, 267, 273, 274, 290, 293, 0, 0, + 0, 0, 119, 170, 188, 249, 252, 258, 265, 320, + 321, 322, 238, 175, 176, 0, 466, 0, 0, 0, + 231, 201, 167, 209, 0, 0, 0, 0, 0, 0, + 0, 0, 146, 177, 142, 0, 0, 0, 179, 0, + 0, 0, 181, 0, 0, 261, 196, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 112, 113, 114, 0, 0, + 0, 0, 0, 0, 0, 0, 135, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 155, 0, 0, 0, 323, 0, + 0, 0, 0, 228, 0, 266, 158, 178, 131, 172, + 116, 127, 0, 157, 206, 237, 242, 0, 0, 0, + 139, 0, 240, 216, 282, 0, 218, 239, 182, 272, + 229, 281, 291, 292, 269, 289, 296, 257, 120, 268, + 280, 136, 251, 0, 0, 0, 122, 278, 264, 194, + 168, 169, 121, 0, 235, 145, 153, 141, 208, 275, + 276, 140, 299, 128, 288, 124, 129, 287, 202, 271, + 279, 195, 187, 123, 277, 193, 186, 174, 149, 160, + 226, 184, 227, 161, 198, 197, 199, 0, 0, 0, + 262, 285, 300, 133, 0, 270, 294, 295, 0, 230, + 134, 154, 148, 225, 152, 200, 130, 163, 259, 173, + 183, 234, 298, 215, 241, 137, 284, 260, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 115, 125, 180, + 297, 232, 151, 0, 0, 0, 283, 222, 156, 0, + 0, 0, 286, 0, 0, 144, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 117, 118, 126, 132, + 138, 143, 147, 150, 159, 162, 164, 165, 166, 171, + 185, 189, 190, 191, 192, 203, 204, 205, 207, 210, + 211, 212, 213, 214, 217, 219, 220, 221, 223, 224, + 233, 236, 243, 244, 245, 246, 247, 248, 250, 253, + 254, 255, 256, 263, 267, 273, 274, 290, 293, 0, + 0, 0, 0, 119, 170, 188, 249, 252, 258, 265, + 320, 321, 322, 238, 175, 176, 209, 0, 0, 0, + 0, 231, 201, 167, 0, 146, 177, 142, 0, 0, + 0, 179, 0, 0, 0, 181, 0, 0, 261, 196, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 112, 113, + 114, 0, 0, 0, 0, 0, 0, 0, 0, 135, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 155, 0, 315, + 0, 323, 0, 0, 0, 0, 228, 0, 266, 158, + 178, 131, 172, 116, 127, 0, 157, 206, 237, 242, + 0, 0, 0, 139, 0, 240, 216, 282, 0, 218, + 239, 182, 272, 229, 281, 291, 292, 269, 289, 296, + 257, 120, 268, 280, 136, 251, 0, 0, 0, 122, + 278, 264, 194, 168, 169, 121, 0, 235, 145, 153, + 141, 208, 275, 276, 140, 299, 128, 288, 124, 129, + 287, 202, 271, 279, 195, 187, 123, 277, 193, 186, + 174, 149, 160, 226, 184, 227, 161, 198, 197, 199, + 0, 0, 0, 262, 285, 300, 133, 0, 270, 294, + 295, 0, 230, 134, 154, 148, 225, 152, 200, 130, + 163, 259, 173, 183, 234, 298, 215, 241, 137, 284, + 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 115, 125, 180, 297, 232, 151, 0, 0, 0, 283, + 222, 156, 0, 0, 0, 286, 0, 0, 144, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 117, + 118, 126, 132, 138, 143, 147, 150, 159, 162, 164, + 165, 166, 171, 185, 189, 190, 191, 192, 203, 204, + 205, 207, 210, 211, 212, 213, 214, 217, 219, 220, + 221, 223, 224, 233, 236, 243, 244, 245, 246, 247, + 248, 250, 253, 254, 255, 256, 263, 267, 273, 274, + 290, 293, 0, 0, 0, 0, 119, 170, 188, 249, + 252, 258, 265, 320, 321, 322, 238, 175, 176, 209, + 0, 0, 0, 0, 231, 201, 167, 0, 146, 177, + 142, 0, 0, 0, 179, 0, 0, 0, 181, 0, + 0, 261, 196, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 112, 113, 114, 0, 0, 0, 0, 0, 0, + 0, 0, 135, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 155, 0, 0, 0, 323, 0, 0, 0, 0, 228, + 0, 266, 158, 178, 131, 172, 116, 127, 0, 157, + 206, 237, 242, 0, 0, 0, 139, 0, 240, 216, + 282, 0, 218, 239, 182, 272, 229, 281, 291, 292, + 269, 289, 296, 257, 120, 268, 280, 136, 251, 0, + 0, 0, 122, 278, 264, 194, 168, 169, 121, 0, + 235, 145, 153, 141, 208, 275, 276, 140, 299, 128, + 288, 124, 129, 287, 202, 271, 279, 195, 187, 123, + 277, 193, 186, 174, 149, 160, 226, 184, 227, 161, + 198, 197, 199, 0, 0, 0, 262, 285, 300, 133, + 0, 270, 294, 295, 0, 230, 134, 154, 148, 225, + 152, 200, 130, 163, 259, 173, 183, 234, 298, 215, + 241, 137, 284, 260, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 115, 125, 180, 297, 232, 151, 0, + 0, 0, 283, 222, 156, 0, 0, 0, 286, 0, + 0, 144, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 117, 118, 126, 132, 138, 143, 147, 150, + 159, 162, 164, 165, 166, 171, 185, 189, 190, 191, + 192, 203, 204, 205, 207, 210, 211, 212, 213, 214, + 217, 219, 220, 221, 223, 224, 233, 236, 243, 244, + 245, 246, 247, 248, 250, 253, 254, 255, 256, 263, + 267, 273, 274, 290, 293, 0, 0, 0, 0, 119, + 170, 188, 249, 252, 258, 265, 320, 321, 322, 238, + 175, 176, 0, 0, 0, 0, 0, 231, 201, 167, +} + +var yyPact = [...]int{ + 2961, -1000, -286, 1117, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 1091, 151, -1000, -1000, -1000, -1000, + -1000, -1000, 425, 15290, 124, 240, 2, 21138, 237, 6, + 21501, -1000, 92, -1000, 83, 21501, 87, -9, -1000, -1000, + -20, -30, -1000, 12386, -251, -1000, 575, -1000, -1000, -264, + 20041, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 152, 1067, 1076, 1088, 1117, 1237, -1000, 10187, 179, + 179, 20775, 8351, -1000, -1000, 391, 21501, 231, 21501, -88, + 204, 177, 177, 177, 233, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 228, 21501, 672, 672, 312, -1000, 21501, 176, 672, + 176, 202, 176, 176, -1000, 21501, -1000, 299, -1000, -1000, + -1000, -1000, -1000, -1000, 21501, 672, 1007, 445, 200, 5656, + -1000, 368, -1000, 5656, 116, 5656, -7, 1102, 120, 36, + -1000, 672, -48, -1000, -1000, -1000, -1000, -1000, -1000, 187, + -1000, -1000, 21501, -42, 193, 418, -1000, -1000, -1000, -1000, + -1000, -1000, 806, 673, -1000, 12386, 1877, 734, 734, -1000, + -1000, 266, -1000, -1000, 13838, 13838, 13838, 13838, 13838, 13838, + 13838, 13838, 13838, 14201, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 734, 297, + -1000, 11294, 734, 734, 734, 734, 734, 734, 734, 734, + 12386, 734, 734, 734, 734, 734, 734, 734, 734, 734, + 734, 734, 734, 734, 734, 734, 734, -1000, -1000, -257, + -274, 21501, -1000, 21501, 639, 190, 840, 21501, -1000, 892, + 1091, -1000, 151, -1000, -1000, -1000, 1023, 12386, 12386, 152, + 976, 10187, -1000, -1000, 992, -1000, -1000, -1000, -1000, 537, + 1114, -1000, 14927, 293, 19315, 18226, 21501, 844, 832, -1000, + -1000, 291, 841, 7966, -66, -1000, -1000, -1000, 227, 20404, + -1000, -1000, -1000, 1001, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 824, 21501, -1000, 1735, -1000, 672, + 1060, -21, 21501, 672, 481, 672, 21501, 177, 21501, 5656, + 5656, 5656, 128, 160, 137, 21501, 838, 186, 21501, 1044, + 918, 21501, 176, 672, 672, -1000, 7196, -1000, 5656, 445, + -1000, 637, 12386, 5656, 5656, 5656, 21501, 5656, 5656, -1000, + -1000, -1000, 483, 589, -1000, -1000, -1000, -1000, -1000, 5656, + 5656, -1000, 1113, 437, -1000, -1000, -1000, -1000, 12386, 354, + -1000, 917, -1000, -1000, -52, 86, -1000, 21501, -1000, 1117, + -1000, -1000, -1000, -96, -1000, -1000, 12386, 12386, 12386, 12386, + 592, 542, -1000, 441, 13838, 561, 500, 13838, 13838, 13838, + 13838, 13838, 13838, 13838, 13838, 13838, 13838, 13838, 13838, 13838, + 13838, 13838, 713, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 672, -1000, 161, 879, 879, 320, 320, 320, 320, + 320, 320, 320, 320, 320, 14564, -1000, 8723, 7196, 724, + 810, 1091, 20041, 10187, 10187, 12386, 12386, 10919, 10553, 10187, + 1008, 471, 673, 21501, -1000, -1000, 13475, -1000, -1000, -1000, + -1000, -1000, 638, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 21501, 21501, 10187, 10187, 10187, 10187, 10187, 672, 672, 672, + 755, 616, -1000, -1000, 21501, -1000, 21501, 776, 734, 21501, + 1076, 724, 992, -1000, 1136, 327, 613, 836, -1000, 514, + 1091, 17137, 750, -1000, 992, -1000, -1000, -1000, 21501, 542, + -1000, -1000, 19678, -1000, -1000, 6811, 140, 21501, -1000, 756, + 1164, -1000, -1000, -1000, -1000, 1059, 15665, 16036, 16774, 734, + -1000, -1000, 140, 802, 18226, 21501, -1000, -1000, 18226, 21501, + 6426, 7581, -66, -1000, 829, -1000, -53, -57, 9455, 9089, + 309, -1000, -1000, -1000, -1000, 5271, 324, 685, 485, 5, + -1000, -1000, -1000, 854, -1000, 854, 854, 854, 854, 32, + 32, 32, 32, -1000, -1000, -1000, -1000, -1000, 897, 891, + -1000, 854, 854, 854, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 884, 884, 884, 859, 859, 903, 151, 21501, + 189, 1056, 5656, 1041, 5656, -1000, 21501, 127, -1000, -1000, + -1000, 21501, 21501, 21501, 21501, 21501, 287, 21501, 21501, 834, + -1000, 21501, 5656, 21501, -1000, -1000, 282, -1000, -1000, -1000, + -1000, -1000, 673, -1000, -1000, -1000, -1000, -1000, -1000, 21501, + -1000, -1000, 21501, -1000, 19315, -1000, -1000, 21501, 445, 21501, + 21501, 673, -1000, 636, 21501, -1000, 21501, -1000, 18952, -1000, + -1000, -1000, -1000, 673, 441, 610, 633, -1000, -1000, 601, + -1000, -1000, -1000, 2325, -1000, -1000, -1000, -1000, 561, 13838, + 13838, 13838, 1028, 2325, 2305, 549, 1726, 320, 444, 444, + 333, 333, 333, 333, 333, 467, 467, -1000, -1000, -1000, + 638, -1000, -1000, -1000, 638, 10187, 10187, 820, 734, 278, + -1000, 152, -1000, -1000, -1000, 1076, 804, 804, 546, 642, + 409, 1107, 804, 405, 1104, 804, 804, 10187, -1000, -1000, + 482, -1000, 12386, 638, -1000, 277, -1000, 1423, 833, 831, + 804, 638, 638, 804, 804, -233, -1000, 739, -1000, -1000, + 668, -1000, 415, -1000, -1000, 992, -1000, -1000, 151, 697, + -1000, 1023, -1000, -1000, 964, 12386, 12386, 12386, -1000, -1000, + -1000, 1076, 1079, -1000, 985, 983, 1101, 10187, 18226, 992, + -1000, -1000, -1000, 273, 169, 734, -1000, 21501, 18226, 17863, + 17863, 18226, 17863, -1000, 949, 943, -1000, 933, 931, 1002, + 21501, -1000, 808, 638, 724, 15665, -1000, -1000, 18589, -1000, + -1000, 264, 734, 10187, 1101, 18226, 752, -1000, 752, -1000, + 269, -1000, -1000, 829, -66, -70, -1000, -1000, -1000, -1000, + 673, -1000, -1000, 673, -1000, 733, 828, 4886, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 883, 672, -1000, 1034, 310, + 431, 672, 1033, -1000, -1000, -1000, 1019, -1000, 530, 3, + -1000, -1000, 573, 32, 32, -1000, -1000, 309, 996, 309, + 309, 309, 634, 634, -1000, -1000, -1000, -1000, 571, -1000, + -1000, -1000, 568, -1000, 916, 21501, 1117, 1052, 21501, 151, + -1000, -1000, -1000, -1000, -1000, 447, 447, 351, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 139, + 900, -1000, -1000, -1000, -1000, 69, 126, 185, -1000, 5656, + -1000, 5656, 7196, 437, 445, 878, 437, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 1028, 2325, 2039, -1000, 13838, 13838, -1000, -134, 804, 804, + 10187, 7196, 1091, 1023, -1000, -1000, 292, 713, 292, 13838, + 13838, -1000, 13838, 13838, -1000, -100, 754, 429, -1000, 12386, + 646, -1000, 7196, -1000, 13838, 13838, -1000, -1000, -1000, -1000, + -1000, 630, -1000, -1000, -1000, -1000, -1000, 616, -1000, -1000, + 414, -1000, 879, 10187, 992, 724, 21501, 1049, -1000, 962, + 673, 673, -1000, 1023, 21501, -1000, -1000, -1000, -1000, 1099, + 12386, -1000, 826, -1000, 6041, 52, 734, 1117, 16411, 21501, + 815, -1000, 410, 1164, 882, 915, 960, -1000, -1000, -1000, + -1000, 935, -1000, 929, -1000, -1000, -1000, -1000, -1000, -1000, + 724, -1000, -1000, 230, 219, 208, 21501, 638, 1091, 752, + -281, -281, 340, -1000, -1000, -60, -67, -1000, -1000, -1000, + 5271, -1000, 5271, 21501, 159, -1000, 672, 672, -1000, -1000, + 864, 913, 13838, -1000, -1000, -1000, 676, 309, 309, -1000, + 453, -1000, -1000, -1000, 792, -1000, 790, 822, 774, 21501, + -1000, 151, 1046, 1117, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 21501, -1000, -1000, -1000, -1000, -1000, 21501, -110, 672, + 21501, 21501, 21501, 21501, -1000, -1000, 262, 445, -1000, 445, + 21501, 445, -1000, 13838, 2325, 2325, -1000, 17863, -134, -134, + 638, -1000, 1076, -1000, 638, 854, 854, -1000, 854, 859, + -1000, -1000, 854, 72, 854, 60, 638, 638, 2287, 2074, + 1997, 1893, 734, -95, -1000, 673, 12386, -1000, 1982, 1959, + 619, -1000, 879, -1000, 750, 10187, -1000, -1000, 734, -1000, + 638, -1000, 1096, 1081, 673, -1000, -281, -1000, -1000, 1036, + 10187, 812, -1000, -1000, 9821, 772, 260, 764, -1000, 1101, + 21501, 12386, -1000, -1000, 12386, 855, -1000, 12386, -1000, -1000, + -1000, 734, 734, 734, 764, -1000, 1076, -281, -1000, -1000, + -1000, -1000, -1000, -1000, 4886, -1000, 762, -1000, 854, -1000, + -1000, -1000, 21501, 9, 1135, 2325, -1000, -1000, -1000, -1000, + -1000, 32, 606, 32, 563, -1000, 562, 5656, 1117, 151, + -1000, -1000, -1000, -1000, 1030, -1000, 7196, -1000, -1000, 852, + 901, -1000, -1000, 7196, -1000, -1000, 746, -1000, 2325, 239, + -1000, -1000, -1000, -1000, 1023, -1000, -1000, 180, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 13838, 13838, 13838, 13838, + 13838, 1076, 605, 673, 13838, 13838, -1000, -1000, 1101, 750, + 151, -1000, -1000, 12386, 12386, -1000, 1032, 820, 734, -1000, + 149, 21501, 21501, -1000, 21501, 1091, -1000, 673, 673, 21501, + 673, 17500, 21501, 21501, 16036, -281, -1000, 268, 21501, -1000, + 730, 341, -1000, 88, 309, -1000, 309, 657, 645, -1000, + 1117, 734, 817, -1000, 382, 21501, 21501, -1000, -1000, 638, + -1000, 1091, 1080, -1000, -1000, -1000, 1423, 1423, 1423, 1423, + 174, 638, -1000, 1423, 1423, 1099, 1101, 724, 673, 806, + 1131, -1000, 734, 1117, 252, -1000, -1000, 1076, 701, 697, + -1000, 697, 697, 264, -1000, 268, -1000, 672, 370, 599, + -1000, 156, 534, 1029, -1000, 1027, -1000, -1000, -1000, -1000, + -1000, 138, 7196, 5271, 688, -1000, -1000, -131, 12386, -1000, + -1000, -1000, -1000, 638, 118, -114, -1000, -1000, -1000, 1096, + 1099, -1000, 21501, 812, 21501, -281, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 547, -1000, -1000, 21501, -1000, 578, -1000, + -1000, 680, -1000, 21501, -1000, -1000, 900, -1000, 12020, 11657, + 806, -1000, 957, -106, -125, -1000, 1096, 738, -1000, -1000, + -1000, 850, -1000, -1000, 138, 982, -110, -1000, 13112, -136, + -142, 112, -1000, 13112, -1000, 956, -1000, -1000, 21501, -1000, + 134, -1000, 510, -1000, -1000, -1000, -1000, -1000, 499, -112, + 648, 130, 13112, 13112, -116, 909, 734, -1000, -1000, -127, + 907, -1000, 1111, 12749, -1000, -1000, 1130, 272, 272, 1423, + 638, -1000, -1000, -1000, 166, 570, -1000, -1000, -1000, -1000, + -1000, -1000, +} + +var yyPgo = [...]int{ + 0, 1443, 1442, 54, 87, 83, 1440, 1438, 1435, 141, + 122, 121, 1429, 1427, 1426, 1425, 1424, 1421, 1420, 1419, + 1418, 1417, 1416, 1415, 1414, 1407, 1404, 1402, 1401, 1399, + 1392, 1390, 1388, 1387, 1163, 1386, 1385, 1384, 1383, 1381, + 85, 1380, 52, 1375, 1372, 1370, 1368, 1367, 1361, 1360, + 1359, 1356, 1355, 1354, 46, 296, 50, 17, 40, 72, + 42, 69, 396, 27, 62, 73, 66, 1353, 1352, 39, + 1351, 1350, 89, 1349, 1343, 63, 1342, 1340, 67, 78, + 80, 1339, 14, 29, 1338, 1336, 1335, 1333, 28, 1897, + 1332, 1326, 15, 1324, 1323, 105, 1322, 1321, 77, 11, + 18, 21, 22, 1319, 70, 1318, 9, 1316, 76, 1310, + 1309, 12, 7, 64, 1308, 74, 1306, 68, 23, 1304, + 16, 1303, 86, 38, 1302, 1301, 8, 1300, 1298, 30, + 88, 60, 84, 1296, 98, 1295, 1293, 469, 1292, 908, + 57, 1290, 1289, 119, 41, 1286, 94, 91, 1285, 1282, + 1277, 1275, 31, 931, 2289, 37, 82, 1274, 1273, 1271, + 3220, 45, 61, 25, 1270, 1269, 1263, 43, 431, 49, + 1259, 1257, 36, 1255, 1253, 1252, 1251, 1250, 1241, 1240, + 360, 1238, 1237, 1236, 33, 24, 1235, 1231, 81, 32, + 1225, 1224, 1223, 65, 79, 1222, 1217, 58, 1214, 95, + 59, 1203, 1201, 1200, 1198, 1197, 35, 20, 1194, 19, + 1193, 13, 1192, 34, 1191, 6, 1190, 10, 1189, 3, + 0, 1187, 5, 1186, 1183, 53, 1, 1182, 2, 47, + 1181, 1179, 1178, 101, 26, 1177, 1176, 4, 1171, 1171, + 102, 90, 1165, 1155, 1801, 1288, 100, 1147, 128, +} + +var yyR1 = [...]int{ + 0, 242, 243, 243, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 220, 220, 220, 223, + 224, 20, 3, 3, 3, 3, 2, 2, 8, 4, + 4, 4, 5, 5, 9, 9, 9, 43, 44, 44, + 10, 11, 11, 11, 11, 246, 246, 72, 72, 73, + 73, 122, 122, 12, 13, 13, 132, 132, 131, 131, + 131, 133, 133, 133, 133, 170, 170, 135, 135, 134, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 21, + 39, 39, 33, 33, 33, 33, 36, 36, 36, 36, + 36, 36, 52, 52, 52, 222, 222, 221, 219, 219, + 218, 218, 217, 22, 22, 202, 204, 204, 203, 203, + 203, 203, 194, 173, 173, 173, 173, 176, 176, 174, + 174, 174, 174, 174, 174, 174, 174, 174, 175, 175, + 175, 175, 175, 177, 177, 177, 177, 177, 178, 178, + 178, 178, 178, 178, 178, 178, 178, 178, 178, 178, + 178, 178, 178, 179, 179, 179, 179, 179, 179, 179, + 179, 193, 193, 180, 180, 188, 188, 189, 189, 189, + 186, 186, 187, 187, 190, 190, 190, 182, 182, 183, + 183, 191, 191, 184, 184, 184, 185, 185, 185, 192, + 192, 192, 192, 192, 181, 181, 195, 195, 212, 212, + 211, 211, 211, 35, 201, 201, 208, 208, 208, 208, + 208, 198, 198, 198, 199, 199, 197, 197, 200, 200, + 210, 210, 209, 196, 196, 213, 213, 213, 213, 227, + 228, 226, 226, 226, 226, 226, 205, 205, 205, 206, + 206, 206, 207, 207, 207, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 225, 225, 225, 225, 225, 225, 225, 225, + 225, 225, 225, 225, 225, 225, 216, 214, 214, 215, + 215, 16, 23, 23, 17, 17, 17, 17, 17, 17, + 18, 18, 24, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 141, 141, 143, 143, 139, 139, + 142, 142, 140, 140, 140, 144, 144, 144, 145, 145, + 171, 171, 171, 26, 26, 28, 28, 29, 30, 30, + 165, 165, 166, 166, 31, 32, 45, 45, 45, 45, + 45, 45, 47, 47, 47, 7, 7, 7, 7, 46, + 46, 46, 6, 6, 27, 27, 27, 27, 19, 247, + 40, 41, 41, 42, 42, 42, 49, 49, 49, 48, + 48, 48, 56, 56, 60, 60, 60, 60, 60, 61, + 61, 61, 61, 61, 61, 55, 55, 59, 59, 59, + 59, 59, 157, 157, 157, 156, 156, 63, 63, 64, + 64, 65, 65, 65, 68, 66, 66, 66, 66, 105, + 81, 81, 121, 121, 120, 120, 123, 123, 67, 67, + 67, 67, 69, 69, 70, 70, 71, 71, 164, 164, + 163, 163, 163, 162, 162, 74, 74, 74, 76, 75, + 75, 75, 75, 77, 77, 79, 79, 78, 78, 78, + 78, 80, 82, 82, 82, 82, 82, 83, 83, 62, + 62, 62, 62, 62, 62, 62, 62, 62, 138, 138, + 85, 85, 84, 84, 84, 84, 84, 84, 84, 84, + 84, 84, 97, 97, 97, 97, 97, 97, 86, 86, + 86, 86, 86, 86, 86, 54, 54, 98, 98, 98, + 233, 233, 104, 99, 99, 89, 89, 89, 89, 89, + 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, + 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, + 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, + 96, 53, 53, 93, 93, 93, 93, 91, 91, 91, + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, + 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, + 92, 92, 92, 92, 92, 92, 248, 248, 95, 94, + 94, 94, 94, 94, 94, 94, 50, 50, 50, 50, + 50, 234, 234, 234, 234, 235, 238, 238, 236, 236, + 236, 236, 236, 237, 237, 237, 237, 237, 239, 239, + 239, 240, 240, 241, 241, 169, 169, 172, 172, 172, + 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, + 172, 109, 109, 51, 51, 107, 107, 108, 110, 110, + 106, 106, 106, 88, 88, 88, 88, 88, 88, 88, + 88, 90, 90, 90, 111, 111, 112, 112, 113, 113, + 114, 114, 115, 116, 116, 116, 117, 117, 117, 117, + 118, 118, 118, 87, 87, 87, 87, 119, 119, 119, + 119, 124, 124, 125, 57, 57, 58, 100, 100, 102, + 102, 101, 103, 126, 126, 129, 127, 127, 127, 130, + 130, 130, 130, 130, 130, 130, 128, 128, 128, 159, + 159, 159, 136, 136, 146, 146, 147, 147, 137, 137, + 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, + 149, 149, 149, 150, 150, 151, 151, 151, 158, 158, + 154, 154, 155, 155, 160, 160, 161, 161, 229, 230, + 231, 231, 232, 232, 232, 34, 37, 37, 38, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 244, 245, + 167, 168, 168, 168, +} + +var yyR2 = [...]int{ + 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, + 1, 2, 4, 8, 6, 7, 4, 6, 5, 8, + 10, 11, 1, 3, 8, 7, 7, 1, 1, 1, + 11, 9, 8, 7, 7, 1, 1, 1, 3, 1, + 3, 0, 4, 3, 5, 4, 1, 3, 3, 2, + 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, + 2, 2, 8, 5, 7, 6, 8, 5, 5, 4, + 0, 1, 4, 5, 7, 4, 5, 5, 4, 4, + 4, 3, 1, 1, 1, 0, 2, 1, 0, 2, + 1, 3, 3, 4, 5, 4, 2, 4, 1, 3, + 3, 3, 8, 3, 1, 1, 1, 2, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, + 2, 2, 2, 1, 2, 2, 2, 1, 4, 4, + 2, 2, 3, 3, 3, 3, 1, 1, 1, 1, + 1, 6, 6, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 3, 0, 3, 0, 5, 0, 3, 5, + 0, 1, 0, 1, 0, 1, 2, 0, 2, 0, + 3, 0, 1, 0, 3, 3, 0, 2, 2, 0, + 2, 1, 2, 1, 0, 2, 5, 4, 1, 2, + 2, 3, 2, 2, 0, 1, 2, 3, 3, 2, + 2, 1, 1, 1, 1, 1, 1, 1, 0, 1, + 1, 3, 2, 3, 1, 10, 11, 11, 12, 3, + 3, 1, 1, 2, 2, 2, 0, 1, 3, 1, + 2, 3, 1, 1, 1, 6, 7, 7, 7, 7, + 4, 5, 4, 4, 7, 5, 5, 5, 12, 7, + 5, 9, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 7, 1, 3, 8, + 8, 3, 3, 5, 4, 6, 5, 6, 4, 4, + 3, 2, 3, 4, 4, 3, 4, 4, 4, 4, + 4, 4, 3, 2, 7, 2, 3, 4, 3, 7, + 5, 4, 2, 4, 4, 3, 3, 5, 2, 6, + 4, 3, 7, 4, 1, 1, 0, 1, 0, 1, + 1, 1, 0, 2, 2, 0, 2, 2, 0, 2, + 0, 1, 1, 2, 1, 1, 2, 1, 1, 5, + 0, 1, 0, 1, 2, 3, 0, 3, 3, 3, + 3, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 1, 1, 5, 3, 2, 2, 3, 3, 2, 0, + 2, 0, 2, 1, 2, 2, 0, 1, 1, 0, + 1, 1, 0, 1, 0, 1, 2, 3, 4, 1, + 1, 1, 1, 1, 1, 1, 3, 1, 2, 2, + 3, 5, 0, 1, 2, 1, 1, 0, 2, 1, + 3, 1, 1, 1, 2, 1, 3, 3, 3, 3, + 3, 7, 0, 3, 1, 3, 1, 3, 4, 4, + 4, 3, 2, 4, 0, 1, 0, 2, 0, 1, + 0, 1, 2, 1, 1, 1, 2, 2, 1, 2, + 3, 2, 3, 2, 2, 2, 1, 1, 3, 5, + 7, 3, 0, 5, 4, 5, 5, 0, 2, 1, + 3, 3, 3, 2, 3, 2, 1, 2, 0, 3, + 1, 1, 3, 3, 4, 4, 5, 3, 4, 5, + 6, 2, 1, 2, 1, 2, 1, 2, 1, 1, + 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, + 1, 2, 3, 1, 3, 1, 1, 1, 1, 1, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, + 4, 1, 1, 5, 6, 6, 6, 4, 4, 6, + 6, 6, 8, 8, 8, 8, 9, 8, 5, 4, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 8, 8, 0, 2, 3, 4, + 4, 4, 4, 4, 4, 4, 0, 3, 4, 7, + 3, 0, 4, 3, 2, 3, 0, 3, 0, 2, + 5, 2, 5, 2, 2, 2, 2, 2, 0, 2, + 3, 1, 3, 5, 8, 1, 1, 2, 3, 3, + 1, 2, 2, 1, 1, 2, 1, 2, 2, 1, + 2, 0, 1, 0, 2, 1, 2, 4, 0, 2, + 1, 3, 5, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 2, 2, 0, 3, 0, 2, 0, 3, + 1, 3, 2, 0, 1, 1, 0, 2, 4, 4, + 0, 2, 4, 2, 1, 5, 4, 1, 3, 3, + 5, 0, 1, 5, 0, 1, 2, 1, 3, 1, + 2, 3, 1, 1, 3, 3, 1, 2, 3, 3, + 3, 3, 3, 3, 3, 3, 1, 2, 1, 1, + 1, 1, 1, 1, 0, 2, 0, 3, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 1, 1, 1, 1, 0, 1, 1, 0, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, + 0, 1, 0, 1, 3, 5, 2, 3, 3, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 1, 1, +} + +var yyChk = [...]int{ + -1000, -242, -1, -3, -8, -9, -10, -11, -12, -13, + -14, -15, -16, -17, -18, -24, -25, -26, -28, -29, + -30, -31, -32, -6, -27, -19, -20, -33, -36, -34, + -35, -37, -38, -21, -4, -244, 6, 7, -43, -44, + 10, 30, -22, 123, 124, 126, 125, 159, 127, 152, + 53, 173, 174, 176, 177, 178, 179, -47, 157, 158, + 31, 32, 129, 34, -39, 385, 374, 386, 387, 378, + 275, 57, 8, 9, 262, 154, 153, 25, 388, -243, + 390, -42, 5, -113, 15, -3, -40, -247, -40, -40, + -40, -40, -40, -202, -204, 57, 96, -151, 133, 77, + -134, 254, 130, 131, -135, 137, -154, 375, 376, 377, + -220, -153, 60, 61, 62, 272, 145, 311, 312, 368, + 173, 187, 181, 208, 200, 273, 313, 146, 198, 201, + 241, 143, 314, 228, 235, 71, 176, 250, 315, 155, + 196, 192, 29, 316, 290, 190, 27, 317, 237, 213, + 318, 277, 239, 191, 236, 129, 283, 148, 141, 319, + 214, 218, 320, 242, 321, 322, 323, 388, 185, 186, + 369, 324, 144, 244, 212, 379, 380, 28, 142, 33, + 274, 37, 163, 245, 216, 325, 211, 207, 370, 326, + 327, 328, 329, 210, 184, 206, 41, 220, 219, 221, + 240, 387, 203, 330, 331, 332, 149, 333, 193, 18, + 334, 335, 336, 337, 338, 248, 158, 339, 161, 340, + 341, 342, 282, 343, 344, 238, 215, 217, 138, 165, + 234, 386, 276, 345, 246, 189, 346, 150, 378, 162, + 157, 249, 151, 347, 348, 349, 350, 351, 352, 371, + 353, 177, 372, 354, 355, 356, 357, 172, 373, 243, + 252, 40, 225, 358, 183, 374, 140, 359, 174, 169, + 230, 204, 164, 360, 361, 194, 195, 209, 182, 205, + 175, 166, 159, 281, 251, 226, 287, 202, 199, 170, + 362, 167, 168, 363, 231, 232, 171, 275, 247, 197, + 227, -137, 133, 254, 130, 232, 135, 131, 131, 132, + 133, -134, 254, 130, 375, 131, -78, -160, -220, -153, + 375, 376, 377, 133, 131, 114, 201, 241, 123, 229, + 237, -143, 238, 165, -171, 131, -139, 228, 231, 232, + 171, 368, 175, 239, 243, 242, 233, -160, 175, -165, + 180, -154, 178, -139, -45, 364, 127, -167, -167, 230, + 230, -167, -99, -62, -84, 80, -89, 29, 23, -88, + -85, -106, -103, -104, 114, 115, 117, 116, 118, 103, + 104, 111, 81, 119, -93, -91, -92, -94, 64, 63, + 72, 65, 66, 67, 68, 73, 74, 75, -154, -160, + -101, -244, 47, 48, 263, 264, 265, 266, 271, 267, + 83, 36, 253, 261, 260, 259, 257, 258, 255, 256, + 269, 270, 136, 254, 130, 109, 262, -220, -153, 368, + 382, -40, 65, -40, -40, 375, -240, 286, -241, -160, + -5, -4, -244, 6, 20, 21, -117, 17, 16, -42, + -49, -60, 42, 43, -61, 21, 35, 46, 44, -41, + -59, 105, -62, -160, -137, -137, 11, -72, -73, -78, + -80, -160, -127, -170, 175, -130, 243, 242, -155, -128, + -154, -152, 241, 201, 240, 128, 288, 79, 22, 24, + 223, 82, 114, 16, 83, 113, 263, 123, 51, 289, + 255, 256, 253, 265, 266, 254, 229, 10, 291, 25, + 153, 21, 35, 107, 125, 86, 87, 156, 385, 23, + 154, 75, 294, 19, 54, 11, 13, 295, 296, 14, + 136, 135, 98, 132, 49, 8, 119, 26, 95, 45, + 297, 298, 299, 300, 301, 47, 96, 17, 257, 258, + 31, 302, 271, 160, 109, 52, 38, 80, 303, 304, + 73, 305, 76, 55, 77, 15, 50, 278, 306, 307, + 286, 97, 126, 262, 389, 48, 308, 130, 6, 268, + 30, 152, 46, 309, 131, 85, 269, 270, 134, 74, + 5, 137, 32, 9, 53, 56, 259, 260, 261, 36, + 84, 12, 310, 78, -203, 96, -194, -220, -78, 132, + -78, 262, 133, -147, 136, -147, -147, 131, 131, -78, + -220, -220, 123, 125, 128, 55, -23, -78, -146, 136, + -220, -146, 133, -146, -146, -78, 120, -78, -220, 30, + -144, 96, 12, 254, -220, 165, 131, 166, 133, -168, + -244, -155, -198, -220, 8, 132, 33, 144, -168, 169, + 170, -168, -142, -141, 235, 236, 230, 234, 12, 170, + 230, 168, -200, -220, 244, 134, -154, -143, -7, -3, + -10, -9, -11, 88, -167, -167, 58, 79, 77, 78, + 95, -233, 69, -62, -86, 98, 80, 96, 97, 82, + 100, 99, 110, 103, 104, 105, 106, 107, 108, 109, + 101, 102, 113, 88, 89, 90, 91, 92, 93, 94, + -138, -244, -104, -244, 121, 122, -89, -89, -89, -89, + -89, -89, -89, -89, -89, -89, 64, -244, 120, -2, + -99, -4, 275, -244, -244, -244, -244, -244, -244, -244, + -244, -109, -62, -244, -248, -95, -244, -248, -95, -248, + -95, -248, -244, -248, -95, -248, -95, -248, -248, -95, + -244, -244, -244, -244, -244, -244, -244, 370, 371, 383, + 384, -78, -78, 64, 133, 6, 58, -240, 22, -244, + -113, -3, -40, -118, 19, 31, -62, -114, -115, -62, + -5, 38, -55, -59, -61, 42, 43, 70, 11, -233, + -157, -156, 22, -154, 64, 120, -79, 26, -78, -64, + -65, -66, -67, -68, -81, -105, -244, -96, -78, -53, + 379, 380, -78, -72, -246, 58, 11, 56, -246, 58, + 120, 58, 175, -130, -132, -131, 244, 246, 88, 134, + -159, -154, 64, 29, 30, 59, 58, -78, -173, -176, + -178, -177, -179, -174, -175, 198, 199, 114, 202, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 30, + 155, 194, 195, 196, 197, 214, 215, 216, 217, 218, + 219, 220, 221, 181, 200, 273, 182, 183, 184, 185, + 186, 187, 189, 190, 191, 192, 193, -220, 22, 133, + -134, -78, -220, 80, -220, -78, -147, -78, -168, -168, + -168, 167, 167, 131, 131, 172, -78, 58, 134, -72, + 23, 55, -78, -146, -220, -220, -161, -160, -152, -168, + -144, 64, -62, -168, -168, -168, -78, -168, -168, -199, + 11, 98, -199, -144, -40, -168, -168, 11, -140, 11, + 98, -62, -145, 96, 55, 245, -166, 178, -78, 212, + 365, 366, 367, -62, -62, -62, -62, -97, 73, 80, + 74, 75, 69, -89, -98, -101, -104, 69, 98, 96, + 97, 82, -89, -89, -89, -89, -89, -89, -89, -89, + -89, -89, -89, -89, -89, -89, -89, -169, -220, 64, + -220, -88, -88, -154, -56, 21, 35, -55, -155, -161, + -152, -42, -245, 59, -245, -113, -55, -55, -62, -62, + -106, 64, -55, -106, 64, -55, -55, -48, 21, 35, + -107, -108, 84, -106, -154, -160, -245, -89, -154, -154, + -55, -56, -56, -55, -55, -200, -200, -220, -220, 64, + -232, -229, -223, 61, -78, -40, -241, 6, -244, -120, + -154, -117, -245, 9, 98, 58, 18, 58, -116, 24, + 25, -113, -90, -154, 65, 68, -63, 58, 11, -61, + -78, -156, 105, -161, -122, 161, -78, 30, 58, -74, + -76, -75, -77, 45, 49, 51, 46, 47, 48, 52, + -164, 22, -64, -34, -3, -244, -163, -162, 22, -160, + 64, -163, 161, -244, -122, 56, -64, -78, -64, -80, + -160, 105, -130, -132, 58, 245, 247, 248, 55, 76, + -62, 55, 76, -62, -185, 113, -205, -206, -207, -155, + 64, 65, -194, -195, -196, -208, 147, -213, 138, 140, + 137, -197, 148, 132, 28, 59, -190, 73, 80, -186, + 226, -180, 57, -180, -180, -180, -180, -184, 201, -184, + -184, -184, 57, 57, -180, -180, -180, -188, 57, -188, + -188, -189, 57, -189, -158, 56, -3, -78, 133, 22, + -168, 23, -168, -78, -148, 128, 125, 126, -216, 124, + 223, 201, 71, 29, 15, 263, 161, 287, -220, 162, + -78, -78, -78, -78, -78, 128, 125, -78, -78, -78, + -168, -78, 120, -78, -78, -79, -78, -144, -160, -160, + 64, -78, -154, -46, -154, 64, 73, 74, 75, -98, + -89, -89, -89, -54, 156, 79, -245, -245, -55, -55, + -244, 120, -5, -117, -245, -245, 58, 56, 22, 11, + 11, -245, 11, 11, -245, -245, -55, -110, -108, 86, + -62, -245, 120, -245, 58, 58, -245, -245, -245, -245, + -245, -52, 369, 373, 372, -220, 64, 58, -231, -230, + -224, 62, 88, -60, -40, -3, 58, -245, -118, 40, + -62, -62, -115, -117, -136, 19, 11, 36, 36, -83, + 12, -59, -64, -61, 120, -87, 36, -3, -244, -244, + -126, -129, -106, -65, -66, -66, -65, -66, 45, 45, + 45, 50, 45, 50, 45, -75, -160, -245, -245, -245, + -3, -162, -82, 53, 135, 54, -244, -56, -83, -64, + -83, -83, 120, -131, -133, 249, 246, 252, -220, 64, + 58, -207, 88, 57, -220, 28, -197, -197, -200, -200, + 28, -182, 29, 73, -187, 227, 65, -184, -184, -185, + 30, -185, -185, -185, -193, 64, -193, 65, 65, 55, + -154, 22, -78, -3, -167, -225, 143, 139, 147, 148, + 141, 60, 61, 62, 132, 28, 138, 140, 161, 137, + -225, -149, -150, 134, 22, 132, 28, 161, -222, 56, + 167, 223, 167, 134, -168, -168, -161, -140, -144, -121, + 57, -140, -54, 79, -89, -89, -234, 278, -245, -245, + -56, -155, -113, -118, -172, 114, 198, 155, 196, 192, + 189, 212, 203, 225, 194, 226, -169, -172, -89, -89, + -89, -89, 272, -113, 87, -62, 85, -155, -89, -89, + 64, -229, 88, -88, -55, -60, -245, -154, 22, 41, + -118, -78, -111, 13, -62, 105, -125, -58, -124, 55, + 389, -100, -102, -101, -244, -119, -154, -123, -154, -63, + 58, 88, -70, -69, 55, 56, -71, 55, -69, 45, + 45, 132, 132, 132, -123, -245, -113, -83, -57, -58, + -57, 246, 250, 251, -206, -207, -210, -209, -154, -213, + -200, -200, 57, -183, 55, -89, 59, -185, -185, -220, + 114, 59, 58, 59, 58, 59, 58, -78, -3, 22, + -167, -167, -78, -167, -154, -219, 275, -221, -220, -154, + -154, -154, -78, 120, -144, -144, -120, -144, -89, -244, + -154, -234, -234, -245, -117, -245, -180, -180, -180, -189, + -180, 186, -180, 186, -245, -245, 19, 19, 19, 19, + -244, -51, 268, -62, 58, 58, 64, -88, -63, -55, + -244, -245, -112, 14, 16, -57, 27, -55, 58, -245, + -245, 58, 120, -245, 58, -83, -129, -62, -62, 57, + -62, -244, -244, -244, -245, -117, -57, 59, 58, -180, + -120, -191, 223, 9, -184, 64, -184, 65, 65, -168, + -3, 26, -218, -217, -155, 57, 56, -161, 59, -235, + -245, -238, 161, -118, -184, -220, -89, -89, -89, -89, + -89, -117, 64, -89, -89, -83, -63, -3, -62, -99, + 28, -102, 36, -3, -154, -154, -154, -113, -120, -120, + -245, -120, -120, -163, -57, -212, -211, 56, 142, 71, + -209, 59, -192, 138, 28, 137, -92, -185, -185, 59, + 59, -244, 58, 88, -120, -78, -245, -113, 16, -245, + -245, -245, -245, -50, 98, 275, -245, -245, -245, -111, + -83, -245, 9, -100, 120, -117, 59, -245, -245, -245, + -82, -211, -220, -201, 88, 64, 150, -181, 71, 28, + 28, -214, -215, 161, -217, -207, 59, -236, 279, 280, + -99, -245, 273, 52, 276, -112, -111, -126, -154, -57, + 65, -78, 64, -245, 58, -154, -222, -237, 82, 281, + 284, -89, -237, 82, 41, 274, 277, -112, 57, -215, + 36, -219, -237, 282, 283, 285, 282, 283, -237, 41, + -120, 163, 79, 79, 275, 59, 164, -237, -237, 276, + -227, -228, 55, -244, 277, -228, 55, 10, 9, -89, + 160, -226, 151, 146, 149, 30, -226, -245, -245, 145, + 29, 73, +} + +var yyDef = [...]int{ + -2, -2, 2, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 698, 0, 399, 399, 399, 399, + 399, 399, 0, 775, 758, 0, 0, 0, 0, -2, + 364, 365, 0, 367, -2, 0, 0, -2, 1110, 1110, + 0, 0, 1110, 0, 0, 399, 0, 399, 399, 0, + 0, 1108, 57, 58, 59, 382, 383, 384, 101, 1, + 3, 0, 403, 706, 0, 0, -2, 401, 0, 758, + 758, 0, 0, 90, 91, 0, 0, 0, 1097, 0, + 0, 756, 756, 756, 0, 776, 777, -2, -2, -2, + 780, 781, 36, 37, 38, 918, 919, 920, 921, 922, + 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, + 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, + 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, + 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, + 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, + 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, + 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, + 993, 994, 995, 997, 998, 999, 1000, 1001, 1002, 1003, + 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, + 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, + 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, + 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, + 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, + 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, + 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, + 1074, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, + 1096, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, + 1107, 0, 0, 0, 0, 0, 759, 0, 754, 0, + 754, 0, 754, 754, 89, 0, 311, 487, 784, 785, + 996, 1075, 1076, 1097, 0, 0, 0, 355, 0, 1111, + 323, 0, 325, 1111, 0, 1111, 0, 332, 0, 0, + 338, 238, 0, 347, 361, 362, 349, 363, 366, 0, + 371, 374, 0, 346, 0, 0, 381, 394, 395, 1110, + 1110, 398, 41, 543, 499, 0, 506, -2, 0, 545, + 546, 547, 548, 549, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 576, 577, 578, 579, 683, 684, + 685, 686, 687, 688, 689, 690, 510, 511, 680, 0, + 732, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 671, 0, 616, 616, 616, 616, 616, 616, 616, 616, + 0, 0, 0, 0, 0, 0, 0, -2, -2, 0, + 0, 0, 223, 796, 0, 0, 0, 0, 651, 0, + 698, 52, 0, 399, 404, 405, 710, 0, 0, 0, + 0, 0, -2, -2, 415, 421, 422, 423, 424, 400, + 0, 427, 432, 0, 0, 0, 0, 0, 0, 67, + 69, 487, 73, 0, 1086, 736, -2, -2, 0, 0, + 782, 783, -2, 932, -2, 799, 800, 801, 802, 803, + 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, + 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, + 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, + 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, + 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, + 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, + 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, + 874, 875, 876, 877, 878, 879, 880, 881, 882, 883, + 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, + 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, + 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, + 914, 915, 916, 917, 0, 0, 128, 0, 126, 0, + 0, 0, 0, 0, 0, 0, 0, 756, 0, 1111, + 1111, 1111, 0, 0, 0, 0, 301, 0, 0, 0, + 0, 0, 754, 0, 0, 310, 0, 312, 1111, 355, + 315, 0, 0, 1111, 1111, 1111, 0, 1111, 1111, 322, + 1112, 1113, 0, 355, 399, 231, 232, 233, 326, 1111, + 1111, 328, 0, 352, 350, 351, 344, 345, 0, 358, + 335, 336, 341, 239, 0, 372, 375, 0, 393, 385, + 386, 387, 388, 0, 396, 397, 0, 0, 0, 0, + 0, 505, 540, 503, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 528, 529, 530, 531, 532, 533, 534, + 507, 0, 521, 0, 0, 0, 565, 566, 567, 568, + 569, 570, 571, 572, 573, 0, 575, 412, 0, 0, + 0, 698, 1104, 0, 0, 0, 0, 0, 0, 0, + 409, 0, 672, 0, 600, 608, 0, 601, 609, 602, + 610, 603, 0, 604, 611, 605, 612, 606, 607, 613, + 0, 0, 0, 412, 412, 0, 0, 238, 238, 0, + 111, 792, 797, 798, 0, 399, 0, 0, 0, 0, + 706, 0, 414, 42, 0, 0, 707, 699, 700, 703, + 698, 0, 437, 425, 416, 419, 420, 402, 0, -2, + 429, 433, 0, 435, 436, 0, 71, 0, 486, 0, + 439, 441, 442, 443, 445, 468, 0, 470, 470, 0, + -2, -2, -2, 0, 0, 0, 65, 66, 0, 0, + 0, 0, 1086, 737, 75, 76, 0, 0, 0, 0, + 206, 749, 750, 751, 747, 256, 0, 0, 194, 190, + 134, 135, 136, 183, 138, 183, 183, 183, 183, 203, + 203, 203, 203, 166, 167, 168, 169, 170, 0, 0, + 153, 183, 183, 183, 157, 173, 174, 175, 176, 177, + 178, 179, 180, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 185, 185, 185, 187, 187, 778, 0, 0, + 0, 0, 1111, 0, 1111, 123, 0, 0, 270, 272, + 273, 0, 0, 0, 0, 0, 0, 0, 0, 304, + 755, 0, 1111, 0, 308, 309, 488, 786, 787, 313, + 314, 356, 357, 316, 317, 318, 319, 320, 321, 0, + 234, 235, 0, 340, 0, 327, 331, 0, 355, 0, + 0, 333, 334, 0, 0, 343, 0, 373, 389, 377, + 378, 379, 380, 544, 500, 501, 502, 504, 522, 0, + 524, 526, 541, 512, 513, 537, 538, 539, 0, 0, + 0, 0, 535, 517, 0, 550, 551, 552, 553, 554, + 555, 556, 557, 558, 559, 560, 561, 564, 655, 656, + 0, 562, 563, 574, 0, 0, 0, 413, 681, 0, + -2, 0, 542, 1109, 731, 706, 0, 0, 0, 0, + 547, 683, 0, 547, 683, 0, 0, 0, 410, 411, + 678, 675, 0, 0, 680, 0, 617, 0, 0, 0, + 0, 0, 0, 0, 0, 102, 105, 108, 109, 110, + 790, 793, 0, 39, 99, 414, 652, 399, 0, 0, + 454, 710, 53, 711, 0, 0, 0, 0, 702, 704, + 705, 706, 0, 691, 0, 0, 497, 0, 0, 417, + 48, 434, 430, 0, 0, 0, 485, 0, 0, 0, + 0, 0, 0, 475, 0, 0, 478, 0, 0, 0, + 0, 469, 0, 0, 0, 0, 444, 471, 0, 473, + 474, 492, 1022, 412, 497, 0, 497, 68, 497, 70, + 0, 491, 738, 74, 0, 0, 79, 80, 739, 740, + 741, 742, 743, 744, 745, 0, 125, 257, 259, 262, + 263, 264, 129, 130, 131, 0, 0, 244, 0, 0, + 238, 238, 0, 236, 237, 127, 197, 195, 0, 192, + 191, 137, 0, 203, 203, 160, 161, 206, 0, 206, + 206, 206, 0, 0, 154, 155, 156, 148, 0, 149, + 150, 151, 0, 152, 0, 0, 93, 0, 0, 0, + 97, 757, 98, 124, 1110, 0, 0, 770, 271, 760, + 761, 762, 763, 764, 765, 766, 767, 768, 769, 0, + 115, 275, 277, 276, 280, 0, 0, 0, 302, 1111, + 306, 1111, 0, 352, 355, 452, 352, 330, 353, 354, + 359, 337, 369, 392, 390, 391, 523, 525, 527, 514, + 535, 518, 0, 515, 0, 0, 509, 631, 0, 0, + 412, 0, 698, 710, 587, 588, 0, 0, 0, 0, + 0, 624, 0, 0, 625, 0, 698, 0, 676, 0, + 0, 599, 0, 618, 0, 0, 619, 620, 621, 622, + 623, 103, 112, 113, 114, 106, 107, 0, 795, 791, + 0, 40, 0, 0, 414, 0, 0, 0, 44, 0, + 708, 709, 701, 710, 0, 752, 753, 692, 693, 694, + 0, 426, 438, 418, 0, 721, 0, 714, 0, 0, + 437, 733, 0, 440, 464, 466, 0, 461, 476, 477, + 479, 0, 481, 0, 483, 484, 446, 447, 448, 449, + 0, 472, 450, 0, 0, 0, 0, 0, 698, 497, + 724, 724, 0, 77, 78, 0, 0, 84, 207, 208, + 0, 260, 0, 0, 0, 226, 238, 238, 229, 230, + 0, 199, 0, 196, 133, 193, 0, 206, 206, 162, + 0, 163, 164, 165, 0, 181, 0, 0, 0, 0, + 779, 0, 0, 95, 265, 1110, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, + 1110, 0, 1110, 771, 772, 773, 774, 0, 118, 0, + 0, 0, 0, 0, 305, 307, 489, 355, 339, 355, + 0, 355, 516, 0, 536, 519, 583, 0, 631, 631, + 0, 682, 706, 46, 0, 183, 183, 660, 183, 187, + 663, 664, 183, 666, 183, 669, 0, 0, 0, 0, + 0, 0, 0, 673, 598, 679, 0, 681, 0, 0, + 0, 794, 0, 788, 437, 0, 653, 455, 0, 712, + 44, 45, 696, 0, 498, 431, 722, 55, 56, 0, + 0, 713, 727, 729, 0, 0, 717, 0, 456, 497, + 0, 0, 458, 465, 0, 0, 459, 0, 460, 480, + 482, 0, 0, 0, 0, 580, 706, 724, 63, 725, + 64, 81, 82, 83, 258, 261, 0, 240, 183, 243, + 227, 228, 0, 201, 0, 198, 184, 158, 159, 204, + 205, 203, 0, 203, 0, 188, 0, 1111, 94, 0, + 266, 267, 268, 269, 0, 274, 0, 116, 117, 0, + 0, 279, 303, 0, 324, 342, 0, 329, 520, 636, + 634, 584, 585, 586, 710, 589, 657, 203, 661, 662, + 665, 667, 668, 670, 591, 590, 0, 0, 0, 0, + 0, 706, 0, 677, 0, 0, 104, 789, 497, 437, + 0, 43, 49, 0, 0, 54, 0, 726, 0, 730, + 0, 0, 0, 72, 0, 698, 734, 735, 462, 0, + 467, 0, 0, 0, 470, 724, 62, 217, 0, 242, + 0, 209, 202, 0, 206, 182, 206, 0, 0, 92, + 96, 0, 119, 120, 0, 0, 0, 490, 453, 0, + 633, 698, 0, 47, 658, 659, 0, 0, 0, 0, + 626, 0, 674, 0, 0, 694, 497, 0, 697, 695, + 0, 728, 0, 716, 719, 718, 457, 706, 0, 0, + 494, 0, 0, 492, 61, 216, 218, 0, 224, 0, + 241, 0, 214, 0, 211, 213, 200, 171, 172, 186, + 189, 0, 0, 0, 0, 281, 632, 638, 0, 592, + 594, 593, 595, 0, 0, 0, 597, 614, 615, 696, + 694, 654, 0, 715, 0, 724, 463, 493, 495, 496, + 451, 219, 220, 0, 225, 222, 0, 132, 0, 210, + 212, 0, 297, 0, 121, 122, 115, 635, 0, 0, + 637, 596, 0, 0, 0, 50, 696, 723, 720, 60, + 221, 0, 215, 296, 0, 0, 118, 639, 0, 1089, + 0, 0, 641, 0, 627, 0, 630, 51, 0, 298, + 0, 278, 0, 643, 644, 645, 646, 647, 0, 628, + 0, 0, 0, 0, 0, 245, 0, 640, 642, 0, + 246, 247, 0, 0, 629, 248, 0, 0, 0, 0, + 0, 249, 251, 252, 0, 0, 250, 299, 300, 253, + 254, 255, +} + +var yyTok1 = [...]int{ + 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 81, 3, 3, 3, 108, 100, 3, + 57, 59, 105, 103, 58, 104, 120, 106, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 390, + 89, 88, 90, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 110, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 99, 3, 111, +} + +var yyTok2 = [...]int{ + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 82, 83, 84, 85, + 86, 87, 91, 92, 93, 94, 95, 96, 97, 98, + 101, 102, 107, 109, 112, 113, 114, 115, 116, 117, + 118, 119, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 269, 270, 271, 272, 273, 274, +} + +var yyTok3 = [...]int{ + 57600, 275, 57601, 276, 57602, 277, 57603, 278, 57604, 279, + 57605, 280, 57606, 281, 57607, 282, 57608, 283, 57609, 284, + 57610, 285, 57611, 286, 57612, 287, 57613, 288, 57614, 289, + 57615, 290, 57616, 291, 57617, 292, 57618, 293, 57619, 294, + 57620, 295, 57621, 296, 57622, 297, 57623, 298, 57624, 299, + 57625, 300, 57626, 301, 57627, 302, 57628, 303, 57629, 304, + 57630, 305, 57631, 306, 57632, 307, 57633, 308, 57634, 309, + 57635, 310, 57636, 311, 57637, 312, 57638, 313, 57639, 314, + 57640, 315, 57641, 316, 57642, 317, 57643, 318, 57644, 319, + 57645, 320, 57646, 321, 57647, 322, 57648, 323, 57649, 324, + 57650, 325, 57651, 326, 57652, 327, 57653, 328, 57654, 329, + 57655, 330, 57656, 331, 57657, 332, 57658, 333, 57659, 334, + 57660, 335, 57661, 336, 57662, 337, 57663, 338, 57664, 339, + 57665, 340, 57666, 341, 57667, 342, 57668, 343, 57669, 344, + 57670, 345, 57671, 346, 57672, 347, 57673, 348, 57674, 349, + 57675, 350, 57676, 351, 57677, 352, 57678, 353, 57679, 354, + 57680, 355, 57681, 356, 57682, 357, 57683, 358, 57684, 359, + 57685, 360, 57686, 361, 57687, 362, 57688, 363, 57689, 364, + 57690, 365, 57691, 366, 57692, 367, 57693, 368, 57694, 369, + 57695, 370, 57696, 371, 57697, 372, 57698, 373, 57699, 374, + 57700, 375, 57701, 376, 57702, 377, 57703, 378, 57704, 379, + 57705, 380, 57706, 381, 57707, 382, 57708, 383, 57709, 384, + 57710, 385, 57711, 386, 57712, 387, 57713, 388, 57714, 389, + 0, +} + +var yyErrorMessages = [...]struct { + state int + token int + msg string +}{} + +//line yaccpar:1 + +/* parser for yacc output */ + +var ( + yyDebug = 0 + yyErrorVerbose = false +) + +type yyLexer interface { + Lex(lval *yySymType) int + Error(s string) +} + +type yyParser interface { + Parse(yyLexer) int + Lookahead() int +} + +type yyParserImpl struct { + lval yySymType + stack [yyInitialStackSize]yySymType + char int +} + +func (p *yyParserImpl) Lookahead() int { + return p.char +} + +func yyNewParser() yyParser { + return &yyParserImpl{} +} + +const yyFlag = -1000 + +func yyTokname(c int) string { + if c >= 1 && c-1 < len(yyToknames) { + if yyToknames[c-1] != "" { + return yyToknames[c-1] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func yyStatname(s int) string { + if s >= 0 && s < len(yyStatenames) { + if yyStatenames[s] != "" { + return yyStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func yyErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !yyErrorVerbose { + return "syntax error" + } + + for _, e := range yyErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + yyTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := yyPact[state] + for tok := TOKSTART; tok-1 < len(yyToknames); tok++ { + if n := base + tok; n >= 0 && n < yyLast && yyChk[yyAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if yyDef[state] == -2 { + i := 0 + for yyExca[i] != -1 || yyExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; yyExca[i] >= 0; i += 2 { + tok := yyExca[i] + if tok < TOKSTART || yyExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if yyExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += yyTokname(tok) + } + return res +} + +func yylex1(lex yyLexer, lval *yySymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = yyTok1[0] + goto out + } + if char < len(yyTok1) { + token = yyTok1[char] + goto out + } + if char >= yyPrivate { + if char < yyPrivate+len(yyTok2) { + token = yyTok2[char-yyPrivate] + goto out + } + } + for i := 0; i < len(yyTok3); i += 2 { + token = yyTok3[i+0] + if token == char { + token = yyTok3[i+1] + goto out + } + } + +out: + if token == 0 { + token = yyTok2[1] /* unknown char */ + } + if yyDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char)) + } + return char, token +} + +func yyParse(yylex yyLexer) int { + return yyNewParser().Parse(yylex) +} + +func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int { + var yyn int + var yyVAL yySymType + var yyDollar []yySymType + _ = yyDollar // silence set and not used + yyS := yyrcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + yystate := 0 + yyrcvr.char = -1 + yytoken := -1 // yyrcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. + yystate = -1 + yyrcvr.char = -1 + yytoken = -1 + }() + yyp := -1 + goto yystack + +ret0: + return 0 + +ret1: + return 1 + +yystack: + /* put a state and value onto the stack */ + if yyDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate)) + } + + yyp++ + if yyp >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + } + yyS[yyp] = yyVAL + yyS[yyp].yys = yystate + +yynewstate: + yyn = yyPact[yystate] + if yyn <= yyFlag { + goto yydefault /* simple state */ + } + if yyrcvr.char < 0 { + yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval) + } + yyn += yytoken + if yyn < 0 || yyn >= yyLast { + goto yydefault + } + yyn = yyAct[yyn] + if yyChk[yyn] == yytoken { /* valid shift */ + yyrcvr.char = -1 + yytoken = -1 + yyVAL = yyrcvr.lval + yystate = yyn + if Errflag > 0 { + Errflag-- + } + goto yystack + } + +yydefault: + /* default state action */ + yyn = yyDef[yystate] + if yyn == -2 { + if yyrcvr.char < 0 { + yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + yyn = yyExca[xi+0] + if yyn < 0 || yyn == yytoken { + break + } + } + yyn = yyExca[xi+1] + if yyn < 0 { + goto ret0 + } + } + if yyn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + yylex.Error(yyErrorMessage(yystate, yytoken)) + Nerrs++ + if yyDebug >= 1 { + __yyfmt__.Printf("%s", yyStatname(yystate)) + __yyfmt__.Printf(" saw %s\n", yyTokname(yytoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for yyp >= 0 { + yyn = yyPact[yyS[yyp].yys] + yyErrCode + if yyn >= 0 && yyn < yyLast { + yystate = yyAct[yyn] /* simulate a shift of "error" */ + if yyChk[yystate] == yyErrCode { + goto yystack + } + } + + /* the current p has no shift on "error", pop stack */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys) + } + yyp-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken)) + } + if yytoken == yyEofCode { + goto ret1 + } + yyrcvr.char = -1 + yytoken = -1 + goto yynewstate /* try again in the same state */ + } + } + + /* reduction by production yyn */ + if yyDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate)) + } + + yynt := yyn + yypt := yyp + _ = yypt // guard against "declared and not used" + + yyp -= yyR2[yyn] + // yyp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. + if yyp+1 >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + } + yyVAL = yyS[yyp+1] + + /* consult goto table to find next state */ + yyn = yyR1[yyn] + yyg := yyPgo[yyn] + yyj := yyg + yyS[yyp].yys + 1 + + if yyj >= yyLast { + yystate = yyAct[yyg] + } else { + yystate = yyAct[yyj] + if yyChk[yystate] != -yyn { + yystate = yyAct[yyg] + } + } + // dummy call; replaced with literal code + switch yynt { + + case 1: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:403 + { + setParseTree(yylex, yyDollar[1].statement) + } + case 2: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:408 + { + } + case 3: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:409 + { + } + case 4: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:413 + { + yyVAL.statement = yyDollar[1].selStmt + } + case 35: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:447 + { + setParseTree(yylex, nil) + } + case 36: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:453 + { + yyVAL.colIdent = NewColIdentWithAt(string(yyDollar[1].bytes), NoAt) + } + case 37: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:457 + { + yyVAL.colIdent = NewColIdentWithAt(string(yyDollar[1].bytes), SingleAt) + } + case 38: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:461 + { + yyVAL.colIdent = NewColIdentWithAt(string(yyDollar[1].bytes), DoubleAt) + } + case 39: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:467 + { + yyVAL.colIdent = NewColIdentWithAt(string(yyDollar[1].bytes), SingleAt) + } + case 40: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:473 + { + yyVAL.colIdent = NewColIdentWithAt(string(yyDollar[1].bytes), DoubleAt) + } + case 41: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:479 + { + yyVAL.statement = &OtherAdmin{} + } + case 42: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:485 + { + sel := yyDollar[1].selStmt.(*Select) + sel.OrderBy = yyDollar[2].orderBy + sel.Limit = yyDollar[3].limit + sel.Lock = yyDollar[4].str + yyVAL.selStmt = sel + } + case 43: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:493 + { + yyVAL.selStmt = Unionize(yyDollar[2].selStmt, yyDollar[4].selStmt, yyDollar[3].str, yyDollar[5].orderBy, yyDollar[6].limit, yyDollar[7].str) + } + case 44: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:497 + { + yyVAL.selStmt = Unionize(yyDollar[1].selStmt, yyDollar[3].selStmt, yyDollar[2].str, yyDollar[4].orderBy, yyDollar[5].limit, yyDollar[6].str) + } + case 45: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:501 + { + yyVAL.selStmt = NewSelect(Comments(yyDollar[2].bytes2), SelectExprs{Nextval{Expr: yyDollar[5].expr}}, []string{yyDollar[3].str} /*options*/, TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}, nil /*where*/, nil /*groupBy*/, nil /*having*/) + } + case 46: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:524 + { + sel := yyDollar[1].selStmt.(*Select) + sel.OrderBy = yyDollar[2].orderBy + sel.Limit = yyDollar[3].limit + sel.Lock = yyDollar[4].str + yyVAL.selStmt = sel + } + case 47: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:532 + { + yyVAL.selStmt = Unionize(yyDollar[1].selStmt, yyDollar[3].selStmt, yyDollar[2].str, yyDollar[4].orderBy, yyDollar[5].limit, yyDollar[6].str) + } + case 48: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:538 + { + yyVAL.statement = &Stream{Comments: Comments(yyDollar[2].bytes2), SelectExpr: yyDollar[3].selectExpr, Table: yyDollar[5].tableName} + } + case 49: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:546 + { + yyVAL.selStmt = NewSelect(Comments(yyDollar[2].bytes2), yyDollar[4].selectExprs /*SelectExprs*/, yyDollar[3].strs /*options*/, yyDollar[5].tableExprs /*from*/, NewWhere(WhereStr, yyDollar[6].expr), GroupBy(yyDollar[7].exprs), NewWhere(HavingStr, yyDollar[8].expr)) + } + case 50: + yyDollar = yyS[yypt-10 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:550 + { + sel := NewSelect(Comments(yyDollar[4].bytes2), yyDollar[6].selectExprs /*SelectExprs*/, yyDollar[5].strs /*options*/, yyDollar[7].tableExprs /*from*/, NewWhere(WhereStr, yyDollar[8].expr), GroupBy(yyDollar[9].exprs), NewWhere(HavingStr, yyDollar[10].expr)) + sel.With = &With{Recursive: false, CTEs: yyDollar[2].cteList} + yyVAL.selStmt = sel + } + case 51: + yyDollar = yyS[yypt-11 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:556 + { + sel := NewSelect(Comments(yyDollar[5].bytes2), yyDollar[7].selectExprs /*SelectExprs*/, yyDollar[6].strs /*options*/, yyDollar[8].tableExprs /*from*/, NewWhere(WhereStr, yyDollar[9].expr), GroupBy(yyDollar[10].exprs), NewWhere(HavingStr, yyDollar[11].expr)) + sel.With = &With{Recursive: true, CTEs: yyDollar[3].cteList} + yyVAL.selStmt = sel + } + case 52: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:564 + { + yyVAL.selStmt = yyDollar[1].selStmt + } + case 53: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:568 + { + yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt} + } + case 54: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:575 + { + // insert_data returns a *Insert pre-filled with Columns & Values + ins := yyDollar[6].ins + ins.Action = yyDollar[1].str + ins.Comments = yyDollar[2].bytes2 + ins.Ignore = yyDollar[3].str + ins.Table = yyDollar[4].tableName + ins.Partitions = yyDollar[5].partitions + ins.OnDup = OnDup(yyDollar[7].updateExprs) + ins.SelectExprs = yyDollar[8].selectExprs + yyVAL.statement = ins + } + case 55: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:588 + { + // insert_data returns a *Insert pre-filled with Columns & Values + ins := yyDollar[6].ins + ins.Action = yyDollar[1].str + ins.Comments = yyDollar[2].bytes2 + ins.Ignore = yyDollar[3].str + ins.Table = yyDollar[4].tableName + ins.Partitions = yyDollar[5].partitions + ins.OnDup = OnDup(nil) + ins.SelectExprs = yyDollar[7].selectExprs + yyVAL.statement = ins + } + case 56: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:601 + { + // insert_data returns a *Insert pre-filled with Columns & Values + ins := yyDollar[6].ins + ins.Action = yyDollar[1].str + ins.Comments = yyDollar[2].bytes2 + ins.Ignore = yyDollar[3].str + ins.Table = yyDollar[4].tableName + ins.Partitions = yyDollar[5].partitions + ins.OnDup = OnDup(yyDollar[7].updateExprs) + yyVAL.statement = ins + } + case 57: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:615 + { + yyVAL.str = InsertStr + } + case 58: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:621 + { + yyVAL.str = UpdateStr + } + case 59: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:625 + { + yyVAL.str = ReplaceStr + } + case 60: + yyDollar = yyS[yypt-11 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:631 + { + yyVAL.statement = &Update{Action: yyDollar[1].str, Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].str, TableExprs: yyDollar[4].tableExprs, Exprs: yyDollar[6].updateExprs, From: yyDollar[7].tableExprs, Where: NewWhere(WhereStr, yyDollar[8].expr), OrderBy: yyDollar[9].orderBy, Limit: yyDollar[10].limit, SelectExprs: yyDollar[11].selectExprs} + } + case 61: + yyDollar = yyS[yypt-9 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:637 + { + yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[4].tableName}}, Partitions: yyDollar[5].partitions, Where: NewWhere(WhereStr, yyDollar[6].expr), OrderBy: yyDollar[7].orderBy, Limit: yyDollar[8].limit, SelectExprs: yyDollar[9].selectExprs} + } + case 62: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:641 + { + yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Targets: yyDollar[4].tableNames, TableExprs: yyDollar[6].tableExprs, Where: NewWhere(WhereStr, yyDollar[7].expr), SelectExprs: yyDollar[8].selectExprs} + } + case 63: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:645 + { + yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Targets: yyDollar[3].tableNames, TableExprs: yyDollar[5].tableExprs, Where: NewWhere(WhereStr, yyDollar[6].expr), SelectExprs: yyDollar[7].selectExprs} + } + case 64: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:649 + { + yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Targets: yyDollar[3].tableNames, TableExprs: yyDollar[5].tableExprs, Where: NewWhere(WhereStr, yyDollar[6].expr), SelectExprs: yyDollar[7].selectExprs} + } + case 65: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:654 + { + } + case 66: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:655 + { + } + case 67: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:659 + { + yyVAL.tableNames = TableNames{yyDollar[1].tableName} + } + case 68: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:663 + { + yyVAL.tableNames = append(yyVAL.tableNames, yyDollar[3].tableName) + } + case 69: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:669 + { + yyVAL.tableNames = TableNames{yyDollar[1].tableName} + } + case 70: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:673 + { + yyVAL.tableNames = append(yyVAL.tableNames, yyDollar[3].tableName) + } + case 71: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:678 + { + yyVAL.partitions = nil + } + case 72: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:682 + { + yyVAL.partitions = yyDollar[3].partitions + } + case 73: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:688 + { + yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Exprs: yyDollar[3].setExprs} + } + case 74: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:694 + { + yyVAL.statement = &SetTransaction{Comments: Comments(yyDollar[2].bytes2), Scope: yyDollar[3].str, Characteristics: yyDollar[5].characteristics} + } + case 75: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:698 + { + yyVAL.statement = &SetTransaction{Comments: Comments(yyDollar[2].bytes2), Characteristics: yyDollar[4].characteristics} + } + case 76: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:704 + { + yyVAL.characteristics = []Characteristic{yyDollar[1].characteristic} + } + case 77: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:708 + { + yyVAL.characteristics = append(yyVAL.characteristics, yyDollar[3].characteristic) + } + case 78: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:714 + { + yyVAL.characteristic = &IsolationLevel{Level: string(yyDollar[3].str)} + } + case 79: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:718 + { + yyVAL.characteristic = &AccessMode{Mode: TxReadWrite} + } + case 80: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:722 + { + yyVAL.characteristic = &AccessMode{Mode: TxReadOnly} + } + case 81: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:728 + { + yyVAL.str = RepeatableRead + } + case 82: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:732 + { + yyVAL.str = ReadCommitted + } + case 83: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:736 + { + yyVAL.str = ReadUncommitted + } + case 84: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:740 + { + yyVAL.str = Serializable + } + case 85: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:746 + { + yyVAL.str = SessionStr + } + case 86: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:750 + { + yyVAL.str = GlobalStr + } + case 87: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:756 + { + yyVAL.str = TempStr + } + case 88: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:760 + { + yyVAL.str = TemporaryStr + } + case 89: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:766 + { + yyVAL.str = MaterializedStr + } + case 90: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:772 + { + yyDollar[1].ddl.TableSpec = yyDollar[2].TableSpec + yyVAL.statement = yyDollar[1].ddl + } + case 91: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:777 + { + // Create table [name] like [name] + yyDollar[1].ddl.OptLike = yyDollar[2].optLike + yyVAL.statement = yyDollar[1].ddl + } + case 92: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:783 + { + // Change this to an alter statement + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[7].tableName} + } + case 93: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:788 + { + yyVAL.statement = &DDL{Action: CreateStr, Table: yyDollar[3].tableName.ToViewName(), SelectStatement: yyDollar[5].selStmt} + } + case 94: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:792 + { + yyVAL.statement = &DDL{Action: CreateStr, Table: yyDollar[5].tableName.ToViewName(), SelectStatement: yyDollar[7].selStmt, OrReplace: true} + } + case 95: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:796 + { + yyVAL.statement = &DDL{Action: CreateStr, Table: yyDollar[4].tableName.ToViewName(), SelectStatement: yyDollar[6].selStmt, Modifier: yyDollar[2].str} + } + case 96: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:800 + { + yyVAL.statement = &DDL{Action: CreateStr, Table: yyDollar[6].tableName.ToViewName(), SelectStatement: yyDollar[8].selStmt, OrReplace: true, Modifier: yyDollar[4].str} + } + case 97: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:804 + { + var notExists bool + if yyDollar[3].byt != 0 { + notExists = true + } + yyVAL.statement = &DBDDL{Action: CreateStr, DBName: string(yyDollar[4].colIdent.String()), IfNotExists: notExists} + } + case 98: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:812 + { + var notExists bool + if yyDollar[3].byt != 0 { + notExists = true + } + yyVAL.statement = &DBDDL{Action: CreateStr, DBName: string(yyDollar[4].colIdent.String()), IfNotExists: notExists} + } + case 99: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:822 + { + yyVAL.statement = &RefreshMaterializedView{ViewName: yyDollar[4].tableName} + } + case 100: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:827 + { + yyVAL.boolVal = BoolVal(false) + } + case 101: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:831 + { + yyVAL.boolVal = BoolVal(true) + } + case 102: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:837 + { + yyVAL.statement = &Auth{SessionAuth: yyDollar[1].boolVal, Provider: yyDollar[4].str} + } + case 103: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:842 + { + yyVAL.statement = &Auth{SessionAuth: yyDollar[1].boolVal, Provider: yyDollar[4].str, Type: yyDollar[5].str} + } + case 104: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:847 + { + yyVAL.statement = &Auth{SessionAuth: yyDollar[1].boolVal, Provider: yyDollar[4].str, Type: yyDollar[5].str, KeyFilePath: string(yyDollar[6].bytes), KeyEnvVar: string(yyDollar[7].bytes)} + } + case 105: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:852 + { + yyVAL.statement = &AuthRevoke{SessionAuth: yyDollar[1].boolVal, Provider: yyDollar[4].str} + } + case 106: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:858 + { + yyVAL.statement = &Registry{ActionType: string(yyDollar[3].bytes), ProviderId: yyDollar[4].colIdent.GetRawVal(), ProviderVersion: yyDollar[5].colIdent.GetRawVal()} + } + case 107: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:863 + { + yyVAL.statement = &Registry{ActionType: string(yyDollar[3].bytes), ProviderId: yyDollar[4].colIdent.GetRawVal(), ProviderVersion: string(yyDollar[5].bytes)} + } + case 108: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:868 + { + yyVAL.statement = &Registry{ActionType: string(yyDollar[3].bytes), ProviderId: yyDollar[4].colIdent.GetRawVal()} + } + case 109: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:873 + { + yyVAL.statement = &Registry{ActionType: string(yyDollar[3].bytes), ProviderId: yyDollar[4].colIdent.GetRawVal()} + } + case 110: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:878 + { + yyVAL.statement = &Registry{ActionType: string(yyDollar[3].bytes), ProviderId: string(yyDollar[4].bytes)} + } + case 111: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:883 + { + yyVAL.statement = &Registry{ActionType: string(yyDollar[3].bytes)} + } + case 112: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:889 + { + yyVAL.str = InteractiveStr + } + case 113: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:893 + { + yyVAL.str = ServiceAccountStr + } + case 114: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:897 + { + yyVAL.str = ServiceAccountStr + } + case 115: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:903 + { + yyVAL.colIdent = NewColIdent("") + } + case 116: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:907 + { + yyVAL.colIdent = yyDollar[2].colIdent + } + case 117: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:913 + { + yyVAL.colIdent = yyDollar[1].colIdent + } + case 118: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:918 + { + var v []VindexParam + yyVAL.vindexParams = v + } + case 119: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:923 + { + yyVAL.vindexParams = yyDollar[2].vindexParams + } + case 120: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:929 + { + yyVAL.vindexParams = make([]VindexParam, 0, 4) + yyVAL.vindexParams = append(yyVAL.vindexParams, yyDollar[1].vindexParam) + } + case 121: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:934 + { + yyVAL.vindexParams = append(yyVAL.vindexParams, yyDollar[3].vindexParam) + } + case 122: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:940 + { + yyVAL.vindexParam = VindexParam{Key: yyDollar[1].colIdent, Val: yyDollar[3].str} + } + case 123: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:946 + { + var notExists bool + if yyDollar[3].byt != 0 { + notExists = true + } + yyVAL.ddl = &DDL{Action: CreateStr, Table: yyDollar[4].tableName, IfNotExists: notExists} + setDDL(yylex, yyVAL.ddl) + } + case 124: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:955 + { + var notExists bool + if yyDollar[4].byt != 0 { + notExists = true + } + yyVAL.ddl = &DDL{Action: CreateStr, Table: yyDollar[5].tableName, IfNotExists: notExists, Modifier: yyDollar[2].str} + setDDL(yylex, yyVAL.ddl) + } + case 125: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:966 + { + yyVAL.TableSpec = yyDollar[2].TableSpec + yyVAL.TableSpec.Options = yyDollar[4].str + } + case 126: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:973 + { + yyVAL.optLike = &OptLike{LikeTable: yyDollar[2].tableName} + } + case 127: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:977 + { + yyVAL.optLike = &OptLike{LikeTable: yyDollar[3].tableName} + } + case 128: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:983 + { + yyVAL.TableSpec = &TableSpec{} + yyVAL.TableSpec.AddColumn(yyDollar[1].columnDefinition) + } + case 129: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:988 + { + yyVAL.TableSpec.AddColumn(yyDollar[3].columnDefinition) + } + case 130: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:992 + { + yyVAL.TableSpec.AddIndex(yyDollar[3].indexDefinition) + } + case 131: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:996 + { + yyVAL.TableSpec.AddConstraint(yyDollar[3].constraintDefinition) + } + case 132: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1002 + { + yyDollar[2].columnType.NotNull = yyDollar[3].boolVal + yyDollar[2].columnType.Default = yyDollar[4].optVal + yyDollar[2].columnType.OnUpdate = yyDollar[5].optVal + yyDollar[2].columnType.Autoincrement = yyDollar[6].boolVal + yyDollar[2].columnType.KeyOpt = yyDollar[7].colKeyOpt + yyDollar[2].columnType.Comment = yyDollar[8].sqlVal + yyVAL.columnDefinition = &ColumnDefinition{Name: yyDollar[1].colIdent, Type: yyDollar[2].columnType} + } + case 133: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1013 + { + yyVAL.columnType = yyDollar[1].columnType + yyVAL.columnType.Unsigned = yyDollar[2].boolVal + yyVAL.columnType.Zerofill = yyDollar[3].boolVal + } + case 137: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1024 + { + yyVAL.columnType = yyDollar[1].columnType + yyVAL.columnType.Length = yyDollar[2].sqlVal + } + case 138: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1029 + { + yyVAL.columnType = yyDollar[1].columnType + } + case 139: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1035 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 140: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1039 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 141: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1043 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 142: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1047 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 143: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1051 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 144: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1055 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 145: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1059 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 146: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1063 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 147: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1067 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 148: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1073 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 149: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1079 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 150: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1085 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 151: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1091 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 152: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1097 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 153: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1105 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 154: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1109 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 155: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1113 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 156: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1117 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 157: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1121 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 158: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1127 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} + } + case 159: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1131 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} + } + case 160: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1135 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 161: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1139 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 162: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1143 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 163: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1147 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 164: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1151 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 165: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1155 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 166: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1159 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 167: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1163 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 168: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1167 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 169: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1171 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 170: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1175 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 171: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1179 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} + } + case 172: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1184 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} + } + case 173: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1190 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 174: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1194 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 175: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1198 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 176: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1202 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 177: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1206 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 178: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1210 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 179: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1214 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 180: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1218 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 181: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1224 + { + yyVAL.strs = make([]string, 0, 4) + yyVAL.strs = append(yyVAL.strs, "'"+string(yyDollar[1].bytes)+"'") + } + case 182: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1229 + { + yyVAL.strs = append(yyDollar[1].strs, "'"+string(yyDollar[3].bytes)+"'") + } + case 183: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1234 + { + yyVAL.sqlVal = nil + } + case 184: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1238 + { + yyVAL.sqlVal = NewIntVal(yyDollar[2].bytes) + } + case 185: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1243 + { + yyVAL.LengthScaleOption = LengthScaleOption{} + } + case 186: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1247 + { + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntVal(yyDollar[2].bytes), + Scale: NewIntVal(yyDollar[4].bytes), + } + } + case 187: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1255 + { + yyVAL.LengthScaleOption = LengthScaleOption{} + } + case 188: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1259 + { + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntVal(yyDollar[2].bytes), + } + } + case 189: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1265 + { + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntVal(yyDollar[2].bytes), + Scale: NewIntVal(yyDollar[4].bytes), + } + } + case 190: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1273 + { + yyVAL.boolVal = BoolVal(false) + } + case 191: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1277 + { + yyVAL.boolVal = BoolVal(true) + } + case 192: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1282 + { + yyVAL.boolVal = BoolVal(false) + } + case 193: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1286 + { + yyVAL.boolVal = BoolVal(true) + } + case 194: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1292 + { + yyVAL.boolVal = BoolVal(false) + } + case 195: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1296 + { + yyVAL.boolVal = BoolVal(false) + } + case 196: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1300 + { + yyVAL.boolVal = BoolVal(true) + } + case 197: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1305 + { + yyVAL.optVal = nil + } + case 198: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1309 + { + yyVAL.optVal = yyDollar[2].expr + } + case 199: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1314 + { + yyVAL.optVal = nil + } + case 200: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1318 + { + yyVAL.optVal = yyDollar[3].expr + } + case 201: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1323 + { + yyVAL.boolVal = BoolVal(false) + } + case 202: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1327 + { + yyVAL.boolVal = BoolVal(true) + } + case 203: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1332 + { + yyVAL.str = "" + } + case 204: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1336 + { + yyVAL.str = string(yyDollar[3].colIdent.String()) + } + case 205: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1340 + { + yyVAL.str = string(yyDollar[3].bytes) + } + case 206: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1345 + { + yyVAL.str = "" + } + case 207: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1349 + { + yyVAL.str = string(yyDollar[2].colIdent.String()) + } + case 208: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1353 + { + yyVAL.str = string(yyDollar[2].bytes) + } + case 209: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1358 + { + yyVAL.colKeyOpt = colKeyNone + } + case 210: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1362 + { + yyVAL.colKeyOpt = ColKeyPrimary + } + case 211: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1366 + { + yyVAL.colKeyOpt = ColKey + } + case 212: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1370 + { + yyVAL.colKeyOpt = ColKeyUniqueKey + } + case 213: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1374 + { + yyVAL.colKeyOpt = ColKeyUnique + } + case 214: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1379 + { + yyVAL.sqlVal = nil + } + case 215: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1383 + { + yyVAL.sqlVal = NewStrVal(yyDollar[2].bytes) + } + case 216: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1389 + { + yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns, Options: yyDollar[5].indexOptions} + } + case 217: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1393 + { + yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns} + } + case 218: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1399 + { + yyVAL.indexOptions = []*IndexOption{yyDollar[1].indexOption} + } + case 219: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1403 + { + yyVAL.indexOptions = append(yyVAL.indexOptions, yyDollar[2].indexOption) + } + case 220: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1409 + { + yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Using: string(yyDollar[2].colIdent.String())} + } + case 221: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1413 + { + // should not be string + yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Value: NewIntVal(yyDollar[3].bytes)} + } + case 222: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1418 + { + yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Value: NewStrVal(yyDollar[2].bytes)} + } + case 223: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1424 + { + yyVAL.statement = &Sleep{Duration: NewIntVal(yyDollar[2].bytes)} + } + case 224: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1430 + { + yyVAL.str = "" + } + case 225: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1434 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 226: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1440 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true} + } + case 227: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1444 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(yyDollar[3].str), Spatial: true, Unique: false} + } + case 228: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1448 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(yyDollar[3].str), Unique: true} + } + case 229: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1452 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes), Name: NewColIdent(yyDollar[2].str), Unique: true} + } + case 230: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1456 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].str), Name: NewColIdent(yyDollar[2].str), Unique: false} + } + case 231: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1462 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 232: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1466 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 233: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1470 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 234: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1477 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 235: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1481 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 236: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1487 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 237: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1491 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 238: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1496 + { + yyVAL.str = "" + } + case 239: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1500 + { + yyVAL.str = string(yyDollar[1].colIdent.String()) + } + case 240: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1506 + { + yyVAL.indexColumns = []*IndexColumn{yyDollar[1].indexColumn} + } + case 241: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1510 + { + yyVAL.indexColumns = append(yyVAL.indexColumns, yyDollar[3].indexColumn) + } + case 242: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1516 + { + yyVAL.indexColumn = &IndexColumn{Column: yyDollar[1].colIdent, Length: yyDollar[2].sqlVal} + } + case 243: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1522 + { + yyVAL.constraintDefinition = &ConstraintDefinition{Name: string(yyDollar[2].colIdent.String()), Details: yyDollar[3].constraintInfo} + } + case 244: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1526 + { + yyVAL.constraintDefinition = &ConstraintDefinition{Details: yyDollar[1].constraintInfo} + } + case 245: + yyDollar = yyS[yypt-10 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1533 + { + yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns} + } + case 246: + yyDollar = yyS[yypt-11 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1537 + { + yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnDelete: yyDollar[11].ReferenceAction} + } + case 247: + yyDollar = yyS[yypt-11 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1541 + { + yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnUpdate: yyDollar[11].ReferenceAction} + } + case 248: + yyDollar = yyS[yypt-12 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1545 + { + yyVAL.constraintInfo = &ForeignKeyDefinition{Source: yyDollar[4].columns, ReferencedTable: yyDollar[7].tableName, ReferencedColumns: yyDollar[9].columns, OnDelete: yyDollar[11].ReferenceAction, OnUpdate: yyDollar[12].ReferenceAction} + } + case 249: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1551 + { + yyVAL.ReferenceAction = yyDollar[3].ReferenceAction + } + case 250: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1557 + { + yyVAL.ReferenceAction = yyDollar[3].ReferenceAction + } + case 251: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1563 + { + yyVAL.ReferenceAction = Restrict + } + case 252: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1567 + { + yyVAL.ReferenceAction = Cascade + } + case 253: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1571 + { + yyVAL.ReferenceAction = NoAction + } + case 254: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1575 + { + yyVAL.ReferenceAction = SetDefault + } + case 255: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1579 + { + yyVAL.ReferenceAction = SetNull + } + case 256: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1584 + { + yyVAL.str = "" + } + case 257: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1588 + { + yyVAL.str = " " + string(yyDollar[1].str) + } + case 258: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1592 + { + yyVAL.str = string(yyDollar[1].str) + ", " + string(yyDollar[3].str) + } + case 259: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1600 + { + yyVAL.str = yyDollar[1].str + } + case 260: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1604 + { + yyVAL.str = yyDollar[1].str + " " + yyDollar[2].str + } + case 261: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1608 + { + yyVAL.str = yyDollar[1].str + "=" + yyDollar[3].str + } + case 262: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1614 + { + yyVAL.str = yyDollar[1].colIdent.String() + } + case 263: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1618 + { + yyVAL.str = "'" + string(yyDollar[1].bytes) + "'" + } + case 264: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1622 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 265: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1628 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} + } + case 266: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1632 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} + } + case 267: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1636 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} + } + case 268: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1640 + { + // Change this to a rename statement + yyVAL.statement = &DDL{Action: RenameStr, FromTables: TableNames{yyDollar[4].tableName}, ToTables: TableNames{yyDollar[7].tableName}} + } + case 269: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1645 + { + // Rename an index can just be an alter + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName} + } + case 270: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1650 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName.ToViewName()} + } + case 271: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1654 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, PartitionSpec: yyDollar[5].partSpec} + } + case 272: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1658 + { + yyVAL.statement = &DBDDL{Action: AlterStr, DBName: string(yyDollar[3].colIdent.String())} + } + case 273: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1662 + { + yyVAL.statement = &DBDDL{Action: AlterStr, DBName: string(yyDollar[3].colIdent.String())} + } + case 274: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1666 + { + yyVAL.statement = &DDL{ + Action: CreateVindexStr, + Table: yyDollar[5].tableName, + VindexSpec: &VindexSpec{ + Name: NewColIdent(yyDollar[5].tableName.Name.String()), + Type: yyDollar[6].colIdent, + Params: yyDollar[7].vindexParams, + }, + } + } + case 275: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1678 + { + yyVAL.statement = &DDL{ + Action: DropVindexStr, + Table: yyDollar[5].tableName, + VindexSpec: &VindexSpec{ + Name: NewColIdent(yyDollar[5].tableName.Name.String()), + }, + } + } + case 276: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1688 + { + yyVAL.statement = &DDL{Action: AddVschemaTableStr, Table: yyDollar[5].tableName} + } + case 277: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1692 + { + yyVAL.statement = &DDL{Action: DropVschemaTableStr, Table: yyDollar[5].tableName} + } + case 278: + yyDollar = yyS[yypt-12 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1696 + { + yyVAL.statement = &DDL{ + Action: AddColVindexStr, + Table: yyDollar[4].tableName, + VindexSpec: &VindexSpec{ + Name: yyDollar[7].colIdent, + Type: yyDollar[11].colIdent, + Params: yyDollar[12].vindexParams, + }, + VindexCols: yyDollar[9].columns, + } + } + case 279: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1709 + { + yyVAL.statement = &DDL{ + Action: DropColVindexStr, + Table: yyDollar[4].tableName, + VindexSpec: &VindexSpec{ + Name: yyDollar[7].colIdent, + }, + } + } + case 280: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1719 + { + yyVAL.statement = &DDL{Action: AddSequenceStr, Table: yyDollar[5].tableName} + } + case 281: + yyDollar = yyS[yypt-9 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1723 + { + yyVAL.statement = &DDL{ + Action: AddAutoIncStr, + Table: yyDollar[4].tableName, + AutoIncSpec: &AutoIncSpec{ + Column: yyDollar[7].colIdent, + Sequence: yyDollar[9].tableName, + }, + } + } + case 296: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1752 + { + yyVAL.partSpec = &PartitionSpec{Action: ReorganizeStr, Name: yyDollar[3].colIdent, Definitions: yyDollar[6].partDefs} + } + case 297: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1758 + { + yyVAL.partDefs = []*PartitionDefinition{yyDollar[1].partDef} + } + case 298: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1762 + { + yyVAL.partDefs = append(yyDollar[1].partDefs, yyDollar[3].partDef) + } + case 299: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1768 + { + yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Limit: yyDollar[7].expr} + } + case 300: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1772 + { + yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Maxvalue: true} + } + case 301: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1778 + { + yyVAL.statement = yyDollar[3].ddl + } + case 302: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1784 + { + yyVAL.ddl = &DDL{Action: RenameStr, FromTables: TableNames{yyDollar[1].tableName}, ToTables: TableNames{yyDollar[3].tableName}} + } + case 303: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1788 + { + yyVAL.ddl = yyDollar[1].ddl + yyVAL.ddl.FromTables = append(yyVAL.ddl.FromTables, yyDollar[3].tableName) + yyVAL.ddl.ToTables = append(yyVAL.ddl.ToTables, yyDollar[5].tableName) + } + case 304: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1796 + { + var exists bool + if yyDollar[3].byt != 0 { + exists = true + } + yyVAL.statement = &DDL{Action: DropStr, FromTables: yyDollar[4].tableNames, IfExists: exists, Modifier: "table"} + } + case 305: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1804 + { + // Change this to an alter statement + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[5].tableName} + } + case 306: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1809 + { + var exists bool + if yyDollar[3].byt != 0 { + exists = true + } + yyVAL.statement = &DDL{Action: DropStr, FromTables: TableNames{yyDollar[4].tableName.ToViewName()}, IfExists: exists} + } + case 307: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1817 + { + var exists bool + if yyDollar[4].byt != 0 { + exists = true + } + yyVAL.statement = &DDL{Action: DropStr, FromTables: TableNames{yyDollar[5].tableName.ToViewName()}, IfExists: exists, Modifier: yyDollar[2].str} + } + case 308: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1825 + { + yyVAL.statement = &DBDDL{Action: DropStr, DBName: string(yyDollar[4].colIdent.String())} + } + case 309: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1829 + { + yyVAL.statement = &DBDDL{Action: DropStr, DBName: string(yyDollar[4].colIdent.String())} + } + case 310: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1835 + { + yyVAL.statement = &DDL{Action: TruncateStr, Table: yyDollar[3].tableName} + } + case 311: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1839 + { + yyVAL.statement = &DDL{Action: TruncateStr, Table: yyDollar[2].tableName} + } + case 312: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1844 + { + yyVAL.statement = &OtherRead{} + } + case 313: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1850 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].colIdent.String())} + } + case 314: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1855 + { + showTablesOpt := &ShowTablesOpt{Filter: yyDollar[4].showFilter} + yyVAL.statement = &Show{Type: CharsetStr, ShowTablesOpt: showTablesOpt} + } + case 315: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1860 + { + showTablesOpt := &ShowTablesOpt{Filter: yyDollar[3].showFilter} + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), ShowTablesOpt: showTablesOpt} + } + case 316: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1865 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 317: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1870 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].colIdent.String())} + } + case 318: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1874 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 319: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1878 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), Table: yyDollar[4].tableName} + } + case 320: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1882 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 321: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1886 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 322: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1890 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 323: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1894 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 324: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1898 + { + showTablesOpt := &ShowTablesOpt{DbName: yyDollar[6].str, Filter: yyDollar[7].showFilter} + yyVAL.statement = &Show{Extended: string(yyDollar[2].str), Type: string(yyDollar[3].str), ShowTablesOpt: showTablesOpt, OnTable: yyDollar[5].tableName} + } + case 325: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1903 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 326: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1907 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 327: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1911 + { + yyVAL.statement = &Show{Scope: yyDollar[2].str, Type: string(yyDollar[3].bytes)} + } + case 328: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1915 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 329: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1919 + { + showTablesOpt := &ShowTablesOpt{Full: yyDollar[2].str, DbName: yyDollar[6].str, Filter: yyDollar[7].showFilter} + yyVAL.statement = &Show{Type: string(yyDollar[3].str), ShowTablesOpt: showTablesOpt, OnTable: yyDollar[5].tableName} + } + case 330: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1924 + { + // this is ugly, but I couldn't find a better way for now + if yyDollar[3].str == "processlist" { + yyVAL.statement = &Show{Type: yyDollar[3].str} + } else { + showTablesOpt := &ShowTablesOpt{Full: yyDollar[2].str, DbName: yyDollar[4].str, Filter: yyDollar[5].showFilter} + yyVAL.statement = &Show{Type: yyDollar[3].str, ShowTablesOpt: showTablesOpt} + } + } + case 331: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1934 + { + yyVAL.statement = &Show{Scope: yyDollar[2].str, Type: string(yyDollar[3].bytes)} + } + case 332: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1938 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 333: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1942 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), ShowCollationFilterOpt: yyDollar[4].expr} + } + case 334: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1946 + { + showTablesOpt := &ShowTablesOpt{Filter: yyDollar[4].showFilter} + yyVAL.statement = &Show{Scope: string(yyDollar[2].bytes), Type: string(yyDollar[3].bytes), ShowTablesOpt: showTablesOpt} + } + case 335: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1951 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 336: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1955 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 337: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1959 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes), OnTable: yyDollar[5].tableName} + } + case 338: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1963 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 339: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1967 + { + showTablesOpt := &ShowTablesOpt{Filter: yyDollar[6].showFilter} + yyVAL.statement = &Show{Extended: string(yyDollar[2].str), Type: string(yyDollar[3].colIdent.String()), OnTable: yyDollar[5].tableName, ShowTablesOpt: showTablesOpt} + } + case 340: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1984 + { + showTablesOpt := &ShowTablesOpt{Filter: yyDollar[4].showFilter} + yyVAL.statement = &Show{Extended: string(yyDollar[2].str), Type: string(yyDollar[3].colIdent.String()), ShowTablesOpt: showTablesOpt} + } + case 341: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1989 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), Scope: yyDollar[3].str} + } + case 342: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:1994 + { + showTablesOpt := &ShowTablesOpt{Filter: yyDollar[7].showFilter} + yyVAL.statement = &Show{Extended: string(yyDollar[2].str), Comments: Comments(yyDollar[4].bytes2), Type: string(yyDollar[3].bytes), OnTable: yyDollar[5].tableName, ShowTablesOpt: showTablesOpt, Columns: yyDollar[6].columns} + } + case 343: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2000 + { + yyVAL.statement = &Show{Type: "TRANSACTION_ISOLATION_LEVEL"} + } + case 344: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2006 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 345: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2010 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 346: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2016 + { + yyVAL.str = "" + } + case 347: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2020 + { + yyVAL.str = "extended " + } + case 348: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2026 + { + yyVAL.str = "" + } + case 349: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2030 + { + yyVAL.str = "full " + } + case 350: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2036 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 351: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2040 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 352: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2046 + { + yyVAL.str = "" + } + case 353: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2050 + { + yyVAL.str = yyDollar[2].tableIdent.v + } + case 354: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2054 + { + yyVAL.str = yyDollar[2].tableIdent.v + } + case 355: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2060 + { + yyVAL.showFilter = nil + } + case 356: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2064 + { + yyVAL.showFilter = &ShowFilter{Like: string(yyDollar[2].bytes)} + } + case 357: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2068 + { + yyVAL.showFilter = &ShowFilter{Filter: yyDollar[2].expr} + } + case 358: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2074 + { + yyVAL.showFilter = nil + } + case 359: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2078 + { + yyVAL.showFilter = &ShowFilter{Like: string(yyDollar[2].bytes)} + } + case 360: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2084 + { + yyVAL.str = "" + } + case 361: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2088 + { + yyVAL.str = SessionStr + } + case 362: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2092 + { + yyVAL.str = GlobalStr + } + case 363: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2098 + { + yyVAL.statement = &Use{DBName: yyDollar[2].tableIdent} + } + case 364: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2102 + { + yyVAL.statement = &Use{DBName: TableIdent{v: ""}} + } + case 365: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2108 + { + yyVAL.statement = &Begin{} + } + case 366: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2112 + { + yyVAL.statement = &Begin{} + } + case 367: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2118 + { + yyVAL.statement = &Commit{} + } + case 368: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2124 + { + yyVAL.statement = &Rollback{} + } + case 369: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2128 + { + yyVAL.statement = &SRollback{Name: yyDollar[5].colIdent} + } + case 370: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2133 + { + yyVAL.empty = struct{}{} + } + case 371: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2135 + { + yyVAL.empty = struct{}{} + } + case 372: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2138 + { + yyVAL.empty = struct{}{} + } + case 373: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2140 + { + yyVAL.empty = struct{}{} + } + case 374: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2145 + { + yyVAL.statement = &Savepoint{Name: yyDollar[2].colIdent} + } + case 375: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2151 + { + yyVAL.statement = &Release{Name: yyDollar[3].colIdent} + } + case 376: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2156 + { + yyVAL.str = "" + } + case 377: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2160 + { + yyVAL.str = JSONStr + } + case 378: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2164 + { + yyVAL.str = TreeStr + } + case 379: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2168 + { + yyVAL.str = VitessStr + } + case 380: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2172 + { + yyVAL.str = TraditionalStr + } + case 381: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2176 + { + yyVAL.str = AnalyzeStr + } + case 382: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2182 + { + yyVAL.bytes = yyDollar[1].bytes + } + case 383: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2186 + { + yyVAL.bytes = yyDollar[1].bytes + } + case 384: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2190 + { + yyVAL.bytes = yyDollar[1].bytes + } + case 385: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2196 + { + yyVAL.statement = yyDollar[1].selStmt + } + case 386: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2200 + { + yyVAL.statement = yyDollar[1].statement + } + case 387: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2204 + { + yyVAL.statement = yyDollar[1].statement + } + case 388: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2208 + { + yyVAL.statement = yyDollar[1].statement + } + case 389: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2213 + { + yyVAL.str = "" + } + case 390: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2217 + { + yyVAL.str = "" + } + case 391: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2221 + { + yyVAL.str = "" + } + case 392: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2227 + { + yyVAL.statement = &DescribeTable{Full: string(yyDollar[2].str), Extended: string(yyDollar[3].str), Table: yyDollar[4].tableName} + } + case 393: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2231 + { + yyVAL.statement = &Explain{Type: yyDollar[2].str, Statement: yyDollar[3].statement} + } + case 394: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2237 + { + yyVAL.statement = &OtherAdmin{} + } + case 395: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2241 + { + yyVAL.statement = &OtherAdmin{} + } + case 396: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2245 + { + yyVAL.statement = &OtherAdmin{} + } + case 397: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2249 + { + yyVAL.statement = &OtherAdmin{} + } + case 398: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2255 + { + yyVAL.statement = &DDL{Action: FlushStr} + } + case 399: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2259 + { + setAllowComments(yylex, true) + } + case 400: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2263 + { + yyVAL.bytes2 = yyDollar[2].bytes2 + setAllowComments(yylex, false) + } + case 401: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2269 + { + yyVAL.bytes2 = nil + } + case 402: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2273 + { + yyVAL.bytes2 = append(yyDollar[1].bytes2, yyDollar[2].bytes) + } + case 403: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2279 + { + yyVAL.str = UnionStr + } + case 404: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2283 + { + yyVAL.str = UnionAllStr + } + case 405: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2287 + { + yyVAL.str = UnionDistinctStr + } + case 406: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2292 + { + yyVAL.str = "" + } + case 407: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2296 + { + yyVAL.str = SQLNoCacheStr + } + case 408: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2300 + { + yyVAL.str = SQLCacheStr + } + case 409: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2305 + { + yyVAL.str = "" + } + case 410: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2309 + { + yyVAL.str = DistinctStr + } + case 411: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2313 + { + yyVAL.str = DistinctStr + } + case 412: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2318 + { + yyVAL.selectExprs = nil + } + case 413: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2322 + { + yyVAL.selectExprs = yyDollar[1].selectExprs + } + case 414: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2327 + { + yyVAL.strs = nil + } + case 415: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2331 + { + yyVAL.strs = []string{yyDollar[1].str} + } + case 416: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2335 + { // TODO: This is a hack since I couldn't get it to work in a nicer way. I got 'conflicts: 8 shift/reduce' + yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str} + } + case 417: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2339 + { + yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str} + } + case 418: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2343 + { + yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str, yyDollar[4].str} + } + case 419: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2349 + { + yyVAL.str = SQLNoCacheStr + } + case 420: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2353 + { + yyVAL.str = SQLCacheStr + } + case 421: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2357 + { + yyVAL.str = DistinctStr + } + case 422: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2361 + { + yyVAL.str = DistinctStr + } + case 423: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2365 + { + yyVAL.str = StraightJoinHint + } + case 424: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2369 + { + yyVAL.str = SQLCalcFoundRowsStr + } + case 425: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2375 + { + yyVAL.selectExprs = SelectExprs{yyDollar[1].selectExpr} + } + case 426: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2379 + { + yyVAL.selectExprs = append(yyVAL.selectExprs, yyDollar[3].selectExpr) + } + case 427: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2385 + { + yyVAL.selectExpr = &StarExpr{} + } + case 428: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2389 + { + concatamer := ListArgConcatamer(yyDollar[2].listArgsConcat) + yyVAL.selectExpr = &AliasedExpr{Expr: yyDollar[1].expr, As: NewColIdent(concatamer.String())} + } + case 429: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2394 + { + yyVAL.selectExpr = &AliasedExpr{Expr: yyDollar[1].expr, As: yyDollar[2].colIdent} + } + case 430: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2398 + { + yyVAL.selectExpr = &StarExpr{TableName: TableName{Name: yyDollar[1].tableIdent}} + } + case 431: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2402 + { + yyVAL.selectExpr = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}} + } + case 432: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2407 + { + yyVAL.colIdent = ColIdent{} + } + case 433: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2411 + { + yyVAL.colIdent = yyDollar[1].colIdent + } + case 434: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2415 + { + yyVAL.colIdent = yyDollar[2].colIdent + } + case 436: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2422 + { + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + } + case 437: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2427 + { + yyVAL.tableExprs = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewTableIdent("dual")}}} + } + case 438: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2431 + { + yyVAL.tableExprs = yyDollar[2].tableExprs + } + case 439: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2437 + { + yyVAL.tableExprs = TableExprs{yyDollar[1].tableExpr} + } + case 440: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2441 + { + yyVAL.tableExprs = append(yyVAL.tableExprs, yyDollar[3].tableExpr) + } + case 444: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2452 + { + yyVAL.tableExpr = &TableValuedFuncTableExpr{FuncExpr: yyDollar[1].expr, As: yyDollar[2].tableIdent} + } + case 445: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2458 + { + yyVAL.tableExpr = yyDollar[1].aliasedTableName + } + case 446: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2462 + { + yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].subquery, As: yyDollar[3].tableIdent} + } + case 447: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2466 + { + yyVAL.tableExpr = &ParenTableExpr{Exprs: yyDollar[2].tableExprs} + } + case 448: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2470 + { + exec := yyDollar[2].statement.(*Exec) + yyVAL.tableExpr = &ExecSubquery{Exec: exec} + } + case 449: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2477 + { + yyVAL.subquery = &Subquery{yyDollar[2].selStmt} + } + case 450: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2483 + { + yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].tableIdent, Hints: yyDollar[3].indexHints} + } + case 451: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2487 + { + yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitions, As: yyDollar[6].tableIdent, Hints: yyDollar[7].indexHints} + } + case 452: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2492 + { + yyVAL.columns = nil + } + case 453: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2496 + { + yyVAL.columns = yyDollar[2].columns + } + case 454: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2503 + { + yyVAL.columns = Columns{yyDollar[1].colIdent} + } + case 455: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2507 + { + yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) + } + case 456: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2513 + { + yyVAL.partitions = Partitions{yyDollar[1].colIdent} + } + case 457: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2517 + { + yyVAL.partitions = append(yyVAL.partitions, yyDollar[3].colIdent) + } + case 458: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2530 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} + } + case 459: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2534 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} + } + case 460: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2538 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} + } + case 461: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2542 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr} + } + case 462: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2548 + { + yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} + } + case 463: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2550 + { + yyVAL.joinCondition = JoinCondition{Using: yyDollar[3].columns} + } + case 464: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2554 + { + yyVAL.joinCondition = JoinCondition{} + } + case 465: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2556 + { + yyVAL.joinCondition = yyDollar[1].joinCondition + } + case 466: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2560 + { + yyVAL.joinCondition = JoinCondition{} + } + case 467: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2562 + { + yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} + } + case 468: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2565 + { + yyVAL.empty = struct{}{} + } + case 469: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2567 + { + yyVAL.empty = struct{}{} + } + case 470: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2570 + { + yyVAL.tableIdent = NewTableIdent("") + } + case 471: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2574 + { + yyVAL.tableIdent = yyDollar[1].tableIdent + } + case 472: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2578 + { + yyVAL.tableIdent = yyDollar[2].tableIdent + } + case 474: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2585 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + } + case 475: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2591 + { + yyVAL.str = JoinStr + } + case 476: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2595 + { + yyVAL.str = JoinStr + } + case 477: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2599 + { + yyVAL.str = JoinStr + } + case 478: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2605 + { + yyVAL.str = StraightJoinStr + } + case 479: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2611 + { + yyVAL.str = LeftJoinStr + } + case 480: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2615 + { + yyVAL.str = LeftOuterJoinStr + } + case 481: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2619 + { + yyVAL.str = RightJoinStr + } + case 482: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2623 + { + yyVAL.str = RightOuterJoinStr + } + case 483: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2629 + { + yyVAL.str = NaturalJoinStr + } + case 484: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2633 + { + if yyDollar[2].str == LeftJoinStr { + yyVAL.str = NaturalLeftJoinStr + } else { + yyVAL.str = NaturalRightJoinStr + } + } + case 485: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2643 + { + yyVAL.tableName = yyDollar[2].tableName + } + case 486: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2647 + { + yyVAL.tableName = yyDollar[1].tableName + } + case 487: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2653 + { + yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent} + } + case 488: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2657 + { + yyVAL.tableName = TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent} + } + case 489: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2661 + { + yyVAL.tableName = TableName{QualifierSecond: yyDollar[1].tableIdent, Qualifier: yyDollar[3].tableIdent, Name: yyDollar[5].tableIdent} + } + case 490: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2665 + { + yyVAL.tableName = TableName{QualifierThird: yyDollar[1].tableIdent, QualifierSecond: yyDollar[3].tableIdent, Qualifier: yyDollar[5].tableIdent, Name: yyDollar[7].tableIdent} + } + case 491: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2671 + { + yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent} + } + case 492: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2676 + { + yyVAL.indexHints = nil + } + case 493: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2680 + { + yyVAL.indexHints = &IndexHints{Type: UseStr, Indexes: yyDollar[4].columns} + } + case 494: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2684 + { + yyVAL.indexHints = &IndexHints{Type: UseStr} + } + case 495: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2688 + { + yyVAL.indexHints = &IndexHints{Type: IgnoreStr, Indexes: yyDollar[4].columns} + } + case 496: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2692 + { + yyVAL.indexHints = &IndexHints{Type: ForceStr, Indexes: yyDollar[4].columns} + } + case 497: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2697 + { + yyVAL.expr = nil + } + case 498: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2701 + { + yyVAL.expr = yyDollar[2].expr + } + case 499: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2707 + { + yyVAL.expr = yyDollar[1].expr + } + case 500: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2711 + { + yyVAL.expr = &AndExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + } + case 501: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2715 + { + yyVAL.expr = &OrExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + } + case 502: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2719 + { + yyVAL.expr = &XorExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + } + case 503: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2723 + { + yyVAL.expr = &NotExpr{Expr: yyDollar[2].expr} + } + case 504: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2727 + { + yyVAL.expr = &IsExpr{Operator: yyDollar[3].str, Expr: yyDollar[1].expr} + } + case 505: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2731 + { + yyVAL.expr = &UnaryCastConcatamerExpr{Expr: yyDollar[1].expr, CastConcatamer: ListArgConcatamer(yyDollar[2].listArgsConcat)} + } + case 506: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2735 + { + yyVAL.expr = yyDollar[1].expr + } + case 507: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2739 + { + yyVAL.expr = &Default{ColName: yyDollar[2].str} + } + case 508: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2745 + { + yyVAL.str = "" + } + case 509: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2749 + { + yyVAL.str = string(yyDollar[2].colIdent.String()) + } + case 510: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2755 + { + yyVAL.boolVal = BoolVal(true) + } + case 511: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2759 + { + yyVAL.boolVal = BoolVal(false) + } + case 512: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2765 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: yyDollar[2].str, Right: yyDollar[3].expr} + } + case 513: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2769 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: InStr, Right: yyDollar[3].colTuple} + } + case 514: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2773 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotInStr, Right: yyDollar[4].colTuple} + } + case 515: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2777 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: LikeStr, Right: yyDollar[3].expr, Escape: yyDollar[4].expr} + } + case 516: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2781 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotLikeStr, Right: yyDollar[4].expr, Escape: yyDollar[5].expr} + } + case 517: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2785 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: RegexpStr, Right: yyDollar[3].expr} + } + case 518: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2789 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotRegexpStr, Right: yyDollar[4].expr} + } + case 519: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2793 + { + yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: BetweenStr, From: yyDollar[3].expr, To: yyDollar[5].expr} + } + case 520: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2797 + { + yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: NotBetweenStr, From: yyDollar[4].expr, To: yyDollar[6].expr} + } + case 521: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2801 + { + yyVAL.expr = &ExistsExpr{Subquery: yyDollar[2].subquery} + } + case 522: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2807 + { + yyVAL.str = IsNullStr + } + case 523: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2811 + { + yyVAL.str = IsNotNullStr + } + case 524: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2815 + { + yyVAL.str = IsTrueStr + } + case 525: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2819 + { + yyVAL.str = IsNotTrueStr + } + case 526: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2823 + { + yyVAL.str = IsFalseStr + } + case 527: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2827 + { + yyVAL.str = IsNotFalseStr + } + case 528: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2833 + { + yyVAL.str = EqualStr + } + case 529: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2837 + { + yyVAL.str = LessThanStr + } + case 530: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2841 + { + yyVAL.str = GreaterThanStr + } + case 531: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2845 + { + yyVAL.str = LessEqualStr + } + case 532: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2849 + { + yyVAL.str = GreaterEqualStr + } + case 533: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2853 + { + yyVAL.str = NotEqualStr + } + case 534: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2857 + { + yyVAL.str = NullSafeEqualStr + } + case 535: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2862 + { + yyVAL.expr = nil + } + case 536: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2866 + { + yyVAL.expr = yyDollar[2].expr + } + case 537: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2872 + { + yyVAL.colTuple = yyDollar[1].valTuple + } + case 538: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2876 + { + yyVAL.colTuple = yyDollar[1].subquery + } + case 539: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2880 + { + yyVAL.colTuple = ListArg(yyDollar[1].bytes) + } + case 540: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2885 + { + yyVAL.listArgsConcat = []ListArg{ListArg(yyDollar[1].bytes)} + } + case 541: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2889 + { + yyVAL.listArgsConcat = append(yyDollar[1].listArgsConcat, ListArg(yyDollar[2].bytes)) + } + case 542: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2895 + { + yyVAL.subquery = &Subquery{yyDollar[2].selStmt} + } + case 543: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2901 + { + yyVAL.exprs = Exprs{yyDollar[1].expr} + } + case 544: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2905 + { + yyVAL.exprs = append(yyDollar[1].exprs, yyDollar[3].expr) + } + case 545: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2911 + { + yyVAL.expr = yyDollar[1].expr + } + case 546: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2915 + { + yyVAL.expr = yyDollar[1].boolVal + } + case 547: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2919 + { + yyVAL.expr = yyDollar[1].colName + } + case 548: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2923 + { + yyVAL.expr = yyDollar[1].expr + } + case 549: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2927 + { + yyVAL.expr = yyDollar[1].subquery + } + case 550: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2931 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitAndStr, Right: yyDollar[3].expr} + } + case 551: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2935 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitOrStr, Right: yyDollar[3].expr} + } + case 552: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2939 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitXorStr, Right: yyDollar[3].expr} + } + case 553: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2943 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: PlusStr, Right: yyDollar[3].expr} + } + case 554: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2947 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MinusStr, Right: yyDollar[3].expr} + } + case 555: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2951 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MultStr, Right: yyDollar[3].expr} + } + case 556: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2955 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: DivStr, Right: yyDollar[3].expr} + } + case 557: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2959 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: IntDivStr, Right: yyDollar[3].expr} + } + case 558: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2963 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} + } + case 559: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2967 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} + } + case 560: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2971 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftLeftStr, Right: yyDollar[3].expr} + } + case 561: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2975 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftRightStr, Right: yyDollar[3].expr} + } + case 562: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2979 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONExtractOp, Right: yyDollar[3].expr} + } + case 563: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2983 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONUnquoteExtractOp, Right: yyDollar[3].expr} + } + case 564: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2987 + { + yyVAL.expr = &CollateExpr{Expr: yyDollar[1].expr, Charset: yyDollar[3].str} + } + case 565: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2991 + { + yyVAL.expr = &UnaryExpr{Operator: BinaryStr, Expr: yyDollar[2].expr} + } + case 566: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2995 + { + yyVAL.expr = &UnaryExpr{Operator: UBinaryStr, Expr: yyDollar[2].expr} + } + case 567: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:2999 + { + yyVAL.expr = &UnaryExpr{Operator: Utf8Str, Expr: yyDollar[2].expr} + } + case 568: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3003 + { + yyVAL.expr = &UnaryExpr{Operator: Utf8mb4Str, Expr: yyDollar[2].expr} + } + case 569: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3007 + { + yyVAL.expr = &UnaryExpr{Operator: Latin1Str, Expr: yyDollar[2].expr} + } + case 570: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3011 + { + if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { + yyVAL.expr = num + } else { + yyVAL.expr = &UnaryExpr{Operator: UPlusStr, Expr: yyDollar[2].expr} + } + } + case 571: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3019 + { + if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { + // Handle double negative + if num.Val[0] == '-' { + num.Val = num.Val[1:] + yyVAL.expr = num + } else { + yyVAL.expr = NewIntVal(append([]byte("-"), num.Val...)) + } + } else { + yyVAL.expr = &UnaryExpr{Operator: UMinusStr, Expr: yyDollar[2].expr} + } + } + case 572: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3033 + { + yyVAL.expr = &UnaryExpr{Operator: TildaStr, Expr: yyDollar[2].expr} + } + case 573: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3037 + { + yyVAL.expr = &UnaryExpr{Operator: BangStr, Expr: yyDollar[2].expr} + } + case 574: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3041 + { + // This rule prevents the usage of INTERVAL + // as a function. If support is needed for that, + // we'll need to revisit this. The solution + // will be non-trivial because of grammar conflicts. + yyVAL.expr = &IntervalExpr{Expr: yyDollar[2].expr, Unit: yyDollar[3].colIdent.String()} + } + case 575: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3049 + { + inputStr := string(yyDollar[2].bytes) + stringList := strings.Split(inputStr, " ") + if len(stringList) != 2 { + yylex.Error("the interval string '" + inputStr + "' is not valid") + return 1 + } + stringExpr := NewStrVal([]byte(stringList[0])) + yyVAL.expr = &IntervalExpr{Expr: stringExpr, Unit: stringList[1]} + } + case 580: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3066 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent(yyDollar[1].str), Exprs: yyDollar[3].selectExprs} + } + case 581: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3072 + { + yyVAL.str = JsonArrayElementsTextStr + } + case 582: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3076 + { + yyVAL.str = JsonEachStr + } + case 583: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3086 + { + yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Exprs: yyDollar[3].selectExprs, Over: yyDollar[5].overClause} + } + case 584: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3090 + { + yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprs, Over: yyDollar[6].overClause} + } + case 585: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3094 + { + yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprs, Over: yyDollar[6].overClause} + } + case 586: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3098 + { + yyVAL.expr = &FuncExpr{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].colIdent, Exprs: yyDollar[5].selectExprs} + } + case 587: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3108 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("left"), Exprs: yyDollar[3].selectExprs} + } + case 588: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3112 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("right"), Exprs: yyDollar[3].selectExprs} + } + case 589: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3116 + { + yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} + } + case 590: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3120 + { + yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} + } + case 591: + yyDollar = yyS[yypt-6 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3124 + { + yyVAL.expr = &ConvertUsingExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].str} + } + case 592: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3128 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + } + case 593: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3132 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + } + case 594: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3136 + { + yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} + } + case 595: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3140 + { + yyVAL.expr = &SubstrExpr{StrVal: NewStrVal(yyDollar[3].bytes), From: yyDollar[5].expr, To: yyDollar[7].expr} + } + case 596: + yyDollar = yyS[yypt-9 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3144 + { + yyVAL.expr = &MatchExpr{Columns: yyDollar[3].selectExprs, Expr: yyDollar[7].expr, Option: yyDollar[8].str} + } + case 597: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3148 + { + yyVAL.expr = &GroupConcatExpr{Distinct: yyDollar[3].str, Exprs: yyDollar[4].selectExprs, OrderBy: yyDollar[5].orderBy, Separator: yyDollar[6].str, Limit: yyDollar[7].limit} + } + case 598: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3152 + { + yyVAL.expr = &CaseExpr{Expr: yyDollar[2].expr, Whens: yyDollar[3].whens, Else: yyDollar[4].expr} + } + case 599: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3156 + { + yyVAL.expr = &ValuesFuncExpr{Name: yyDollar[3].colName} + } + case 600: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3166 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("current_timestamp")} + } + case 601: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3170 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_timestamp")} + } + case 602: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3174 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_time")} + } + case 603: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3179 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_date")} + } + case 604: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3184 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("localtime")} + } + case 605: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3189 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("localtimestamp")} + } + case 606: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3195 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("current_date")} + } + case 607: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3200 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("current_time")} + } + case 608: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3205 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("current_timestamp"), Fsp: yyDollar[2].expr} + } + case 609: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3209 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("utc_timestamp"), Fsp: yyDollar[2].expr} + } + case 610: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3213 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("utc_time"), Fsp: yyDollar[2].expr} + } + case 611: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3218 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("localtime"), Fsp: yyDollar[2].expr} + } + case 612: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3223 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("localtimestamp"), Fsp: yyDollar[2].expr} + } + case 613: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3228 + { + yyVAL.expr = &CurTimeFuncExpr{Name: NewColIdent("current_time"), Fsp: yyDollar[2].expr} + } + case 614: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3232 + { + yyVAL.expr = &TimestampFuncExpr{Name: string("timestampadd"), Unit: yyDollar[3].colIdent.String(), Expr1: yyDollar[5].expr, Expr2: yyDollar[7].expr} + } + case 615: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3236 + { + yyVAL.expr = &TimestampFuncExpr{Name: string("timestampdiff"), Unit: yyDollar[3].colIdent.String(), Expr1: yyDollar[5].expr, Expr2: yyDollar[7].expr} + } + case 618: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3246 + { + yyVAL.expr = yyDollar[2].expr + } + case 619: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3256 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("if"), Exprs: yyDollar[3].selectExprs} + } + case 620: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3260 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("database"), Exprs: yyDollar[3].selectExprs} + } + case 621: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3264 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("schema"), Exprs: yyDollar[3].selectExprs} + } + case 622: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3268 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("mod"), Exprs: yyDollar[3].selectExprs} + } + case 623: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3272 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("replace"), Exprs: yyDollar[3].selectExprs} + } + case 624: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3276 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("substr"), Exprs: yyDollar[3].selectExprs} + } + case 625: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3280 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("substr"), Exprs: yyDollar[3].selectExprs} + } + case 626: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3286 + { + yyVAL.str = "" + } + case 627: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3290 + { + yyVAL.str = BooleanModeStr + } + case 628: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3294 + { + yyVAL.str = NaturalLanguageModeStr + } + case 629: + yyDollar = yyS[yypt-7 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3298 + { + yyVAL.str = NaturalLanguageModeWithQueryExpansionStr + } + case 630: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3302 + { + yyVAL.str = QueryExpansionStr + } + case 631: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3310 + { + yyVAL.overClause = nil + } + case 632: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3314 + { + yyVAL.overClause = &OverClause{WindowSpec: yyDollar[3].windowSpec} + } + case 633: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3318 + { + yyVAL.overClause = &OverClause{WindowSpec: &WindowSpec{}} + } + case 634: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3322 + { + yyVAL.overClause = &OverClause{WindowName: yyDollar[2].colIdent} + } + case 635: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3328 + { + yyVAL.windowSpec = &WindowSpec{PartitionBy: yyDollar[1].exprs, OrderBy: yyDollar[2].orderBy, Frame: yyDollar[3].frameClause} + } + case 636: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3333 + { + yyVAL.exprs = nil + } + case 637: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3337 + { + yyVAL.exprs = yyDollar[3].exprs + } + case 638: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3342 + { + yyVAL.frameClause = nil + } + case 639: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3346 + { + yyVAL.frameClause = &FrameClause{Unit: RowsStr, Start: yyDollar[2].framePoint} + } + case 640: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3350 + { + yyVAL.frameClause = &FrameClause{Unit: RowsStr, Start: yyDollar[3].framePoint, End: yyDollar[5].framePoint} + } + case 641: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3354 + { + yyVAL.frameClause = &FrameClause{Unit: RangeStr, Start: yyDollar[2].framePoint} + } + case 642: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3358 + { + yyVAL.frameClause = &FrameClause{Unit: RangeStr, Start: yyDollar[3].framePoint, End: yyDollar[5].framePoint} + } + case 643: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3364 + { + yyVAL.framePoint = &FramePoint{Type: UnboundedPrecedingStr} + } + case 644: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3368 + { + yyVAL.framePoint = &FramePoint{Type: UnboundedFollowingStr} + } + case 645: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3372 + { + yyVAL.framePoint = &FramePoint{Type: CurrentRowStr} + } + case 646: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3376 + { + yyVAL.framePoint = &FramePoint{Type: PrecedingStr, Expr: yyDollar[1].expr} + } + case 647: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3380 + { + yyVAL.framePoint = &FramePoint{Type: FollowingStr, Expr: yyDollar[1].expr} + } + case 648: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3386 + { + yyVAL.withClause = nil + } + case 649: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3390 + { + yyVAL.withClause = &With{Recursive: false, CTEs: yyDollar[2].cteList} + } + case 650: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3394 + { + yyVAL.withClause = &With{Recursive: true, CTEs: yyDollar[3].cteList} + } + case 651: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3400 + { + yyVAL.cteList = []*CommonTableExpr{yyDollar[1].cte} + } + case 652: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3404 + { + yyVAL.cteList = append(yyDollar[1].cteList, yyDollar[3].cte) + } + case 653: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3410 + { + yyVAL.cte = &CommonTableExpr{Name: yyDollar[1].tableIdent, Subquery: &Subquery{Select: yyDollar[4].selStmt}} + } + case 654: + yyDollar = yyS[yypt-8 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3414 + { + yyVAL.cte = &CommonTableExpr{Name: yyDollar[1].tableIdent, Columns: yyDollar[3].columns, Subquery: &Subquery{Select: yyDollar[7].selStmt}} + } + case 655: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3420 + { + yyVAL.str = string(yyDollar[1].colIdent.String()) + } + case 656: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3424 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 657: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3430 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 658: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3434 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: yyDollar[3].str, Operator: CharacterSetStr} + } + case 659: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3438 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal, Charset: string(yyDollar[3].colIdent.String())} + } + case 660: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3442 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 661: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3446 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 662: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3450 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + yyVAL.convertType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.convertType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 663: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3456 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 664: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3460 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 665: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3464 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 666: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3468 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 667: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3472 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 668: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3476 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].sqlVal} + } + case 669: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3480 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 670: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3484 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 671: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3489 + { + yyVAL.expr = nil + } + case 672: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3493 + { + yyVAL.expr = yyDollar[1].expr + } + case 673: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3498 + { + yyVAL.str = string("") + } + case 674: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3502 + { + yyVAL.str = " separator '" + string(yyDollar[2].bytes) + "'" + } + case 675: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3508 + { + yyVAL.whens = []*When{yyDollar[1].when} + } + case 676: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3512 + { + yyVAL.whens = append(yyDollar[1].whens, yyDollar[2].when) + } + case 677: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3518 + { + yyVAL.when = &When{Cond: yyDollar[2].expr, Val: yyDollar[4].expr} + } + case 678: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3523 + { + yyVAL.expr = nil + } + case 679: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3527 + { + yyVAL.expr = yyDollar[2].expr + } + case 680: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3533 + { + yyVAL.colName = &ColName{Name: yyDollar[1].colIdent} + } + case 681: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3537 + { + yyVAL.colName = &ColName{Qualifier: TableName{Name: yyDollar[1].tableIdent}, Name: yyDollar[3].colIdent} + } + case 682: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3541 + { + yyVAL.colName = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}, Name: yyDollar[5].colIdent} + } + case 683: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3547 + { + yyVAL.expr = NewStrVal(yyDollar[1].bytes) + } + case 684: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3551 + { + yyVAL.expr = NewHexVal(yyDollar[1].bytes) + } + case 685: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3555 + { + yyVAL.expr = NewBitVal(yyDollar[1].bytes) + } + case 686: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3559 + { + yyVAL.expr = NewIntVal(yyDollar[1].bytes) + } + case 687: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3563 + { + yyVAL.expr = NewFloatVal(yyDollar[1].bytes) + } + case 688: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3567 + { + yyVAL.expr = NewHexNum(yyDollar[1].bytes) + } + case 689: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3571 + { + yyVAL.expr = NewValArg(yyDollar[1].bytes) + } + case 690: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3575 + { + yyVAL.expr = &NullVal{} + } + case 691: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3581 + { + // TODO(sougou): Deprecate this construct. + if yyDollar[1].colIdent.Lowered() != "value" { + yylex.Error("expecting value after next") + return 1 + } + yyVAL.expr = NewIntVal([]byte("1")) + } + case 692: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3590 + { + yyVAL.expr = NewIntVal(yyDollar[1].bytes) + } + case 693: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3594 + { + yyVAL.expr = NewValArg(yyDollar[1].bytes) + } + case 694: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3599 + { + yyVAL.exprs = nil + } + case 695: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3603 + { + yyVAL.exprs = yyDollar[3].exprs + } + case 696: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3608 + { + yyVAL.expr = nil + } + case 697: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3612 + { + yyVAL.expr = yyDollar[2].expr + } + case 698: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3617 + { + yyVAL.orderBy = nil + } + case 699: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3621 + { + yyVAL.orderBy = yyDollar[3].orderBy + } + case 700: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3627 + { + yyVAL.orderBy = OrderBy{yyDollar[1].order} + } + case 701: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3631 + { + yyVAL.orderBy = append(yyDollar[1].orderBy, yyDollar[3].order) + } + case 702: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3637 + { + yyVAL.order = &Order{Expr: yyDollar[1].expr, Direction: yyDollar[2].str} + } + case 703: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3642 + { + yyVAL.str = AscScr + } + case 704: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3646 + { + yyVAL.str = AscScr + } + case 705: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3650 + { + yyVAL.str = DescScr + } + case 706: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3655 + { + yyVAL.limit = nil + } + case 707: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3659 + { + yyVAL.limit = &Limit{Rowcount: yyDollar[2].expr} + } + case 708: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3663 + { + yyVAL.limit = &Limit{Offset: yyDollar[2].expr, Rowcount: yyDollar[4].expr} + } + case 709: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3667 + { + yyVAL.limit = &Limit{Offset: yyDollar[4].expr, Rowcount: yyDollar[2].expr} + } + case 710: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3672 + { + yyVAL.str = "" + } + case 711: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3676 + { + yyVAL.str = ForUpdateStr + } + case 712: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3680 + { + yyVAL.str = ShareModeStr + } + case 713: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3693 + { + yyVAL.ins = &Insert{Rows: yyDollar[2].values} + } + case 714: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3697 + { + yyVAL.ins = &Insert{Rows: yyDollar[1].selStmt} + } + case 715: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3701 + { + yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].values} + } + case 716: + yyDollar = yyS[yypt-4 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3705 + { + yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[4].selStmt} + } + case 717: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3711 + { + yyVAL.columns = Columns{yyDollar[1].colIdent} + } + case 718: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3715 + { + yyVAL.columns = Columns{yyDollar[3].colIdent} + } + case 719: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3719 + { + yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) + } + case 720: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3723 + { + yyVAL.columns = append(yyVAL.columns, yyDollar[5].colIdent) + } + case 721: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3728 + { + yyVAL.updateExprs = nil + } + case 722: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3732 + { + yyVAL.updateExprs = yyDollar[1].updateExprs + } + case 723: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3738 + { + yyVAL.updateExprs = yyDollar[5].updateExprs + } + case 724: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3743 + { + yyVAL.selectExprs = nil + } + case 725: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3747 + { + yyVAL.selectExprs = yyDollar[1].selectExprs + } + case 726: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3753 + { + yyVAL.selectExprs = yyDollar[2].selectExprs + } + case 727: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3759 + { + yyVAL.values = Values{yyDollar[1].valTuple} + } + case 728: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3763 + { + yyVAL.values = append(yyDollar[1].values, yyDollar[3].valTuple) + } + case 729: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3769 + { + yyVAL.valTuple = yyDollar[1].valTuple + } + case 730: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3773 + { + yyVAL.valTuple = ValTuple{} + } + case 731: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3779 + { + yyVAL.valTuple = ValTuple(yyDollar[2].exprs) + } + case 732: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3785 + { + if len(yyDollar[1].valTuple) == 1 { + yyVAL.expr = yyDollar[1].valTuple[0] + } else { + yyVAL.expr = yyDollar[1].valTuple + } + } + case 733: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3795 + { + yyVAL.updateExprs = UpdateExprs{yyDollar[1].updateExpr} + } + case 734: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3799 + { + yyVAL.updateExprs = append(yyDollar[1].updateExprs, yyDollar[3].updateExpr) + } + case 735: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3805 + { + yyVAL.updateExpr = &UpdateExpr{Name: yyDollar[1].colName, Expr: yyDollar[3].expr} + } + case 736: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3811 + { + yyVAL.setExprs = SetExprs{yyDollar[1].setExpr} + } + case 737: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3815 + { + yyDollar[2].setExpr.Scope = yyDollar[1].str + yyVAL.setExprs = SetExprs{yyDollar[2].setExpr} + } + case 738: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3820 + { + yyVAL.setExprs = append(yyDollar[1].setExprs, yyDollar[3].setExpr) + } + case 739: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3826 + { + yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: NewStrVal([]byte("on"))} + } + case 740: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3830 + { + yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: NewStrVal([]byte("off"))} + } + case 741: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3834 + { + yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: yyDollar[3].expr} + } + case 742: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3838 + { + yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: NewStrVal([]byte("on"))} + } + case 743: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3842 + { + yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: NewStrVal([]byte("off"))} + } + case 744: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3846 + { + yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: yyDollar[3].expr} + } + case 745: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3850 + { + yyVAL.setExpr = &SetExpr{Name: NewColIdent(string(yyDollar[1].bytes)), Expr: yyDollar[2].expr} + } + case 747: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3857 + { + yyVAL.bytes = []byte("charset") + } + case 749: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3864 + { + yyVAL.expr = NewStrVal([]byte(yyDollar[1].colIdent.String())) + } + case 750: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3868 + { + yyVAL.expr = NewStrVal(yyDollar[1].bytes) + } + case 751: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3872 + { + yyVAL.expr = &Default{} + } + case 754: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3881 + { + yyVAL.byt = 0 + } + case 755: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3883 + { + yyVAL.byt = 1 + } + case 756: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3886 + { + yyVAL.byt = 0 + } + case 757: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3888 + { + yyVAL.byt = 1 + } + case 758: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3891 + { + yyVAL.str = "" + } + case 759: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3893 + { + yyVAL.str = IgnoreStr + } + case 760: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3897 + { + yyVAL.empty = struct{}{} + } + case 761: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3899 + { + yyVAL.empty = struct{}{} + } + case 762: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3901 + { + yyVAL.empty = struct{}{} + } + case 763: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3903 + { + yyVAL.empty = struct{}{} + } + case 764: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3905 + { + yyVAL.empty = struct{}{} + } + case 765: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3907 + { + yyVAL.empty = struct{}{} + } + case 766: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3909 + { + yyVAL.empty = struct{}{} + } + case 767: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3911 + { + yyVAL.empty = struct{}{} + } + case 768: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3913 + { + yyVAL.empty = struct{}{} + } + case 769: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3915 + { + yyVAL.empty = struct{}{} + } + case 770: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3918 + { + yyVAL.empty = struct{}{} + } + case 771: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3920 + { + yyVAL.empty = struct{}{} + } + case 772: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3922 + { + yyVAL.empty = struct{}{} + } + case 773: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3926 + { + yyVAL.empty = struct{}{} + } + case 774: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3928 + { + yyVAL.empty = struct{}{} + } + case 775: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3931 + { + yyVAL.empty = struct{}{} + } + case 776: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3933 + { + yyVAL.empty = struct{}{} + } + case 777: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3935 + { + yyVAL.empty = struct{}{} + } + case 778: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3938 + { + yyVAL.colIdent = ColIdent{} + } + case 779: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3940 + { + yyVAL.colIdent = yyDollar[2].colIdent + } + case 780: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3944 + { + yyVAL.colIdent = yyDollar[1].colIdent + } + case 781: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3948 + { + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + } + case 783: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3955 + { + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + } + case 784: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3961 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].colIdent.String())) + } + case 785: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3965 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + } + case 787: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3972 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + } + case 788: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3978 + { + yyVAL.execVarDef = NewExecVarDef(yyDollar[1].colIdent, yyDollar[3].expr) + } + case 789: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3984 + { + yyVAL.execVarDef = NewExecVarDef(yyDollar[1].colIdent, yyDollar[3].expr) + } + case 790: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3990 + { + yyVAL.execVarDefOpt = nil + } + case 791: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3992 + { + rv := yyDollar[1].execVarDef + yyVAL.execVarDefOpt = &rv + } + case 792: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:3998 + { + yyVAL.execVarDefs = nil + } + case 793: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4000 + { + yyVAL.execVarDefs = []ExecVarDef{yyDollar[1].execVarDef} + } + case 794: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4004 + { + yyVAL.execVarDefs = append(yyDollar[1].execVarDefs, yyDollar[3].execVarDef) + } + case 795: + yyDollar = yyS[yypt-5 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4011 + { + yyVAL.statement = NewExec(yyDollar[2].bytes2, yyDollar[3].tableName, yyDollar[4].execVarDefs, yyDollar[5].execVarDefOpt) + } + case 796: + yyDollar = yyS[yypt-2 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4017 + { + yyVAL.statement = NewPurge(yyDollar[2].bytes2, TableName{}, true) + } + case 797: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4021 + { + yyVAL.statement = NewPurge(yyDollar[2].bytes2, yyDollar[3].tableName, false) + } + case 798: + yyDollar = yyS[yypt-3 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4027 + { + yyVAL.statement = NewNativeQuery(yyDollar[2].bytes2, string(yyDollar[3].bytes)) + } + case 1108: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4363 + { + if incNesting(yylex) { + yylex.Error("max nesting level reached") + return 1 + } + } + case 1109: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4372 + { + decNesting(yylex) + } + case 1110: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4377 + { + skipToEnd(yylex) + } + case 1111: + yyDollar = yyS[yypt-0 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4382 + { + skipToEnd(yylex) + } + case 1112: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4386 + { + skipToEnd(yylex) + } + case 1113: + yyDollar = yyS[yypt-1 : yypt+1] +//line /home/user/stackql-devel/internal/stackql-parser-fork/go/vt/sqlparser/sql.y:4390 + { + skipToEnd(yylex) + } + } + goto yystack /* stack new state and value */ +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/sql.y b/internal/stackql-parser-fork/go/vt/sqlparser/sql.y new file mode 100644 index 00000000..ccf060ef --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/sql.y @@ -0,0 +1,4392 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +%{ +package sqlparser + +import "strings" + +func setParseTree(yylex interface{}, stmt Statement) { + yylex.(*Tokenizer).ParseTree = stmt +} + +func setAllowComments(yylex interface{}, allow bool) { + yylex.(*Tokenizer).AllowComments = allow +} + +func setDDL(yylex interface{}, ddl *DDL) { + yylex.(*Tokenizer).partialDDL = ddl +} + +func incNesting(yylex interface{}) bool { + yylex.(*Tokenizer).nesting++ + if yylex.(*Tokenizer).nesting == 200 { + return true + } + return false +} + +func decNesting(yylex interface{}) { + yylex.(*Tokenizer).nesting-- +} + +// skipToEnd forces the lexer to end prematurely. Not all SQL statements +// are supported by the Parser, thus calling skipToEnd will make the lexer +// return EOF early. +func skipToEnd(yylex interface{}) { + yylex.(*Tokenizer).SkipToEnd = true +} + +%} + +%union { + empty struct{} + statement Statement + selStmt SelectStatement + ddl *DDL + ins *Insert + byt byte + bytes []byte + bytes2 [][]byte + str string + strs []string + selectExprs SelectExprs + selectExpr SelectExpr + columns Columns + partitions Partitions + colName *ColName + tableExprs TableExprs + tableExpr TableExpr + joinCondition JoinCondition + tableName TableName + tableNames TableNames + indexHints *IndexHints + expr Expr + exprs Exprs + boolVal BoolVal + sqlVal *SQLVal + colTuple ColTuple + values Values + valTuple ValTuple + subquery *Subquery + execsubquery *ExecSubquery + whens []*When + when *When + orderBy OrderBy + order *Order + limit *Limit + updateExprs UpdateExprs + setExprs SetExprs + updateExpr *UpdateExpr + setExpr *SetExpr + characteristic Characteristic + characteristics []Characteristic + colIdent ColIdent + tableIdent TableIdent + convertType *ConvertType + aliasedTableName *AliasedTableExpr + TableSpec *TableSpec + columnType ColumnType + colKeyOpt ColumnKeyOption + optVal Expr + LengthScaleOption LengthScaleOption + columnDefinition *ColumnDefinition + indexDefinition *IndexDefinition + indexInfo *IndexInfo + indexOption *IndexOption + indexOptions []*IndexOption + indexColumn *IndexColumn + indexColumns []*IndexColumn + constraintDefinition *ConstraintDefinition + constraintInfo ConstraintInfo + ReferenceAction ReferenceAction + partDefs []*PartitionDefinition + partDef *PartitionDefinition + partSpec *PartitionSpec + vindexParam VindexParam + vindexParams []VindexParam + showFilter *ShowFilter + optLike *OptLike + execVarDef ExecVarDef + execVarDefOpt *ExecVarDef + execVarDefs []ExecVarDef + listArgsConcat []ListArg + overClause *OverClause + windowSpec *WindowSpec + frameClause *FrameClause + framePoint *FramePoint + withClause *With + cteList []*CommonTableExpr + cte *CommonTableExpr +} + +%token LEX_ERROR +%left UNION +%token SELECT STREAM INSERT UPDATE DELETE FROM WHERE GROUP HAVING ORDER BY LIMIT OFFSET FOR +%token ALL DISTINCT AS EXISTS ASC DESC INTO DUPLICATE KEY DEFAULT SET LOCK UNLOCK KEYS DO +%token DISTINCTROW +%token VALUES LAST_INSERT_ID +%token NEXT VALUE SHARE MODE +%token SQL_NO_CACHE SQL_CACHE SQL_CALC_FOUND_ROWS +%left JOIN STRAIGHT_JOIN LEFT RIGHT INNER OUTER CROSS NATURAL USE FORCE +%left ON USING +%token '(' ',' ')' +%token ID AT_ID AT_AT_ID HEX STRING INTEGRAL FLOAT HEXNUM VALUE_ARG LIST_ARG COMMENT COMMENT_KEYWORD BIT_LITERAL +%token NULL TRUE FALSE OFF + +// Precedence dictated by mysql. But the vitess grammar is simplified. +// Some of these operators don't conflict in our situation. Nevertheless, +// it's better to have these listed in the correct order. Also, we don't +// support all operators yet. +// * NOTE: If you change anything here, update precedence.go as well * +%left OR +%left XOR +%left AND +%right NOT '!' +%left BETWEEN CASE WHEN THEN ELSE END +%left '=' '<' '>' LE GE NE NULL_SAFE_EQUAL IS LIKE REGEXP IN +%left '|' +%left '&' +%left SHIFT_LEFT SHIFT_RIGHT +%left '+' '-' +%left '*' '/' DIV '%' MOD +%left '^' +%right '~' UNARY +%left COLLATE +%right BINARY UNDERSCORE_BINARY UNDERSCORE_UTF8MB4 UNDERSCORE_UTF8 UNDERSCORE_LATIN1 +%right INTERVAL +%nonassoc '.' + +// There is no need to define precedence for the JSON +// operators because the syntax is restricted enough that +// they don't cause conflicts. +%token JSON_EXTRACT_OP JSON_UNQUOTE_EXTRACT_OP + +// DDL Tokens +%token CREATE ALTER DROP RENAME ANALYZE ADD FLUSH +%token SCHEMA TABLE INDEX VIEW TO IGNORE IF UNIQUE PRIMARY COLUMN SPATIAL FULLTEXT KEY_BLOCK_SIZE CHECK INDEXES +%token ACTION CASCADE CONSTRAINT FOREIGN NO REFERENCES RESTRICT +%token SHOW DESCRIBE EXPLAIN DATE ESCAPE REPAIR OPTIMIZE TRUNCATE +%token MAXVALUE PARTITION REORGANIZE LESS THAN PROCEDURE TRIGGER +%token VINDEX VINDEXES +%token STATUS VARIABLES WARNINGS +%token SEQUENCE + +// Transaction Tokens +%token BEGIN START TRANSACTION COMMIT ROLLBACK SAVEPOINT RELEASE WORK + +// Type Tokens +%token BIT TINYINT SMALLINT MEDIUMINT INT INTEGER BIGINT INTNUM +%token REAL DOUBLE FLOAT_TYPE DECIMAL NUMERIC +%token TIME TIMESTAMP DATETIME YEAR +%token CHAR VARCHAR BOOL CHARACTER VARBINARY NCHAR +%token TEXT TINYTEXT MEDIUMTEXT LONGTEXT +%token BLOB TINYBLOB MEDIUMBLOB LONGBLOB JSON ENUM +%token GEOMETRY POINT LINESTRING POLYGON GEOMETRYCOLLECTION MULTIPOINT MULTILINESTRING MULTIPOLYGON + +// Type Modifiers +%token NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL + +// Supported SHOW tokens +%token COLLATION DATABASES TABLES VITESS_METADATA VSCHEMA FULL PROCESSLIST COLUMNS FIELDS ENGINES PLUGINS EXTENDED + +// SET tokens +%token NAMES CHARSET GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE + +// Functions +%token CURRENT_TIMESTAMP DATABASE CURRENT_DATE +%token CURRENT_TIME LOCALTIME LOCALTIMESTAMP +%token UTC_DATE UTC_TIME UTC_TIMESTAMP +%token REPLACE +%token CONVERT CAST +%token SUBSTR SUBSTRING +%token GROUP_CONCAT SEPARATOR +%token TIMESTAMPADD TIMESTAMPDIFF + +// Match +%token MATCH AGAINST BOOLEAN LANGUAGE WITH QUERY EXPANSION + +// Window function tokens +%token OVER ROWS RANGE UNBOUNDED PRECEDING FOLLOWING CURRENT ROW + +// CTE tokens +%token RECURSIVE + +// MySQL reserved words that are unused by this grammar will map to this token. +%token UNUSED ARRAY CUME_DIST DESCRIPTION DENSE_RANK EMPTY EXCEPT FIRST_VALUE GROUPING GROUPS JSON_TABLE LAG LAST_VALUE LATERAL LEAD MEMBER +%token NTH_VALUE NTILE OF PERCENT_RANK RANK ROW_NUMBER SYSTEM WINDOW +%token ACTIVE ADMIN BUCKETS CLONE COMPONENT DEFINITION ENFORCED EXCLUDE GEOMCOLLECTION GET_MASTER_PUBLIC_KEY HISTOGRAM HISTORY +%token INACTIVE INVISIBLE LOCKED MASTER_COMPRESSION_ALGORITHMS MASTER_PUBLIC_KEY_PATH MASTER_TLS_CIPHERSUITES MASTER_ZSTD_COMPRESSION_LEVEL +%token NESTED NETWORK_NAMESPACE NOWAIT NULLS OJ OLD OPTIONAL ORDINALITY ORGANIZATION OTHERS PATH PERSIST PERSIST_ONLY PRIVILEGE_CHECKS_USER PROCESS +%token RANDOM REFERENCE REQUIRE_ROW_FORMAT RESOURCE RESPECT RESTART RETAIN REUSE ROLE SECONDARY SECONDARY_ENGINE SECONDARY_LOAD SECONDARY_UNLOAD SKIP SRID +%token THREAD_PRIORITY TIES VCPU VISIBLE + +// Explain tokens +%token FORMAT TREE VITESS TRADITIONAL + +// Auth tokens +%token AUTH INTERACTIVE LOGIN REVOKE SA SERVICEACCOUNT SLEEP + +// View and table modifier tokens +%token MATERIALIZED TEMP TEMPORARY + +// Materialized view admin tokens +%token REFRESH + +// Table valued and unnest function tokens +%token JSON_ARRAY_ELEMENTS_TEXT JSON_EACH UNNEST + +// Registry tokens +%token REGISTRY PULL LIST + +// Exec tokens +%token EXEC + +// Purge tokens +%token PURGE + +// NativeQuery tokens +%token NATIVEQUERY + +// stackql +%token STACKQL + +// returning +%token RETURNING + +%type command +%type simple_select select_statement base_select union_rhs +%type explain_statement explainable_statement +%type stream_statement insert_statement update_statement delete_statement set_statement set_transaction_statement +%type create_statement alter_statement rename_statement drop_statement truncate_statement flush_statement do_statement refresh_stmt +%type create_table_prefix rename_list +%type analyze_statement show_statement use_statement other_statement +%type begin_statement commit_statement rollback_statement savepoint_statement release_statement +%type auth_statement exec_stmt sleep_stmt registry_stmt purge_stmt nativequery_stmt +%type infraql_opt +%type comment_opt comment_list +%type union_op insert_only update_or_replace explain_format_opt wild_opt +%type explain_synonyms +%type distinct_opt cache_opt match_option separator_opt +%type auth_type +%type cardinality_expansion_function_name +%type like_escape_opt +%type select_expression_list select_expression_list_opt returning_opt returning +%type select_expression +%type select_options +%type select_option +%type expression +%type from_opt table_references +%type table_reference table_factor join_table table_valued_func +%type join_condition join_condition_opt on_expression_opt +%type table_name_list delete_table_list +%type inner_join outer_join straight_join natural_join +%type table_name into_table_name delete_table_name +%type aliased_table_name +%type index_hint_list +%type where_expression_opt +%type condition +%type boolean_value +%type compare +%type insert_data +%type value value_expression num_val +%type function_call_keyword function_call_nonkeyword function_call_generic function_call_conflict func_datetime_precision function_call_table_valued +%type is_suffix +%type col_tuple +%type expression_list +%type tuple_list +%type row_tuple tuple_or_empty +%type tuple_expression +%type subquery derived_table +%type column_name +%type when_expression_list +%type when_expression +%type expression_opt else_expression_opt +%type group_by_opt +%type having_opt +%type order_by_opt order_list +%type order +%type asc_desc_opt +%type limit_opt +%type lock_opt +%type ins_column_list column_list opt_column_list +%type opt_partition_clause partition_list +%type on_dup_opt on_dup +%type update_list +%type set_list +%type charset_or_character_set +%type update_expression +%type set_expression +%type transaction_char +%type transaction_chars +%type isolation_level view_modifier table_modifier +%type for_from +%type ignore_opt default_opt +%type full_opt from_database_opt tables_or_processlist columns_or_fields extended_opt +%type like_or_where_opt like_opt +%type exists_opt not_exists_opt +%type non_add_drop_or_rename_operation to_opt index_opt constraint_opt +%type reserved_keyword non_reserved_keyword +%type sql_id reserved_sql_id col_alias as_ci_opt using_opt +%type charset_value +%type table_id reserved_table_id table_alias as_opt_id +%type as_opt work_opt savepoint_opt +%type skip_to_end ddl_skip_to_end +%type charset +%type set_session_or_global show_session_or_global +%type convert_type +%type column_type +%type int_type decimal_type numeric_type time_type char_type spatial_type +%type length_opt column_comment_opt +%type column_default_opt on_update_opt +%type charset_opt collate_opt +%type unsigned_opt zero_fill_opt +%type float_length_opt decimal_length_opt +%type null_opt auto_increment_opt +%type column_key_opt +%type enum_values +%type column_definition +%type index_definition +%type constraint_definition +%type index_or_key index_symbols from_or_in +%type name_opt +%type equal_opt +%type table_spec table_column_list +%type create_like +%type table_option_list table_option table_opt_value +%type index_info +%type index_column +%type index_column_list +%type index_option +%type index_option_list +%type constraint_info +%type partition_definitions +%type partition_definition +%type partition_operation +%type vindex_param +%type vindex_param_list vindex_params_opt +%type id_or_var vindex_type vindex_type_opt at_id at_at_id +%type alter_object_type +%type fk_reference_action fk_on_delete fk_on_update +%type exec_var exec_payload +%type opt_exec_payload +%type exec_var_list +%type list_arg_concatamer +%type over_clause_opt +%type window_spec +%type frame_clause_opt +%type frame_point +%type partition_by_opt +%type with_clause_opt +%type cte_list +%type cte + +%start any_command + +%% + +any_command: + command semicolon_opt + { + setParseTree(yylex, $1) + } + +semicolon_opt: +/*empty*/ {} +| ';' {} + +command: + select_statement + { + $$ = $1 + } +| stream_statement +| insert_statement +| update_statement +| delete_statement +| set_statement +| set_transaction_statement +| create_statement +| alter_statement +| rename_statement +| drop_statement +| truncate_statement +| analyze_statement +| show_statement +| use_statement +| begin_statement +| commit_statement +| rollback_statement +| savepoint_statement +| release_statement +| explain_statement +| other_statement +| flush_statement +| do_statement +| auth_statement +| registry_stmt +| exec_stmt +| sleep_stmt +| purge_stmt +| nativequery_stmt +| refresh_stmt +| /*empty*/ +{ + setParseTree(yylex, nil) +} + +id_or_var: + ID + { + $$ = NewColIdentWithAt(string($1), NoAt) + } +| AT_ID + { + $$ = NewColIdentWithAt(string($1), SingleAt) + } +| AT_AT_ID + { + $$ = NewColIdentWithAt(string($1), DoubleAt) + } + +at_id: +AT_ID + { + $$ = NewColIdentWithAt(string($1), SingleAt) + } + +at_at_id: +AT_AT_ID + { + $$ = NewColIdentWithAt(string($1), DoubleAt) + } + +do_statement: + DO expression_list + { + $$ = &OtherAdmin{} + } + +select_statement: + base_select order_by_opt limit_opt lock_opt + { + sel := $1.(*Select) + sel.OrderBy = $2 + sel.Limit = $3 + sel.Lock = $4 + $$ = sel + } +| openb select_statement union_op union_rhs order_by_opt limit_opt lock_opt closeb + { + $$ = Unionize($2, $4, $3, $5, $6, $7) + } +| select_statement union_op union_rhs order_by_opt limit_opt lock_opt + { + $$ = Unionize($1, $3, $2, $4, $5, $6) + } +| SELECT comment_opt cache_opt NEXT num_val for_from table_name + { + $$ = NewSelect(Comments($2), SelectExprs{Nextval{Expr: $5}}, []string{$3}/*options*/, TableExprs{&AliasedTableExpr{Expr: $7}}, nil/*where*/, nil/*groupBy*/, nil/*having*/) + } + +// simple_select is an unparenthesized select used for subquery. +// Allowing parenthesis for subqueries leads to grammar ambiguity. +// MySQL also seems to have run into this and resolved it the same way. +// The specific ambiguity comes from the fact that parenthesis means +// many things: +// 1. Grouping: (select id from t) order by id +// 2. Tuple: id in (1, 2, 3) +// 3. Subquery: id in (select id from t) +// Example: +// ((select id from t)) +// Interpretation 1: inner () is for subquery (rule 3), and outer () +// is Tuple (rule 2), which degenerates to a simple expression +// for single value expressions. +// Interpretation 2: inner () is for grouping (rule 1), and outer +// is for subquery. +// Not allowing parenthesis for subselects will force the above +// construct to use the first interpretation. +simple_select: + base_select order_by_opt limit_opt lock_opt + { + sel := $1.(*Select) + sel.OrderBy = $2 + sel.Limit = $3 + sel.Lock = $4 + $$ = sel + } +| simple_select union_op union_rhs order_by_opt limit_opt lock_opt + { + $$ = Unionize($1, $3, $2, $4, $5, $6) + } + +stream_statement: + STREAM comment_opt select_expression FROM table_name + { + $$ = &Stream{Comments: Comments($2), SelectExpr: $3, Table: $5} + } + +// base_select is an unparenthesized SELECT with no order by clause or beyond. +base_select: +// 1 2 3 4 5 6 7 8 + SELECT comment_opt select_options select_expression_list from_opt where_expression_opt group_by_opt having_opt + { + $$ = NewSelect(Comments($2), $4/*SelectExprs*/, $3/*options*/, $5/*from*/, NewWhere(WhereStr, $6), GroupBy($7), NewWhere(HavingStr, $8)) + } +| WITH cte_list SELECT comment_opt select_options select_expression_list from_opt where_expression_opt group_by_opt having_opt + { + sel := NewSelect(Comments($4), $6/*SelectExprs*/, $5/*options*/, $7/*from*/, NewWhere(WhereStr, $8), GroupBy($9), NewWhere(HavingStr, $10)) + sel.With = &With{Recursive: false, CTEs: $2} + $$ = sel + } +| WITH RECURSIVE cte_list SELECT comment_opt select_options select_expression_list from_opt where_expression_opt group_by_opt having_opt + { + sel := NewSelect(Comments($5), $7/*SelectExprs*/, $6/*options*/, $8/*from*/, NewWhere(WhereStr, $9), GroupBy($10), NewWhere(HavingStr, $11)) + sel.With = &With{Recursive: true, CTEs: $3} + $$ = sel + } + +union_rhs: + base_select + { + $$ = $1 + } +| openb select_statement closeb + { + $$ = &ParenSelect{Select: $2} + } + + +insert_statement: + insert_only comment_opt ignore_opt into_table_name opt_partition_clause insert_data on_dup returning_opt + { + // insert_data returns a *Insert pre-filled with Columns & Values + ins := $6 + ins.Action = $1 + ins.Comments = $2 + ins.Ignore = $3 + ins.Table = $4 + ins.Partitions = $5 + ins.OnDup = OnDup($7) + ins.SelectExprs = $8 + $$ = ins + } +| insert_only comment_opt ignore_opt into_table_name opt_partition_clause insert_data returning + { + // insert_data returns a *Insert pre-filled with Columns & Values + ins := $6 + ins.Action = $1 + ins.Comments = $2 + ins.Ignore = $3 + ins.Table = $4 + ins.Partitions = $5 + ins.OnDup = OnDup(nil) + ins.SelectExprs = $7 + $$ = ins + } +| insert_only comment_opt ignore_opt into_table_name opt_partition_clause insert_data on_dup_opt + { + // insert_data returns a *Insert pre-filled with Columns & Values + ins := $6 + ins.Action = $1 + ins.Comments = $2 + ins.Ignore = $3 + ins.Table = $4 + ins.Partitions = $5 + ins.OnDup = OnDup($7) + $$ = ins + } + +insert_only: + INSERT + { + $$ = InsertStr + } + +update_or_replace: + UPDATE + { + $$ = UpdateStr + } +| REPLACE + { + $$ = ReplaceStr + } + +update_statement: + update_or_replace comment_opt ignore_opt table_references SET update_list from_opt where_expression_opt order_by_opt limit_opt returning_opt + { + $$ = &Update{Action: $1, Comments: Comments($2), Ignore: $3, TableExprs: $4, Exprs: $6, From: $7, Where: NewWhere(WhereStr, $8), OrderBy: $9, Limit: $10, SelectExprs: $11} + } + +delete_statement: + DELETE comment_opt FROM table_name opt_partition_clause where_expression_opt order_by_opt limit_opt returning_opt + { + $$ = &Delete{Comments: Comments($2), TableExprs: TableExprs{&AliasedTableExpr{Expr:$4}}, Partitions: $5, Where: NewWhere(WhereStr, $6), OrderBy: $7, Limit: $8, SelectExprs: $9} + } +| DELETE comment_opt FROM table_name_list USING table_references where_expression_opt returning_opt + { + $$ = &Delete{Comments: Comments($2), Targets: $4, TableExprs: $6, Where: NewWhere(WhereStr, $7), SelectExprs: $8} + } +| DELETE comment_opt table_name_list from_or_using table_references where_expression_opt returning_opt + { + $$ = &Delete{Comments: Comments($2), Targets: $3, TableExprs: $5, Where: NewWhere(WhereStr, $6), SelectExprs: $7} + } +|DELETE comment_opt delete_table_list from_or_using table_references where_expression_opt returning_opt + { + $$ = &Delete{Comments: Comments($2), Targets: $3, TableExprs: $5, Where: NewWhere(WhereStr, $6), SelectExprs: $7} + } + +from_or_using: + FROM {} +| USING {} + +table_name_list: + table_name + { + $$ = TableNames{$1} + } +| table_name_list ',' table_name + { + $$ = append($$, $3) + } + +delete_table_list: + delete_table_name + { + $$ = TableNames{$1} + } +| delete_table_list ',' delete_table_name + { + $$ = append($$, $3) + } + +opt_partition_clause: + { + $$ = nil + } +| PARTITION openb partition_list closeb + { + $$ = $3 + } + +set_statement: + SET comment_opt set_list + { + $$ = &Set{Comments: Comments($2), Exprs: $3} + } + +set_transaction_statement: + SET comment_opt set_session_or_global TRANSACTION transaction_chars + { + $$ = &SetTransaction{Comments: Comments($2), Scope: $3, Characteristics: $5} + } +| SET comment_opt TRANSACTION transaction_chars + { + $$ = &SetTransaction{Comments: Comments($2), Characteristics: $4} + } + +transaction_chars: + transaction_char + { + $$ = []Characteristic{$1} + } +| transaction_chars ',' transaction_char + { + $$ = append($$, $3) + } + +transaction_char: + ISOLATION LEVEL isolation_level + { + $$ = &IsolationLevel{Level: string($3)} + } +| READ WRITE + { + $$ = &AccessMode{Mode: TxReadWrite} + } +| READ ONLY + { + $$ = &AccessMode{Mode: TxReadOnly} + } + +isolation_level: + REPEATABLE READ + { + $$ = RepeatableRead + } +| READ COMMITTED + { + $$ = ReadCommitted + } +| READ UNCOMMITTED + { + $$ = ReadUncommitted + } +| SERIALIZABLE + { + $$ = Serializable + } + +set_session_or_global: + SESSION + { + $$ = SessionStr + } +| GLOBAL + { + $$ = GlobalStr + } + +table_modifier: + TEMP + { + $$ = TempStr + } +| TEMPORARY + { + $$ = TemporaryStr + } + +view_modifier: + MATERIALIZED + { + $$ = MaterializedStr + } + +create_statement: + create_table_prefix table_spec + { + $1.TableSpec = $2 + $$ = $1 + } +| create_table_prefix create_like + { + // Create table [name] like [name] + $1.OptLike = $2 + $$ = $1 + } +| CREATE constraint_opt INDEX id_or_var using_opt ON table_name ddl_skip_to_end + { + // Change this to an alter statement + $$ = &DDL{Action: AlterStr, Table: $7} + } +| CREATE VIEW table_name AS select_statement + { + $$ = &DDL{Action: CreateStr, Table: $3.ToViewName(), SelectStatement: $5 } + } +| CREATE OR REPLACE VIEW table_name AS select_statement + { + $$ = &DDL{Action: CreateStr, Table: $5.ToViewName(), SelectStatement: $7, OrReplace: true } + } +| CREATE view_modifier VIEW table_name AS select_statement + { + $$ = &DDL{Action: CreateStr, Table: $4.ToViewName(), SelectStatement: $6, Modifier: $2 } + } +| CREATE OR REPLACE view_modifier VIEW table_name AS select_statement + { + $$ = &DDL{Action: CreateStr, Table: $6.ToViewName(), SelectStatement: $8, OrReplace: true, Modifier: $4 } + } +| CREATE DATABASE not_exists_opt id_or_var ddl_skip_to_end + { + var notExists bool + if $3 != 0 { + notExists = true + } + $$ = &DBDDL{Action: CreateStr, DBName: string($4.String()), IfNotExists: notExists} + } +| CREATE SCHEMA not_exists_opt id_or_var ddl_skip_to_end + { + var notExists bool + if $3 != 0 { + notExists = true + } + $$ = &DBDDL{Action: CreateStr, DBName: string($4.String()), IfNotExists: notExists} + } + +refresh_stmt: + REFRESH MATERIALIZED VIEW table_name + { + $$ = &RefreshMaterializedView{ViewName: $4} + } + +infraql_opt: + { + $$ = BoolVal(false) + } +| STACKQL + { + $$ = BoolVal(true) + } + +auth_statement: + infraql_opt AUTH LOGIN name_opt + { + $$ = &Auth{SessionAuth: $1, Provider: $4 } + } +| + infraql_opt AUTH LOGIN name_opt auth_type + { + $$ = &Auth{SessionAuth: $1, Provider: $4, Type: $5 } + } +| + infraql_opt AUTH LOGIN name_opt auth_type STRING STRING + { + $$ = &Auth{SessionAuth: $1, Provider: $4, Type: $5, KeyFilePath: string($6), KeyEnvVar: string($7)} + } +| + infraql_opt AUTH REVOKE name_opt + { + $$ = &AuthRevoke{SessionAuth: $1, Provider: $4 } + } + +registry_stmt: + infraql_opt REGISTRY PULL id_or_var id_or_var + { + $$ = &Registry{ActionType: string($3), ProviderId: $4.GetRawVal(), ProviderVersion: $5.GetRawVal() } + } +| + infraql_opt REGISTRY PULL id_or_var STRING + { + $$ = &Registry{ActionType: string($3), ProviderId: $4.GetRawVal(), ProviderVersion: string($5) } + } +| + infraql_opt REGISTRY PULL id_or_var + { + $$ = &Registry{ActionType: string($3), ProviderId: $4.GetRawVal() } + } +| + infraql_opt REGISTRY LIST id_or_var + { + $$ = &Registry{ActionType: string($3), ProviderId: $4.GetRawVal() } + } +| + infraql_opt REGISTRY LIST STRING + { + $$ = &Registry{ActionType: string($3), ProviderId: string($4) } + } +| + infraql_opt REGISTRY LIST + { + $$ = &Registry{ActionType: string($3) } + } + +auth_type: + INTERACTIVE + { + $$ = InteractiveStr + } +| SERVICEACCOUNT + { + $$ = ServiceAccountStr + } +| SA + { + $$ = ServiceAccountStr + } + + +vindex_type_opt: + { + $$ = NewColIdent("") + } +| USING vindex_type + { + $$ = $2 + } + +vindex_type: + id_or_var + { + $$ = $1 + } + +vindex_params_opt: + { + var v []VindexParam + $$ = v + } +| WITH vindex_param_list + { + $$ = $2 + } + +vindex_param_list: + vindex_param + { + $$ = make([]VindexParam, 0, 4) + $$ = append($$, $1) + } +| vindex_param_list ',' vindex_param + { + $$ = append($$, $3) + } + +vindex_param: + reserved_sql_id '=' table_opt_value + { + $$ = VindexParam{Key: $1, Val: $3} + } + +create_table_prefix: + CREATE TABLE not_exists_opt table_name + { + var notExists bool + if $3 != 0 { + notExists = true + } + $$ = &DDL{Action: CreateStr, Table: $4, IfNotExists: notExists } + setDDL(yylex, $$) + } +| CREATE table_modifier TABLE not_exists_opt table_name + { + var notExists bool + if $4 != 0 { + notExists = true + } + $$ = &DDL{Action: CreateStr, Table: $5, IfNotExists: notExists, Modifier: $2} + setDDL(yylex, $$) + } + +table_spec: + '(' table_column_list ')' table_option_list + { + $$ = $2 + $$.Options = $4 + } + +create_like: + LIKE table_name + { + $$ = &OptLike{LikeTable: $2} + } +| '(' LIKE table_name ')' + { + $$ = &OptLike{LikeTable: $3} + } + +table_column_list: + column_definition + { + $$ = &TableSpec{} + $$.AddColumn($1) + } +| table_column_list ',' column_definition + { + $$.AddColumn($3) + } +| table_column_list ',' index_definition + { + $$.AddIndex($3) + } +| table_column_list ',' constraint_definition + { + $$.AddConstraint($3) + } + +column_definition: + id_or_var column_type null_opt column_default_opt on_update_opt auto_increment_opt column_key_opt column_comment_opt + { + $2.NotNull = $3 + $2.Default = $4 + $2.OnUpdate = $5 + $2.Autoincrement = $6 + $2.KeyOpt = $7 + $2.Comment = $8 + $$ = &ColumnDefinition{Name: $1, Type: $2} + } +column_type: + numeric_type unsigned_opt zero_fill_opt + { + $$ = $1 + $$.Unsigned = $2 + $$.Zerofill = $3 + } +| char_type +| time_type +| spatial_type + +numeric_type: + int_type length_opt + { + $$ = $1 + $$.Length = $2 + } +| decimal_type + { + $$ = $1 + } + +int_type: + BIT + { + $$ = ColumnType{Type: string($1)} + } +| BOOL + { + $$ = ColumnType{Type: string($1)} + } +| BOOLEAN + { + $$ = ColumnType{Type: string($1)} + } +| TINYINT + { + $$ = ColumnType{Type: string($1)} + } +| SMALLINT + { + $$ = ColumnType{Type: string($1)} + } +| MEDIUMINT + { + $$ = ColumnType{Type: string($1)} + } +| INT + { + $$ = ColumnType{Type: string($1)} + } +| INTEGER + { + $$ = ColumnType{Type: string($1)} + } +| BIGINT + { + $$ = ColumnType{Type: string($1)} + } + +decimal_type: +REAL float_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| DOUBLE float_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| FLOAT_TYPE float_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| DECIMAL decimal_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| NUMERIC decimal_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } + +time_type: + DATE + { + $$ = ColumnType{Type: string($1)} + } +| TIME length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| TIMESTAMP length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| DATETIME length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| YEAR + { + $$ = ColumnType{Type: string($1)} + } + +char_type: + CHAR length_opt charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Length: $2, Charset: $3, Collate: $4} + } +| VARCHAR length_opt charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Length: $2, Charset: $3, Collate: $4} + } +| BINARY length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| VARBINARY length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| TEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, Collate: $3} + } +| TINYTEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, Collate: $3} + } +| MEDIUMTEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, Collate: $3} + } +| LONGTEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, Collate: $3} + } +| BLOB + { + $$ = ColumnType{Type: string($1)} + } +| TINYBLOB + { + $$ = ColumnType{Type: string($1)} + } +| MEDIUMBLOB + { + $$ = ColumnType{Type: string($1)} + } +| LONGBLOB + { + $$ = ColumnType{Type: string($1)} + } +| JSON + { + $$ = ColumnType{Type: string($1)} + } +| ENUM '(' enum_values ')' charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), EnumValues: $3, Charset: $5, Collate: $6} + } +// need set_values / SetValues ? +| SET '(' enum_values ')' charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), EnumValues: $3, Charset: $5, Collate: $6} + } + +spatial_type: + GEOMETRY + { + $$ = ColumnType{Type: string($1)} + } +| POINT + { + $$ = ColumnType{Type: string($1)} + } +| LINESTRING + { + $$ = ColumnType{Type: string($1)} + } +| POLYGON + { + $$ = ColumnType{Type: string($1)} + } +| GEOMETRYCOLLECTION + { + $$ = ColumnType{Type: string($1)} + } +| MULTIPOINT + { + $$ = ColumnType{Type: string($1)} + } +| MULTILINESTRING + { + $$ = ColumnType{Type: string($1)} + } +| MULTIPOLYGON + { + $$ = ColumnType{Type: string($1)} + } + +enum_values: + STRING + { + $$ = make([]string, 0, 4) + $$ = append($$, "'" + string($1) + "'") + } +| enum_values ',' STRING + { + $$ = append($1, "'" + string($3) + "'") + } + +length_opt: + { + $$ = nil + } +| '(' INTEGRAL ')' + { + $$ = NewIntVal($2) + } + +float_length_opt: + { + $$ = LengthScaleOption{} + } +| '(' INTEGRAL ',' INTEGRAL ')' + { + $$ = LengthScaleOption{ + Length: NewIntVal($2), + Scale: NewIntVal($4), + } + } + +decimal_length_opt: + { + $$ = LengthScaleOption{} + } +| '(' INTEGRAL ')' + { + $$ = LengthScaleOption{ + Length: NewIntVal($2), + } + } +| '(' INTEGRAL ',' INTEGRAL ')' + { + $$ = LengthScaleOption{ + Length: NewIntVal($2), + Scale: NewIntVal($4), + } + } + +unsigned_opt: + { + $$ = BoolVal(false) + } +| UNSIGNED + { + $$ = BoolVal(true) + } + +zero_fill_opt: + { + $$ = BoolVal(false) + } +| ZEROFILL + { + $$ = BoolVal(true) + } + +// Null opt returns false to mean NULL (i.e. the default) and true for NOT NULL +null_opt: + { + $$ = BoolVal(false) + } +| NULL + { + $$ = BoolVal(false) + } +| NOT NULL + { + $$ = BoolVal(true) + } + +column_default_opt: + { + $$ = nil + } +| DEFAULT value_expression + { + $$ = $2 + } + +on_update_opt: + { + $$ = nil + } +| ON UPDATE function_call_nonkeyword +{ + $$ = $3 +} + +auto_increment_opt: + { + $$ = BoolVal(false) + } +| AUTO_INCREMENT + { + $$ = BoolVal(true) + } + +charset_opt: + { + $$ = "" + } +| CHARACTER SET id_or_var + { + $$ = string($3.String()) + } +| CHARACTER SET BINARY + { + $$ = string($3) + } + +collate_opt: + { + $$ = "" + } +| COLLATE id_or_var + { + $$ = string($2.String()) + } +| COLLATE STRING + { + $$ = string($2) + } + +column_key_opt: + { + $$ = colKeyNone + } +| PRIMARY KEY + { + $$ = ColKeyPrimary + } +| KEY + { + $$ = ColKey + } +| UNIQUE KEY + { + $$ = ColKeyUniqueKey + } +| UNIQUE + { + $$ = ColKeyUnique + } + +column_comment_opt: + { + $$ = nil + } +| COMMENT_KEYWORD STRING + { + $$ = NewStrVal($2) + } + +index_definition: + index_info '(' index_column_list ')' index_option_list + { + $$ = &IndexDefinition{Info: $1, Columns: $3, Options: $5} + } +| index_info '(' index_column_list ')' + { + $$ = &IndexDefinition{Info: $1, Columns: $3} + } + +index_option_list: + index_option + { + $$ = []*IndexOption{$1} + } +| index_option_list index_option + { + $$ = append($$, $2) + } + +index_option: + USING id_or_var + { + $$ = &IndexOption{Name: string($1), Using: string($2.String())} + } +| KEY_BLOCK_SIZE equal_opt INTEGRAL + { + // should not be string + $$ = &IndexOption{Name: string($1), Value: NewIntVal($3)} + } +| COMMENT_KEYWORD STRING + { + $$ = &IndexOption{Name: string($1), Value: NewStrVal($2)} + } + +sleep_stmt: + SLEEP INTEGRAL + { + $$ = &Sleep{Duration: NewIntVal($2)} + } + +equal_opt: + /* empty */ + { + $$ = "" + } +| '=' + { + $$ = string($1) + } + +index_info: + PRIMARY KEY + { + $$ = &IndexInfo{Type: string($1) + " " + string($2), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true} + } +| SPATIAL index_or_key name_opt + { + $$ = &IndexInfo{Type: string($1) + " " + string($2), Name: NewColIdent($3), Spatial: true, Unique: false} + } +| UNIQUE index_or_key name_opt + { + $$ = &IndexInfo{Type: string($1) + " " + string($2), Name: NewColIdent($3), Unique: true} + } +| UNIQUE name_opt + { + $$ = &IndexInfo{Type: string($1), Name: NewColIdent($2), Unique: true} + } +| index_or_key name_opt + { + $$ = &IndexInfo{Type: string($1), Name: NewColIdent($2), Unique: false} + } + +index_symbols: + INDEX + { + $$ = string($1) + } +| KEYS + { + $$ = string($1) + } +| INDEXES + { + $$ = string($1) + } + + +from_or_in: + FROM + { + $$ = string($1) + } +| IN + { + $$ = string($1) + } + +index_or_key: + INDEX + { + $$ = string($1) + } + | KEY + { + $$ = string($1) + } + +name_opt: + { + $$ = "" + } +| id_or_var + { + $$ = string($1.String()) + } + +index_column_list: + index_column + { + $$ = []*IndexColumn{$1} + } +| index_column_list ',' index_column + { + $$ = append($$, $3) + } + +index_column: + sql_id length_opt + { + $$ = &IndexColumn{Column: $1, Length: $2} + } + +constraint_definition: + CONSTRAINT id_or_var constraint_info + { + $$ = &ConstraintDefinition{Name: string($2.String()), Details: $3} + } +| constraint_info + { + $$ = &ConstraintDefinition{Details: $1} + } + + +constraint_info: + FOREIGN KEY '(' column_list ')' REFERENCES table_name '(' column_list ')' + { + $$ = &ForeignKeyDefinition{Source: $4, ReferencedTable: $7, ReferencedColumns: $9} + } +| FOREIGN KEY '(' column_list ')' REFERENCES table_name '(' column_list ')' fk_on_delete + { + $$ = &ForeignKeyDefinition{Source: $4, ReferencedTable: $7, ReferencedColumns: $9, OnDelete: $11} + } +| FOREIGN KEY '(' column_list ')' REFERENCES table_name '(' column_list ')' fk_on_update + { + $$ = &ForeignKeyDefinition{Source: $4, ReferencedTable: $7, ReferencedColumns: $9, OnUpdate: $11} + } +| FOREIGN KEY '(' column_list ')' REFERENCES table_name '(' column_list ')' fk_on_delete fk_on_update + { + $$ = &ForeignKeyDefinition{Source: $4, ReferencedTable: $7, ReferencedColumns: $9, OnDelete: $11, OnUpdate: $12} + } + +fk_on_delete: + ON DELETE fk_reference_action + { + $$ = $3 + } + +fk_on_update: + ON UPDATE fk_reference_action + { + $$ = $3 + } + +fk_reference_action: + RESTRICT + { + $$ = Restrict + } +| CASCADE + { + $$ = Cascade + } +| NO ACTION + { + $$ = NoAction + } +| SET DEFAULT + { + $$ = SetDefault + } +| SET NULL + { + $$ = SetNull + } + +table_option_list: + { + $$ = "" + } +| table_option + { + $$ = " " + string($1) + } +| table_option_list ',' table_option + { + $$ = string($1) + ", " + string($3) + } + +// rather than explicitly parsing the various keywords for table options, +// just accept any number of keywords, IDs, strings, numbers, and '=' +table_option: + table_opt_value + { + $$ = $1 + } +| table_option table_opt_value + { + $$ = $1 + " " + $2 + } +| table_option '=' table_opt_value + { + $$ = $1 + "=" + $3 + } + +table_opt_value: + reserved_sql_id + { + $$ = $1.String() + } +| STRING + { + $$ = "'" + string($1) + "'" + } +| INTEGRAL + { + $$ = string($1) + } + +alter_statement: + ALTER ignore_opt TABLE table_name non_add_drop_or_rename_operation skip_to_end + { + $$ = &DDL{Action: AlterStr, Table: $4} + } +| ALTER ignore_opt TABLE table_name ADD alter_object_type skip_to_end + { + $$ = &DDL{Action: AlterStr, Table: $4} + } +| ALTER ignore_opt TABLE table_name DROP alter_object_type skip_to_end + { + $$ = &DDL{Action: AlterStr, Table: $4} + } +| ALTER ignore_opt TABLE table_name RENAME to_opt table_name + { + // Change this to a rename statement + $$ = &DDL{Action: RenameStr, FromTables: TableNames{$4}, ToTables: TableNames{$7}} + } +| ALTER ignore_opt TABLE table_name RENAME index_opt skip_to_end + { + // Rename an index can just be an alter + $$ = &DDL{Action: AlterStr, Table: $4} + } +| ALTER VIEW table_name ddl_skip_to_end + { + $$ = &DDL{Action: AlterStr, Table: $3.ToViewName()} + } +| ALTER ignore_opt TABLE table_name partition_operation + { + $$ = &DDL{Action: AlterStr, Table: $4, PartitionSpec: $5} + } +| ALTER DATABASE id_or_var ddl_skip_to_end + { + $$ = &DBDDL{Action: AlterStr, DBName: string($3.String())} + } +| ALTER SCHEMA id_or_var ddl_skip_to_end + { + $$ = &DBDDL{Action: AlterStr, DBName: string($3.String())} + } +| ALTER VSCHEMA CREATE VINDEX table_name vindex_type_opt vindex_params_opt + { + $$ = &DDL{ + Action: CreateVindexStr, + Table: $5, + VindexSpec: &VindexSpec{ + Name: NewColIdent($5.Name.String()), + Type: $6, + Params: $7, + }, + } + } +| ALTER VSCHEMA DROP VINDEX table_name + { + $$ = &DDL{ + Action: DropVindexStr, + Table: $5, + VindexSpec: &VindexSpec{ + Name: NewColIdent($5.Name.String()), + }, + } + } +| ALTER VSCHEMA ADD TABLE table_name + { + $$ = &DDL{Action: AddVschemaTableStr, Table: $5} + } +| ALTER VSCHEMA DROP TABLE table_name + { + $$ = &DDL{Action: DropVschemaTableStr, Table: $5} + } +| ALTER VSCHEMA ON table_name ADD VINDEX sql_id '(' column_list ')' vindex_type_opt vindex_params_opt + { + $$ = &DDL{ + Action: AddColVindexStr, + Table: $4, + VindexSpec: &VindexSpec{ + Name: $7, + Type: $11, + Params: $12, + }, + VindexCols: $9, + } + } +| ALTER VSCHEMA ON table_name DROP VINDEX sql_id + { + $$ = &DDL{ + Action: DropColVindexStr, + Table: $4, + VindexSpec: &VindexSpec{ + Name: $7, + }, + } + } +| ALTER VSCHEMA ADD SEQUENCE table_name + { + $$ = &DDL{Action: AddSequenceStr, Table: $5} + } +| ALTER VSCHEMA ON table_name ADD AUTO_INCREMENT sql_id USING table_name + { + $$ = &DDL{ + Action: AddAutoIncStr, + Table: $4, + AutoIncSpec: &AutoIncSpec{ + Column: $7, + Sequence: $9, + }, + } + } + +alter_object_type: + CHECK +| COLUMN +| CONSTRAINT +| FOREIGN +| FULLTEXT +| ID +| AT_ID +| AT_AT_ID +| INDEX +| KEY +| PRIMARY +| SPATIAL +| PARTITION +| UNIQUE + +partition_operation: + REORGANIZE PARTITION sql_id INTO openb partition_definitions closeb + { + $$ = &PartitionSpec{Action: ReorganizeStr, Name: $3, Definitions: $6} + } + +partition_definitions: + partition_definition + { + $$ = []*PartitionDefinition{$1} + } +| partition_definitions ',' partition_definition + { + $$ = append($1, $3) + } + +partition_definition: + PARTITION sql_id VALUES LESS THAN openb value_expression closeb + { + $$ = &PartitionDefinition{Name: $2, Limit: $7} + } +| PARTITION sql_id VALUES LESS THAN openb MAXVALUE closeb + { + $$ = &PartitionDefinition{Name: $2, Maxvalue: true} + } + +rename_statement: + RENAME TABLE rename_list + { + $$ = $3 + } + +rename_list: + table_name TO table_name + { + $$ = &DDL{Action: RenameStr, FromTables: TableNames{$1}, ToTables: TableNames{$3}} + } +| rename_list ',' table_name TO table_name + { + $$ = $1 + $$.FromTables = append($$.FromTables, $3) + $$.ToTables = append($$.ToTables, $5) + } + +drop_statement: + DROP TABLE exists_opt table_name_list + { + var exists bool + if $3 != 0 { + exists = true + } + $$ = &DDL{Action: DropStr, FromTables: $4, IfExists: exists, Modifier: "table"} + } +| DROP INDEX id_or_var ON table_name ddl_skip_to_end + { + // Change this to an alter statement + $$ = &DDL{Action: AlterStr, Table: $5} + } +| DROP VIEW exists_opt table_name ddl_skip_to_end + { + var exists bool + if $3 != 0 { + exists = true + } + $$ = &DDL{Action: DropStr, FromTables: TableNames{$4.ToViewName()}, IfExists: exists} + } +| DROP view_modifier VIEW exists_opt table_name ddl_skip_to_end + { + var exists bool + if $4 != 0 { + exists = true + } + $$ = &DDL{Action: DropStr, FromTables: TableNames{$5.ToViewName()}, IfExists: exists, Modifier: $2} + } +| DROP DATABASE exists_opt id_or_var + { + $$ = &DBDDL{Action: DropStr, DBName: string($4.String())} + } +| DROP SCHEMA exists_opt id_or_var + { + $$ = &DBDDL{Action: DropStr, DBName: string($4.String())} + } + +truncate_statement: + TRUNCATE TABLE table_name + { + $$ = &DDL{Action: TruncateStr, Table: $3} + } +| TRUNCATE table_name + { + $$ = &DDL{Action: TruncateStr, Table: $2} + } +analyze_statement: + ANALYZE TABLE table_name + { + $$ = &OtherRead{} + } + +show_statement: + SHOW BINARY id_or_var ddl_skip_to_end /* SHOW BINARY LOGS */ + { + $$ = &Show{Type: string($2) + " " + string($3.String())} + } +/* SHOW CHARACTER SET and SHOW CHARSET are equivalent */ +| SHOW CHARACTER SET like_or_where_opt + { + showTablesOpt := &ShowTablesOpt{Filter: $4} + $$ = &Show{Type: CharsetStr, ShowTablesOpt: showTablesOpt} + } +| SHOW CHARSET like_or_where_opt + { + showTablesOpt := &ShowTablesOpt{Filter: $3} + $$ = &Show{Type: string($2), ShowTablesOpt: showTablesOpt} + } +| SHOW CREATE DATABASE ddl_skip_to_end + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +/* Rule to handle SHOW CREATE EVENT, SHOW CREATE FUNCTION, etc. */ +| SHOW CREATE id_or_var ddl_skip_to_end + { + $$ = &Show{Type: string($2) + " " + string($3.String())} + } +| SHOW CREATE PROCEDURE ddl_skip_to_end + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW CREATE TABLE table_name + { + $$ = &Show{Type: string($2) + " " + string($3), Table: $4} + } +| SHOW CREATE TRIGGER ddl_skip_to_end + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW CREATE VIEW ddl_skip_to_end + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW DATABASES ddl_skip_to_end + { + $$ = &Show{Type: string($2)} + } +| SHOW ENGINES + { + $$ = &Show{Type: string($2)} + } +| SHOW extended_opt index_symbols from_or_in table_name from_database_opt like_or_where_opt + { + showTablesOpt := &ShowTablesOpt{DbName:$6, Filter:$7} + $$ = &Show{Extended: string($2), Type: string($3), ShowTablesOpt: showTablesOpt, OnTable: $5} + } +| SHOW PLUGINS + { + $$ = &Show{Type: string($2)} + } +| SHOW PROCEDURE ddl_skip_to_end + { + $$ = &Show{Type: string($2)} + } +| SHOW show_session_or_global STATUS ddl_skip_to_end + { + $$ = &Show{Scope: $2, Type: string($3)} + } +| SHOW TABLE ddl_skip_to_end + { + $$ = &Show{Type: string($2)} + } +| SHOW full_opt columns_or_fields FROM table_name from_database_opt like_or_where_opt + { + showTablesOpt := &ShowTablesOpt{Full:$2, DbName:$6, Filter:$7} + $$ = &Show{Type: string($3), ShowTablesOpt: showTablesOpt, OnTable: $5} + } +| SHOW full_opt tables_or_processlist from_database_opt like_or_where_opt + { + // this is ugly, but I couldn't find a better way for now + if $3 == "processlist" { + $$ = &Show{Type: $3} + } else { + showTablesOpt := &ShowTablesOpt{Full:$2, DbName:$4, Filter:$5} + $$ = &Show{Type: $3, ShowTablesOpt: showTablesOpt} + } + } +| SHOW show_session_or_global VARIABLES ddl_skip_to_end + { + $$ = &Show{Scope: $2, Type: string($3)} + } +| SHOW COLLATION + { + $$ = &Show{Type: string($2)} + } +| SHOW COLLATION WHERE expression + { + $$ = &Show{Type: string($2), ShowCollationFilterOpt: $4} + } +| SHOW VITESS_METADATA VARIABLES like_opt + { + showTablesOpt := &ShowTablesOpt{Filter: $4} + $$ = &Show{Scope: string($2), Type: string($3), ShowTablesOpt: showTablesOpt} + } +| SHOW VSCHEMA TABLES + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW VSCHEMA VINDEXES + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW VSCHEMA VINDEXES ON table_name + { + $$ = &Show{Type: string($2) + " " + string($3), OnTable: $5} + } +| SHOW WARNINGS + { + $$ = &Show{Type: string($2)} + } +| SHOW extended_opt id_or_var from_or_in table_name like_or_where_opt + { + showTablesOpt := &ShowTablesOpt{Filter: $6} + $$ = &Show{Extended: string($2), Type: string($3.String()), OnTable: $5, ShowTablesOpt: showTablesOpt} + } +/* + * Catch-all for show statements without vitess keywords: + * + * SHOW BINARY LOGS + * SHOW INVALID + * SHOW VITESS_KEYSPACES + * SHOW VITESS_TABLETS + * SHOW VITESS_SHARDS + * SHOW VITESS_TARGET + * + * + */ +| SHOW extended_opt id_or_var like_or_where_opt + { + showTablesOpt := &ShowTablesOpt{Filter: $4} + $$ = &Show{Extended: string($2), Type: string($3.String()), ShowTablesOpt: showTablesOpt} + } +| SHOW AUTH name_opt + { + $$ = &Show{Type: string($2), Scope: $3} + } +| SHOW extended_opt INSERT comment_opt into_table_name opt_column_list like_or_where_opt + + { + showTablesOpt := &ShowTablesOpt{Filter: $7} + $$ = &Show{Extended: string($2), Comments: Comments($4), Type: string($3), OnTable: $5, ShowTablesOpt: showTablesOpt, Columns: $6} + } +| SHOW TRANSACTION ISOLATION LEVEL + + { + $$ = &Show{Type: "TRANSACTION_ISOLATION_LEVEL"} + } + +tables_or_processlist: + TABLES + { + $$ = string($1) + } +| PROCESSLIST + { + $$ = string($1) + } + +extended_opt: + /* empty */ + { + $$ = "" + } + | EXTENDED + { + $$ = "extended " + } + +full_opt: + /* empty */ + { + $$ = "" + } +| FULL + { + $$ = "full " + } + +columns_or_fields: + COLUMNS + { + $$ = string($1) + } +| FIELDS + { + $$ = string($1) + } + +from_database_opt: + /* empty */ + { + $$ = "" + } +| FROM table_id + { + $$ = $2.v + } +| IN table_id + { + $$ = $2.v + } + +like_or_where_opt: + /* empty */ + { + $$ = nil + } +| LIKE STRING + { + $$ = &ShowFilter{Like:string($2)} + } +| WHERE expression + { + $$ = &ShowFilter{Filter:$2} + } + +like_opt: + /* empty */ + { + $$ = nil + } + | LIKE STRING + { + $$ = &ShowFilter{Like:string($2)} + } + +show_session_or_global: + /* empty */ + { + $$ = "" + } +| SESSION + { + $$ = SessionStr + } +| GLOBAL + { + $$ = GlobalStr + } + +use_statement: + USE table_id + { + $$ = &Use{DBName: $2} + } +| USE + { + $$ = &Use{DBName:TableIdent{v:""}} + } + +begin_statement: + BEGIN + { + $$ = &Begin{} + } +| START TRANSACTION + { + $$ = &Begin{} + } + +commit_statement: + COMMIT + { + $$ = &Commit{} + } + +rollback_statement: + ROLLBACK + { + $$ = &Rollback{} + } +| ROLLBACK work_opt TO savepoint_opt sql_id + { + $$ = &SRollback{Name: $5} + } + +work_opt: + { $$ = struct{}{} } +| WORK + { $$ = struct{}{} } + +savepoint_opt: + { $$ = struct{}{} } +| SAVEPOINT + { $$ = struct{}{} } + + +savepoint_statement: + SAVEPOINT sql_id + { + $$ = &Savepoint{Name: $2} + } + +release_statement: + RELEASE SAVEPOINT sql_id + { + $$ = &Release{Name: $3} + } + +explain_format_opt: + { + $$ = "" + } +| FORMAT '=' JSON + { + $$ = JSONStr + } +| FORMAT '=' TREE + { + $$ = TreeStr + } +| FORMAT '=' VITESS + { + $$ = VitessStr + } +| FORMAT '=' TRADITIONAL + { + $$ = TraditionalStr + } +| ANALYZE + { + $$ = AnalyzeStr + } + +explain_synonyms: + EXPLAIN + { + $$ = $1 + } +| DESCRIBE + { + $$ = $1 + } +| DESC + { + $$ = $1 + } + +explainable_statement: + select_statement + { + $$ = $1 + } +| update_statement + { + $$ = $1 + } +| insert_statement + { + $$ = $1 + } +| delete_statement + { + $$ = $1 + } + +wild_opt: + { + $$ = "" + } +| sql_id + { + $$ = "" + } +| STRING + { + $$ = "" + } + +explain_statement: + explain_synonyms full_opt extended_opt table_name wild_opt + { + $$ = &DescribeTable{Full: string($2), Extended: string($3), Table: $4} + } +| explain_synonyms explain_format_opt explainable_statement + { + $$ = &Explain{Type: $2, Statement: $3} + } + +other_statement: + REPAIR skip_to_end + { + $$ = &OtherAdmin{} + } +| OPTIMIZE skip_to_end + { + $$ = &OtherAdmin{} + } +| LOCK TABLES skip_to_end + { + $$ = &OtherAdmin{} + } +| UNLOCK TABLES skip_to_end + { + $$ = &OtherAdmin{} + } + +flush_statement: + FLUSH skip_to_end + { + $$ = &DDL{Action: FlushStr} + } +comment_opt: + { + setAllowComments(yylex, true) + } + comment_list + { + $$ = $2 + setAllowComments(yylex, false) + } + +comment_list: + { + $$ = nil + } +| comment_list COMMENT + { + $$ = append($1, $2) + } + +union_op: + UNION + { + $$ = UnionStr + } +| UNION ALL + { + $$ = UnionAllStr + } +| UNION DISTINCT + { + $$ = UnionDistinctStr + } + +cache_opt: +{ + $$ = "" +} +| SQL_NO_CACHE +{ + $$ = SQLNoCacheStr +} +| SQL_CACHE +{ + $$ = SQLCacheStr +} + +distinct_opt: + { + $$ = "" + } +| DISTINCT + { + $$ = DistinctStr + } +| DISTINCTROW + { + $$ = DistinctStr + } + +select_expression_list_opt: + { + $$ = nil + } +| select_expression_list + { + $$ = $1 + } + +select_options: + { + $$ = nil + } +| select_option + { + $$ = []string{$1} + } +| select_option select_option // TODO: figure out a way to do this recursively instead. + { // TODO: This is a hack since I couldn't get it to work in a nicer way. I got 'conflicts: 8 shift/reduce' + $$ = []string{$1, $2} + } +| select_option select_option select_option + { + $$ = []string{$1, $2, $3} + } +| select_option select_option select_option select_option + { + $$ = []string{$1, $2, $3, $4} + } + +select_option: + SQL_NO_CACHE + { + $$ = SQLNoCacheStr + } +| SQL_CACHE + { + $$ = SQLCacheStr + } +| DISTINCT + { + $$ = DistinctStr + } +| DISTINCTROW + { + $$ = DistinctStr + } +| STRAIGHT_JOIN + { + $$ = StraightJoinHint + } +| SQL_CALC_FOUND_ROWS + { + $$ = SQLCalcFoundRowsStr + } + +select_expression_list: + select_expression + { + $$ = SelectExprs{$1} + } +| select_expression_list ',' select_expression + { + $$ = append($$, $3) + } + +select_expression: + '*' + { + $$ = &StarExpr{} + } +| expression list_arg_concatamer + { + concatamer := ListArgConcatamer($2) + $$ = &AliasedExpr{Expr: $1, As: NewColIdent(concatamer.String()) } + } +| expression as_ci_opt + { + $$ = &AliasedExpr{Expr: $1, As: $2} + } +| table_id '.' '*' + { + $$ = &StarExpr{TableName: TableName{Name: $1}} + } +| table_id '.' reserved_table_id '.' '*' + { + $$ = &StarExpr{TableName: TableName{Qualifier: $1, Name: $3}} + } + +as_ci_opt: + { + $$ = ColIdent{} + } +| col_alias + { + $$ = $1 + } +| AS col_alias + { + $$ = $2 + } + +col_alias: + sql_id +| STRING + { + $$ = NewColIdent(string($1)) + } + +from_opt: + { + $$ = TableExprs{&AliasedTableExpr{Expr:TableName{Name: NewTableIdent("dual")}}} + } +| FROM table_references + { + $$ = $2 + } + +table_references: + table_reference + { + $$ = TableExprs{$1} + } +| table_references ',' table_reference + { + $$ = append($$, $3) + } + +table_reference: + table_factor +| join_table +| table_valued_func + +table_valued_func: + function_call_table_valued as_opt_id + { + $$ = &TableValuedFuncTableExpr{FuncExpr:$1, As: $2} + } + +table_factor: + aliased_table_name + { + $$ = $1 + } +| derived_table as_opt table_id + { + $$ = &AliasedTableExpr{Expr:$1, As: $3} + } +| openb table_references closeb + { + $$ = &ParenTableExpr{Exprs: $2} + } +| openb exec_stmt closeb + { + exec := $2.(*Exec) + $$ = &ExecSubquery{Exec: exec } + } + +derived_table: + openb select_statement closeb + { + $$ = &Subquery{$2} + } + +aliased_table_name: +table_name as_opt_id index_hint_list + { + $$ = &AliasedTableExpr{Expr:$1, As: $2, Hints: $3} + } +| table_name PARTITION openb partition_list closeb as_opt_id index_hint_list + { + $$ = &AliasedTableExpr{Expr:$1, Partitions: $4, As: $6, Hints: $7} + } + +opt_column_list: + { + $$ = nil + } +| '(' column_list ')' + { + $$ = $2 + } + + +column_list: + sql_id + { + $$ = Columns{$1} + } +| column_list ',' sql_id + { + $$ = append($$, $3) + } + +partition_list: + sql_id + { + $$ = Partitions{$1} + } +| partition_list ',' sql_id + { + $$ = append($$, $3) + } + +// There is a grammar conflict here: +// 1: INSERT INTO a SELECT * FROM b JOIN c ON b.i = c.i +// 2: INSERT INTO a SELECT * FROM b JOIN c ON DUPLICATE KEY UPDATE a.i = 1 +// When yacc encounters the ON clause, it cannot determine which way to +// resolve. The %prec override below makes the parser choose the +// first construct, which automatically makes the second construct a +// syntax error. This is the same behavior as MySQL. +join_table: + table_reference inner_join table_factor join_condition_opt + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3, Condition: $4} + } +| table_reference straight_join table_factor on_expression_opt + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3, Condition: $4} + } +| table_reference outer_join table_reference join_condition + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3, Condition: $4} + } +| table_reference natural_join table_factor + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3} + } + +join_condition: + ON expression + { $$ = JoinCondition{On: $2} } +| USING '(' column_list ')' + { $$ = JoinCondition{Using: $3} } + +join_condition_opt: +%prec JOIN + { $$ = JoinCondition{} } +| join_condition + { $$ = $1 } + +on_expression_opt: +%prec JOIN + { $$ = JoinCondition{} } +| ON expression + { $$ = JoinCondition{On: $2} } + +as_opt: + { $$ = struct{}{} } +| AS + { $$ = struct{}{} } + +as_opt_id: + { + $$ = NewTableIdent("") + } +| table_alias + { + $$ = $1 + } +| AS table_alias + { + $$ = $2 + } + +table_alias: + table_id +| STRING + { + $$ = NewTableIdent(string($1)) + } + +inner_join: + JOIN + { + $$ = JoinStr + } +| INNER JOIN + { + $$ = JoinStr + } +| CROSS JOIN + { + $$ = JoinStr + } + +straight_join: + STRAIGHT_JOIN + { + $$ = StraightJoinStr + } + +outer_join: + LEFT JOIN + { + $$ = LeftJoinStr + } +| LEFT OUTER JOIN + { + $$ = LeftOuterJoinStr + } +| RIGHT JOIN + { + $$ = RightJoinStr + } +| RIGHT OUTER JOIN + { + $$ = RightOuterJoinStr + } + +natural_join: + NATURAL JOIN + { + $$ = NaturalJoinStr + } +| NATURAL outer_join + { + if $2 == LeftJoinStr { + $$ = NaturalLeftJoinStr + } else { + $$ = NaturalRightJoinStr + } + } + +into_table_name: + INTO table_name + { + $$ = $2 + } +| table_name + { + $$ = $1 + } + +table_name: + table_id + { + $$ = TableName{Name: $1} + } +| table_id '.' reserved_table_id + { + $$ = TableName{Qualifier: $1, Name: $3} + } +| table_id '.' reserved_table_id '.' reserved_table_id + { + $$ = TableName{QualifierSecond: $1, Qualifier: $3, Name: $5} + } +| table_id '.' reserved_table_id '.' reserved_table_id '.' reserved_table_id + { + $$ = TableName{QualifierThird: $1, QualifierSecond: $3, Qualifier: $5, Name: $7} + } + +delete_table_name: +table_id '.' '*' + { + $$ = TableName{Name: $1} + } + +index_hint_list: + { + $$ = nil + } +| USE INDEX openb column_list closeb + { + $$ = &IndexHints{Type: UseStr, Indexes: $4} + } +| USE INDEX openb closeb + { + $$ = &IndexHints{Type: UseStr} + } +| IGNORE INDEX openb column_list closeb + { + $$ = &IndexHints{Type: IgnoreStr, Indexes: $4} + } +| FORCE INDEX openb column_list closeb + { + $$ = &IndexHints{Type: ForceStr, Indexes: $4} + } + +where_expression_opt: + { + $$ = nil + } +| WHERE expression + { + $$ = $2 + } + +expression: + condition + { + $$ = $1 + } +| expression AND expression + { + $$ = &AndExpr{Left: $1, Right: $3} + } +| expression OR expression + { + $$ = &OrExpr{Left: $1, Right: $3} + } +| expression XOR expression + { + $$ = &XorExpr{Left: $1, Right: $3} + } +| NOT expression + { + $$ = &NotExpr{Expr: $2} + } +| expression IS is_suffix + { + $$ = &IsExpr{Operator: $3, Expr: $1} + } +| expression list_arg_concatamer + { + $$ = &UnaryCastConcatamerExpr{ Expr: $1, CastConcatamer: ListArgConcatamer($2) } + } +| value_expression + { + $$ = $1 + } +| DEFAULT default_opt + { + $$ = &Default{ColName: $2} + } + +default_opt: + /* empty */ + { + $$ = "" + } +| openb id_or_var closeb + { + $$ = string($2.String()) + } + +boolean_value: + TRUE + { + $$ = BoolVal(true) + } +| FALSE + { + $$ = BoolVal(false) + } + +condition: + value_expression compare value_expression + { + $$ = &ComparisonExpr{Left: $1, Operator: $2, Right: $3} + } +| value_expression IN col_tuple + { + $$ = &ComparisonExpr{Left: $1, Operator: InStr, Right: $3} + } +| value_expression NOT IN col_tuple + { + $$ = &ComparisonExpr{Left: $1, Operator: NotInStr, Right: $4} + } +| value_expression LIKE value_expression like_escape_opt + { + $$ = &ComparisonExpr{Left: $1, Operator: LikeStr, Right: $3, Escape: $4} + } +| value_expression NOT LIKE value_expression like_escape_opt + { + $$ = &ComparisonExpr{Left: $1, Operator: NotLikeStr, Right: $4, Escape: $5} + } +| value_expression REGEXP value_expression + { + $$ = &ComparisonExpr{Left: $1, Operator: RegexpStr, Right: $3} + } +| value_expression NOT REGEXP value_expression + { + $$ = &ComparisonExpr{Left: $1, Operator: NotRegexpStr, Right: $4} + } +| value_expression BETWEEN value_expression AND value_expression + { + $$ = &RangeCond{Left: $1, Operator: BetweenStr, From: $3, To: $5} + } +| value_expression NOT BETWEEN value_expression AND value_expression + { + $$ = &RangeCond{Left: $1, Operator: NotBetweenStr, From: $4, To: $6} + } +| EXISTS subquery + { + $$ = &ExistsExpr{Subquery: $2} + } + +is_suffix: + NULL + { + $$ = IsNullStr + } +| NOT NULL + { + $$ = IsNotNullStr + } +| TRUE + { + $$ = IsTrueStr + } +| NOT TRUE + { + $$ = IsNotTrueStr + } +| FALSE + { + $$ = IsFalseStr + } +| NOT FALSE + { + $$ = IsNotFalseStr + } + +compare: + '=' + { + $$ = EqualStr + } +| '<' + { + $$ = LessThanStr + } +| '>' + { + $$ = GreaterThanStr + } +| LE + { + $$ = LessEqualStr + } +| GE + { + $$ = GreaterEqualStr + } +| NE + { + $$ = NotEqualStr + } +| NULL_SAFE_EQUAL + { + $$ = NullSafeEqualStr + } + +like_escape_opt: + { + $$ = nil + } +| ESCAPE value_expression + { + $$ = $2 + } + +col_tuple: + row_tuple + { + $$ = $1 + } +| subquery + { + $$ = $1 + } +| LIST_ARG + { + $$ = ListArg($1) + } + +list_arg_concatamer: +LIST_ARG { + $$ = []ListArg{ListArg($1)} + } +| list_arg_concatamer LIST_ARG + { + $$ = append($1, ListArg($2)) + } + +subquery: + openb simple_select closeb + { + $$ = &Subquery{$2} + } + +expression_list: + expression + { + $$ = Exprs{$1} + } +| expression_list ',' expression + { + $$ = append($1, $3) + } + +value_expression: + value + { + $$ = $1 + } +| boolean_value + { + $$ = $1 + } +| column_name + { + $$ = $1 + } +| tuple_expression + { + $$ = $1 + } +| subquery + { + $$ = $1 + } +| value_expression '&' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: BitAndStr, Right: $3} + } +| value_expression '|' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: BitOrStr, Right: $3} + } +| value_expression '^' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: BitXorStr, Right: $3} + } +| value_expression '+' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: PlusStr, Right: $3} + } +| value_expression '-' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: MinusStr, Right: $3} + } +| value_expression '*' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: MultStr, Right: $3} + } +| value_expression '/' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: DivStr, Right: $3} + } +| value_expression DIV value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: IntDivStr, Right: $3} + } +| value_expression '%' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ModStr, Right: $3} + } +| value_expression MOD value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ModStr, Right: $3} + } +| value_expression SHIFT_LEFT value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ShiftLeftStr, Right: $3} + } +| value_expression SHIFT_RIGHT value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ShiftRightStr, Right: $3} + } +| column_name JSON_EXTRACT_OP value + { + $$ = &BinaryExpr{Left: $1, Operator: JSONExtractOp, Right: $3} + } +| column_name JSON_UNQUOTE_EXTRACT_OP value + { + $$ = &BinaryExpr{Left: $1, Operator: JSONUnquoteExtractOp, Right: $3} + } +| value_expression COLLATE charset + { + $$ = &CollateExpr{Expr: $1, Charset: $3} + } +| BINARY value_expression %prec UNARY + { + $$ = &UnaryExpr{Operator: BinaryStr, Expr: $2} + } +| UNDERSCORE_BINARY value_expression %prec UNARY + { + $$ = &UnaryExpr{Operator: UBinaryStr, Expr: $2} + } +| UNDERSCORE_UTF8 value_expression %prec UNARY + { + $$ = &UnaryExpr{Operator: Utf8Str, Expr: $2} + } +| UNDERSCORE_UTF8MB4 value_expression %prec UNARY + { + $$ = &UnaryExpr{Operator: Utf8mb4Str, Expr: $2} + } +| UNDERSCORE_LATIN1 value_expression %prec UNARY + { + $$ = &UnaryExpr{Operator: Latin1Str, Expr: $2} + } +| '+' value_expression %prec UNARY + { + if num, ok := $2.(*SQLVal); ok && num.Type == IntVal { + $$ = num + } else { + $$ = &UnaryExpr{Operator: UPlusStr, Expr: $2} + } + } +| '-' value_expression %prec UNARY + { + if num, ok := $2.(*SQLVal); ok && num.Type == IntVal { + // Handle double negative + if num.Val[0] == '-' { + num.Val = num.Val[1:] + $$ = num + } else { + $$ = NewIntVal(append([]byte("-"), num.Val...)) + } + } else { + $$ = &UnaryExpr{Operator: UMinusStr, Expr: $2} + } + } +| '~' value_expression + { + $$ = &UnaryExpr{Operator: TildaStr, Expr: $2} + } +| '!' value_expression %prec UNARY + { + $$ = &UnaryExpr{Operator: BangStr, Expr: $2} + } +| INTERVAL value_expression sql_id + { + // This rule prevents the usage of INTERVAL + // as a function. If support is needed for that, + // we'll need to revisit this. The solution + // will be non-trivial because of grammar conflicts. + $$ = &IntervalExpr{Expr: $2, Unit: $3.String()} + } +| INTERVAL STRING + { + inputStr := string($2) + stringList := strings.Split(inputStr, " ") + if len(stringList) != 2 { + yylex.Error("the interval string '" + inputStr + "' is not valid") + return 1 + } + stringExpr := NewStrVal([]byte(stringList[0])) + $$ = &IntervalExpr{Expr: stringExpr, Unit: stringList[1]} + } +| function_call_generic +| function_call_keyword +| function_call_nonkeyword +| function_call_conflict + +function_call_table_valued: + cardinality_expansion_function_name openb select_expression_list_opt closeb + { + $$ = &FuncExpr{Name: NewColIdent($1), Exprs: $3} + } + +cardinality_expansion_function_name: + JSON_ARRAY_ELEMENTS_TEXT + { + $$ = JsonArrayElementsTextStr + } +| JSON_EACH + { + $$ = JsonEachStr + } + +/* + Regular function calls without special token or syntax, guaranteed to not + introduce side effects due to being a simple identifier +*/ +function_call_generic: + sql_id openb select_expression_list_opt closeb over_clause_opt + { + $$ = &FuncExpr{Name: $1, Exprs: $3, Over: $5} + } +| sql_id openb DISTINCT select_expression_list closeb over_clause_opt + { + $$ = &FuncExpr{Name: $1, Distinct: true, Exprs: $4, Over: $6} + } +| sql_id openb DISTINCTROW select_expression_list closeb over_clause_opt + { + $$ = &FuncExpr{Name: $1, Distinct: true, Exprs: $4, Over: $6} + } +| table_id '.' reserved_sql_id openb select_expression_list_opt closeb + { + $$ = &FuncExpr{Qualifier: $1, Name: $3, Exprs: $5} + } + +/* + Function calls using reserved keywords, with dedicated grammar rules + as a result +*/ +function_call_keyword: + LEFT openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("left"), Exprs: $3} + } +| RIGHT openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("right"), Exprs: $3} + } +| CONVERT openb expression ',' convert_type closeb + { + $$ = &ConvertExpr{Expr: $3, Type: $5} + } +| CAST openb expression AS convert_type closeb + { + $$ = &ConvertExpr{Expr: $3, Type: $5} + } +| CONVERT openb expression USING charset closeb + { + $$ = &ConvertUsingExpr{Expr: $3, Type: $5} + } +| SUBSTR openb column_name FROM value_expression FOR value_expression closeb + { + $$ = &SubstrExpr{Name: $3, From: $5, To: $7} + } +| SUBSTRING openb column_name FROM value_expression FOR value_expression closeb + { + $$ = &SubstrExpr{Name: $3, From: $5, To: $7} + } +| SUBSTR openb STRING FROM value_expression FOR value_expression closeb + { + $$ = &SubstrExpr{StrVal: NewStrVal($3), From: $5, To: $7} + } +| SUBSTRING openb STRING FROM value_expression FOR value_expression closeb + { + $$ = &SubstrExpr{StrVal: NewStrVal($3), From: $5, To: $7} + } +| MATCH openb select_expression_list closeb AGAINST openb value_expression match_option closeb + { + $$ = &MatchExpr{Columns: $3, Expr: $7, Option: $8} + } +| GROUP_CONCAT openb distinct_opt select_expression_list order_by_opt separator_opt limit_opt closeb + { + $$ = &GroupConcatExpr{Distinct: $3, Exprs: $4, OrderBy: $5, Separator: $6, Limit: $7} + } +| CASE expression_opt when_expression_list else_expression_opt END + { + $$ = &CaseExpr{Expr: $2, Whens: $3, Else: $4} + } +| VALUES openb column_name closeb + { + $$ = &ValuesFuncExpr{Name: $3} + } + +/* + Function calls using non reserved keywords but with special syntax forms. + Dedicated grammar rules are needed because of the special syntax +*/ +function_call_nonkeyword: + CURRENT_TIMESTAMP func_datetime_opt + { + $$ = &FuncExpr{Name:NewColIdent("current_timestamp")} + } +| UTC_TIMESTAMP func_datetime_opt + { + $$ = &FuncExpr{Name:NewColIdent("utc_timestamp")} + } +| UTC_TIME func_datetime_opt + { + $$ = &FuncExpr{Name:NewColIdent("utc_time")} + } +/* doesn't support fsp */ +| UTC_DATE func_datetime_opt + { + $$ = &FuncExpr{Name:NewColIdent("utc_date")} + } + // now +| LOCALTIME func_datetime_opt + { + $$ = &FuncExpr{Name:NewColIdent("localtime")} + } + // now +| LOCALTIMESTAMP func_datetime_opt + { + $$ = &FuncExpr{Name:NewColIdent("localtimestamp")} + } + // curdate +/* doesn't support fsp */ +| CURRENT_DATE func_datetime_opt + { + $$ = &FuncExpr{Name:NewColIdent("current_date")} + } + // curtime +| CURRENT_TIME func_datetime_opt + { + $$ = &FuncExpr{Name:NewColIdent("current_time")} + } +// these functions can also be called with an optional argument +| CURRENT_TIMESTAMP func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("current_timestamp"), Fsp:$2} + } +| UTC_TIMESTAMP func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("utc_timestamp"), Fsp:$2} + } +| UTC_TIME func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("utc_time"), Fsp:$2} + } + // now +| LOCALTIME func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("localtime"), Fsp:$2} + } + // now +| LOCALTIMESTAMP func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("localtimestamp"), Fsp:$2} + } + // curtime +| CURRENT_TIME func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewColIdent("current_time"), Fsp:$2} + } +| TIMESTAMPADD openb sql_id ',' value_expression ',' value_expression closeb + { + $$ = &TimestampFuncExpr{Name:string("timestampadd"), Unit:$3.String(), Expr1:$5, Expr2:$7} + } +| TIMESTAMPDIFF openb sql_id ',' value_expression ',' value_expression closeb + { + $$ = &TimestampFuncExpr{Name:string("timestampdiff"), Unit:$3.String(), Expr1:$5, Expr2:$7} + } + +func_datetime_opt: + /* empty */ +| openb closeb + +func_datetime_precision: + openb value_expression closeb + { + $$ = $2 + } + +/* + Function calls using non reserved keywords with *normal* syntax forms. Because + the names are non-reserved, they need a dedicated rule so as not to conflict +*/ +function_call_conflict: + IF openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("if"), Exprs: $3} + } +| DATABASE openb select_expression_list_opt closeb + { + $$ = &FuncExpr{Name: NewColIdent("database"), Exprs: $3} + } +| SCHEMA openb select_expression_list_opt closeb + { + $$ = &FuncExpr{Name: NewColIdent("schema"), Exprs: $3} + } +| MOD openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("mod"), Exprs: $3} + } +| REPLACE openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("replace"), Exprs: $3} + } +| SUBSTR openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("substr"), Exprs: $3} + } +| SUBSTRING openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("substr"), Exprs: $3} + } + +match_option: +/*empty*/ + { + $$ = "" + } +| IN BOOLEAN MODE + { + $$ = BooleanModeStr + } +| IN NATURAL LANGUAGE MODE + { + $$ = NaturalLanguageModeStr + } +| IN NATURAL LANGUAGE MODE WITH QUERY EXPANSION + { + $$ = NaturalLanguageModeWithQueryExpansionStr + } +| WITH QUERY EXPANSION + { + $$ = QueryExpansionStr + } + +/* + Window function OVER clause and window specifications +*/ +over_clause_opt: + { + $$ = nil + } +| OVER openb window_spec closeb + { + $$ = &OverClause{WindowSpec: $3} + } +| OVER openb closeb + { + $$ = &OverClause{WindowSpec: &WindowSpec{}} + } +| OVER sql_id + { + $$ = &OverClause{WindowName: $2} + } + +window_spec: + partition_by_opt order_by_opt frame_clause_opt + { + $$ = &WindowSpec{PartitionBy: $1, OrderBy: $2, Frame: $3} + } + +partition_by_opt: + { + $$ = nil + } +| PARTITION BY expression_list + { + $$ = $3 + } + +frame_clause_opt: + { + $$ = nil + } +| ROWS frame_point + { + $$ = &FrameClause{Unit: RowsStr, Start: $2} + } +| ROWS BETWEEN frame_point AND frame_point + { + $$ = &FrameClause{Unit: RowsStr, Start: $3, End: $5} + } +| RANGE frame_point + { + $$ = &FrameClause{Unit: RangeStr, Start: $2} + } +| RANGE BETWEEN frame_point AND frame_point + { + $$ = &FrameClause{Unit: RangeStr, Start: $3, End: $5} + } + +frame_point: + UNBOUNDED PRECEDING + { + $$ = &FramePoint{Type: UnboundedPrecedingStr} + } +| UNBOUNDED FOLLOWING + { + $$ = &FramePoint{Type: UnboundedFollowingStr} + } +| CURRENT ROW + { + $$ = &FramePoint{Type: CurrentRowStr} + } +| value_expression PRECEDING + { + $$ = &FramePoint{Type: PrecedingStr, Expr: $1} + } +| value_expression FOLLOWING + { + $$ = &FramePoint{Type: FollowingStr, Expr: $1} + } + +// CTE (Common Table Expression) rules +with_clause_opt: + { + $$ = nil + } +| WITH cte_list + { + $$ = &With{Recursive: false, CTEs: $2} + } +| WITH RECURSIVE cte_list + { + $$ = &With{Recursive: true, CTEs: $3} + } + +cte_list: + cte + { + $$ = []*CommonTableExpr{$1} + } +| cte_list ',' cte + { + $$ = append($1, $3) + } + +cte: + table_id AS openb select_statement closeb + { + $$ = &CommonTableExpr{Name: $1, Subquery: &Subquery{Select: $4}} + } +| table_id openb column_list closeb AS openb select_statement closeb + { + $$ = &CommonTableExpr{Name: $1, Columns: $3, Subquery: &Subquery{Select: $7}} + } + +charset: + id_or_var +{ + $$ = string($1.String()) +} +| STRING +{ + $$ = string($1) +} + +convert_type: + BINARY length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| CHAR length_opt charset_opt + { + $$ = &ConvertType{Type: string($1), Length: $2, Charset: $3, Operator: CharacterSetStr} + } +| CHAR length_opt id_or_var + { + $$ = &ConvertType{Type: string($1), Length: $2, Charset: string($3.String())} + } +| DATE + { + $$ = &ConvertType{Type: string($1)} + } +| DATETIME length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| DECIMAL decimal_length_opt + { + $$ = &ConvertType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| REAL + { + $$ = &ConvertType{Type: string($1)} + } +| JSON + { + $$ = &ConvertType{Type: string($1)} + } +| NCHAR length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| SIGNED + { + $$ = &ConvertType{Type: string($1)} + } +| SIGNED INTEGER + { + $$ = &ConvertType{Type: string($1)} + } +| TIME length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| UNSIGNED + { + $$ = &ConvertType{Type: string($1)} + } +| UNSIGNED INTEGER + { + $$ = &ConvertType{Type: string($1)} + } + +expression_opt: + { + $$ = nil + } +| expression + { + $$ = $1 + } + +separator_opt: + { + $$ = string("") + } +| SEPARATOR STRING + { + $$ = " separator '"+string($2)+"'" + } + +when_expression_list: + when_expression + { + $$ = []*When{$1} + } +| when_expression_list when_expression + { + $$ = append($1, $2) + } + +when_expression: + WHEN expression THEN expression + { + $$ = &When{Cond: $2, Val: $4} + } + +else_expression_opt: + { + $$ = nil + } +| ELSE expression + { + $$ = $2 + } + +column_name: + sql_id + { + $$ = &ColName{Name: $1} + } +| table_id '.' reserved_sql_id + { + $$ = &ColName{Qualifier: TableName{Name: $1}, Name: $3} + } +| table_id '.' reserved_table_id '.' reserved_sql_id + { + $$ = &ColName{Qualifier: TableName{Qualifier: $1, Name: $3}, Name: $5} + } + +value: + STRING + { + $$ = NewStrVal($1) + } +| HEX + { + $$ = NewHexVal($1) + } +| BIT_LITERAL + { + $$ = NewBitVal($1) + } +| INTEGRAL + { + $$ = NewIntVal($1) + } +| FLOAT + { + $$ = NewFloatVal($1) + } +| HEXNUM + { + $$ = NewHexNum($1) + } +| VALUE_ARG + { + $$ = NewValArg($1) + } +| NULL + { + $$ = &NullVal{} + } + +num_val: + sql_id + { + // TODO(sougou): Deprecate this construct. + if $1.Lowered() != "value" { + yylex.Error("expecting value after next") + return 1 + } + $$ = NewIntVal([]byte("1")) + } +| INTEGRAL VALUES + { + $$ = NewIntVal($1) + } +| VALUE_ARG VALUES + { + $$ = NewValArg($1) + } + +group_by_opt: + { + $$ = nil + } +| GROUP BY expression_list + { + $$ = $3 + } + +having_opt: + { + $$ = nil + } +| HAVING expression + { + $$ = $2 + } + +order_by_opt: + { + $$ = nil + } +| ORDER BY order_list + { + $$ = $3 + } + +order_list: + order + { + $$ = OrderBy{$1} + } +| order_list ',' order + { + $$ = append($1, $3) + } + +order: + expression asc_desc_opt + { + $$ = &Order{Expr: $1, Direction: $2} + } + +asc_desc_opt: + { + $$ = AscScr + } +| ASC + { + $$ = AscScr + } +| DESC + { + $$ = DescScr + } + +limit_opt: + { + $$ = nil + } +| LIMIT expression + { + $$ = &Limit{Rowcount: $2} + } +| LIMIT expression ',' expression + { + $$ = &Limit{Offset: $2, Rowcount: $4} + } +| LIMIT expression OFFSET expression + { + $$ = &Limit{Offset: $4, Rowcount: $2} + } + +lock_opt: + { + $$ = "" + } +| FOR UPDATE + { + $$ = ForUpdateStr + } +| LOCK IN SHARE MODE + { + $$ = ShareModeStr + } + +// insert_data expands all combinations into a single rule. +// This avoids a shift/reduce conflict while encountering the +// following two possible constructs: +// insert into t1(a, b) (select * from t2) +// insert into t1(select * from t2) +// Because the rules are together, the parser can keep shifting +// the tokens until it disambiguates a as sql_id and select as keyword. +insert_data: + VALUES tuple_list + { + $$ = &Insert{Rows: $2} + } +| select_statement + { + $$ = &Insert{Rows: $1} + } +| openb ins_column_list closeb VALUES tuple_list + { + $$ = &Insert{Columns: $2, Rows: $5} + } +| openb ins_column_list closeb select_statement + { + $$ = &Insert{Columns: $2, Rows: $4} + } + +ins_column_list: + sql_id + { + $$ = Columns{$1} + } +| sql_id '.' sql_id + { + $$ = Columns{$3} + } +| ins_column_list ',' sql_id + { + $$ = append($$, $3) + } +| ins_column_list ',' sql_id '.' sql_id + { + $$ = append($$, $5) + } + +on_dup_opt: + { + $$ = nil + } +| on_dup + { + $$ = $1 + } + +on_dup: + ON DUPLICATE KEY UPDATE update_list + { + $$ = $5 + } + +returning_opt: + { + $$ = nil + } +| returning + { + $$ = $1 + } + +returning: + RETURNING select_expression_list + { + $$ = $2 + } + +tuple_list: + tuple_or_empty + { + $$ = Values{$1} + } +| tuple_list ',' tuple_or_empty + { + $$ = append($1, $3) + } + +tuple_or_empty: + row_tuple + { + $$ = $1 + } +| openb closeb + { + $$ = ValTuple{} + } + +row_tuple: + openb expression_list closeb + { + $$ = ValTuple($2) + } + +tuple_expression: + row_tuple + { + if len($1) == 1 { + $$ = $1[0] + } else { + $$ = $1 + } + } + +update_list: + update_expression + { + $$ = UpdateExprs{$1} + } +| update_list ',' update_expression + { + $$ = append($1, $3) + } + +update_expression: + column_name '=' expression + { + $$ = &UpdateExpr{Name: $1, Expr: $3} + } + +set_list: + set_expression + { + $$ = SetExprs{$1} + } +| set_session_or_global set_expression + { + $2.Scope = $1 + $$ = SetExprs{$2} + } +| set_list ',' set_expression + { + $$ = append($1, $3) + } + +set_expression: + reserved_sql_id '=' ON + { + $$ = &SetExpr{Name: $1, Expr: NewStrVal([]byte("on"))} + } +| reserved_sql_id '=' OFF + { + $$ = &SetExpr{Name: $1, Expr: NewStrVal([]byte("off"))} + } +| reserved_sql_id '=' expression + { + $$ = &SetExpr{Name: $1, Expr: $3} + } +| reserved_sql_id TO ON + { + $$ = &SetExpr{Name: $1, Expr: NewStrVal([]byte("on"))} + } +| reserved_sql_id TO OFF + { + $$ = &SetExpr{Name: $1, Expr: NewStrVal([]byte("off"))} + } +| reserved_sql_id TO expression + { + $$ = &SetExpr{Name: $1, Expr: $3} + } +| charset_or_character_set charset_value collate_opt + { + $$ = &SetExpr{Name: NewColIdent(string($1)), Expr: $2} + } + +charset_or_character_set: + CHARSET +| CHARACTER SET + { + $$ = []byte("charset") + } +| NAMES + +charset_value: + sql_id + { + $$ = NewStrVal([]byte($1.String())) + } +| STRING + { + $$ = NewStrVal($1) + } +| DEFAULT + { + $$ = &Default{} + } + +for_from: + FOR +| FROM + +exists_opt: + { $$ = 0 } +| IF EXISTS + { $$ = 1 } + +not_exists_opt: + { $$ = 0 } +| IF NOT EXISTS + { $$ = 1 } + +ignore_opt: + { $$ = "" } +| IGNORE + { $$ = IgnoreStr } + +non_add_drop_or_rename_operation: + ALTER + { $$ = struct{}{} } +| AUTO_INCREMENT + { $$ = struct{}{} } +| CHARACTER + { $$ = struct{}{} } +| COMMENT_KEYWORD + { $$ = struct{}{} } +| DEFAULT + { $$ = struct{}{} } +| ORDER + { $$ = struct{}{} } +| CONVERT + { $$ = struct{}{} } +| PARTITION + { $$ = struct{}{} } +| UNUSED + { $$ = struct{}{} } +| id_or_var + { $$ = struct{}{} } + +to_opt: + { $$ = struct{}{} } +| TO + { $$ = struct{}{} } +| AS + { $$ = struct{}{} } + +index_opt: + INDEX + { $$ = struct{}{} } +| KEY + { $$ = struct{}{} } + +constraint_opt: + { $$ = struct{}{} } +| UNIQUE + { $$ = struct{}{} } +| sql_id + { $$ = struct{}{} } + +using_opt: + { $$ = ColIdent{} } +| USING sql_id + { $$ = $2 } + +sql_id: + id_or_var + { + $$ = $1 + } +| non_reserved_keyword + { + $$ = NewColIdent(string($1)) + } + +reserved_sql_id: + sql_id +| reserved_keyword + { + $$ = NewColIdent(string($1)) + } + +table_id: + id_or_var + { + $$ = NewTableIdent(string($1.String())) + } +| non_reserved_keyword + { + $$ = NewTableIdent(string($1)) + } + +reserved_table_id: + table_id +| reserved_keyword + { + $$ = NewTableIdent(string($1)) + } + +exec_var: + at_id '=' value + { + $$ = NewExecVarDef($1, $3) + } + +exec_payload: + at_at_id '=' value + { + $$ = NewExecVarDef($1, $3) + } + +opt_exec_payload: + /*empty*/ + { $$ = nil } + | exec_payload + { + rv := $1; + $$ = &rv + } + +exec_var_list: +/*empty*/ { $$ = nil } +| exec_var + { + $$ = []ExecVarDef{$1} + } +| exec_var_list ',' exec_var + { + $$ = append($1, $3) + } + + +exec_stmt: + EXEC comment_opt table_name exec_var_list opt_exec_payload + { + $$ = NewExec($2, $3, $4, $5) + } + +purge_stmt: + PURGE comment_opt + { + $$ = NewPurge($2, TableName{}, true) + } +| PURGE comment_opt table_name + { + $$ = NewPurge($2, $3, false) + } + +nativequery_stmt: + NATIVEQUERY comment_opt STRING + { + $$ = NewNativeQuery($2, string($3)) + } + + +/* + These are not all necessarily reserved in MySQL, but some are. + + These are more importantly reserved because they may conflict with our grammar. + If you want to move one that is not reserved in MySQL (i.e. ESCAPE) to the + non_reserved_keywords, you'll need to deal with any conflicts. + + Sorted alphabetically +*/ +reserved_keyword: + ADD +| ARRAY +| AND +| AS +| ASC +| AUTO_INCREMENT +| BETWEEN +| BINARY +| BY +| CASE +| COLLATE +| CONVERT +| CREATE +| CROSS +| CUME_DIST +| CURRENT_DATE +| CURRENT_TIME +| CURRENT_TIMESTAMP +| SUBSTR +| SUBSTRING +| DATABASE +| DATABASES +| DELETE +| DENSE_RANK +| DESC +| DESCRIBE +| DISTINCT +| DISTINCTROW +| DIV +| DROP +| ELSE +| END +| ESCAPE +| EXEC +| EXISTS +| EXPLAIN +| FALSE +| FIRST_VALUE +| FOR +| FORCE +| FROM +| GROUP +| GROUPING +| GROUPS +| HAVING +| IF +| IGNORE +| IN +| INDEX +| INNER +| INSERT +| INTERVAL +| INTO +| IS +| JOIN +| JSON_TABLE +| LAG +| LAST_VALUE +| LATERAL +| LEAD +| LEFT +| LIKE +| LIMIT +| LOCALTIME +| LOCALTIMESTAMP +| LOCK +| MEMBER +| MATCH +| MAXVALUE +| MOD +| NATURAL +| NEXT // next should be doable as non-reserved, but is not due to the special `select next num_val` query that vitess supports +| NOT +| NTH_VALUE +| NTILE +| NULL +| OF +| OFF +| ON +| OR +| ORDER +| OUTER +| OVER +| PERCENT_RANK +| RANK +| RECURSIVE +| REGEXP +| RENAME +| REPLACE +| RETURNING +| RIGHT +| ROW_NUMBER +| SCHEMA +| SELECT +| SEPARATOR +| SET +| SHOW +| STRAIGHT_JOIN +| SYSTEM +| TABLE +| THEN +| TIMESTAMPADD +| TIMESTAMPDIFF +| TO +| TRUE +| UNION +| UNIQUE +| UNLOCK +| UPDATE +| USE +| USING +| UTC_DATE +| UTC_TIME +| UTC_TIMESTAMP +| VALUES +| WHEN +| WHERE +| WINDOW +| XOR + +/* + These are non-reserved Vitess, because they don't cause conflicts in the grammar. + Some of them may be reserved in MySQL. The good news is we backtick quote them + when we rewrite the query, so no issue should arise. + + Sorted alphabetically +*/ +non_reserved_keyword: + AGAINST +| ACTION +| ACTIVE +| ADMIN +| AUTH +| BEGIN +| BIGINT +| BIT +| BLOB +| BOOL +| BOOLEAN +| BUCKETS +| CASCADE +| CHAR +| CHARACTER +| CHARSET +| CHECK +| CLONE +| COLLATION +| COLUMNS +| COMMENT_KEYWORD +| COMMIT +| COMMITTED +| COMPONENT +| DATE +| DATETIME +| DECIMAL +| DEFAULT +| DEFINITION +| DESCRIPTION +| DOUBLE +| DUPLICATE +| ENFORCED +| ENGINES +| ENUM +| EXCLUDE +| EXPANSION +| EXTENDED +| FLOAT_TYPE +| FIELDS +| FLUSH +| FOLLOWING +| FOREIGN +| FULLTEXT +| GEOMCOLLECTION +| GEOMETRY +| GEOMETRYCOLLECTION +| GET_MASTER_PUBLIC_KEY +| GLOBAL +| HISTOGRAM +| HISTORY +| INACTIVE +| STACKQL +| INT +| INTEGER +| INTERACTIVE +| INVISIBLE +| INDEXES +| ISOLATION +| JSON +| JSON_ARRAY_ELEMENTS_TEXT +| JSON_EACH +| KEY +| KEY_BLOCK_SIZE +| KEYS +| LANGUAGE +| LAST_INSERT_ID +| LESS +| LEVEL +| LINESTRING +| LOCKED +| LONGBLOB +| LONGTEXT +| LOGIN +| MASTER_COMPRESSION_ALGORITHMS +| MASTER_PUBLIC_KEY_PATH +| MASTER_TLS_CIPHERSUITES +| MASTER_ZSTD_COMPRESSION_LEVEL +| MATERIALIZED +| MEDIUMBLOB +| MEDIUMINT +| MEDIUMTEXT +| MODE +| MULTILINESTRING +| MULTIPOINT +| MULTIPOLYGON +| NAMES +| NATIVEQUERY +| NCHAR +| NESTED +| NETWORK_NAMESPACE +| NOWAIT +| NO +| NULLS +| NUMERIC +| OFFSET +| OJ +| OLD +| OPTIONAL +| ORDINALITY +| ORGANIZATION +| ONLY +| OPTIMIZE +| OTHERS +| PARTITION +| PATH +| PERSIST +| PERSIST_ONLY +| PRECEDING +| PRIVILEGE_CHECKS_USER +| PROCESS +| PLUGINS +| POINT +| POLYGON +| PRIMARY +| PROCEDURE +| PROCESSLIST +| PURGE +| QUERY +| RANDOM +| READ +| REAL +| REFERENCE +| REFERENCES +| REFRESH +| REORGANIZE +| REPAIR +| REPEATABLE +| RESTRICT +| REQUIRE_ROW_FORMAT +| RESOURCE +| RESPECT +| RESTART +| RETAIN +| REUSE +| REVOKE +| ROLE +| ROLLBACK +| SA +| SECONDARY +| SECONDARY_ENGINE +| SECONDARY_LOAD +| SECONDARY_UNLOAD +| SEQUENCE +| SERVICEACCOUNT +| SESSION +| SERIALIZABLE +| SHARE +| SIGNED +| SKIP +| SMALLINT +| SLEEP +| SPATIAL +| SRID +| START +| STATUS +| TABLES +| TEMP +| TEMPORARY +| TEXT +| THAN +| THREAD_PRIORITY +| TIES +| TIME +| TIMESTAMP +| TINYBLOB +| TINYINT +| TINYTEXT +| TRANSACTION +| TRIGGER +| TRUNCATE +| UNBOUNDED +| UNCOMMITTED +| UNSIGNED +| UNUSED +| VARBINARY +| VARCHAR +| VARIABLES +| VCPU +| VIEW +| VINDEX +| VINDEXES +| VISIBLE +| VITESS_METADATA +| VSCHEMA +| WARNINGS +| WITH +| WRITE +| YEAR +| ZEROFILL + +openb: + '(' + { + if incNesting(yylex) { + yylex.Error("max nesting level reached") + return 1 + } + } + +closeb: + ')' + { + decNesting(yylex) + } + +skip_to_end: +{ + skipToEnd(yylex) +} + +ddl_skip_to_end: + { + skipToEnd(yylex) + } +| openb + { + skipToEnd(yylex) + } +| reserved_sql_id + { + skipToEnd(yylex) + } diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/test_queries/django_queries.txt b/internal/stackql-parser-fork/go/vt/sqlparser/test_queries/django_queries.txt new file mode 100644 index 00000000..6b6588be --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/test_queries/django_queries.txt @@ -0,0 +1,290 @@ +set autocommit=1 +SELECT @@SQL_AUTO_IS_NULL +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED +SELECT @@sql_mode +SHOW FULL TABLES +SHOW FULL TABLES +SHOW FULL TABLES +CREATE TABLE `django_migrations` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `app` varchar(255) NOT NULL, `name` varchar(255) NOT NULL, `applied` datetime(6) NOT NULL) +SELECT engine FROM information_schema.tables WHERE table_name = 'django_migrations' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_migrations' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_migrations' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_migrations' +CREATE TABLE `accounts_userprofile` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `password` varchar(128) NOT NULL, `last_login` datetime(6) NULL, `email` varchar(255) NOT NULL UNIQUE, `first_name` varchar(255) NULL, `last_name` varchar(255) NULL, `active` bool NOT NULL, `staff` bool NOT NULL, `admin` bool NOT NULL, `timestamp` datetime(6) NOT NULL) +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('accounts', '0001_initial', '2019-09-11 20:08:24.240837') +ALTER TABLE `accounts_userprofile` ADD COLUMN `uid` varchar(8) DEFAULT 'bfghjkl' NOT NULL UNIQUE +ALTER TABLE `accounts_userprofile` ALTER COLUMN `uid` DROP DEFAULT +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('accounts', '0002_userprofile_uid', '2019-09-11 20:08:24.390839') +ALTER TABLE `accounts_userprofile` ADD COLUMN `bio` varchar(255) NULL +SELECT engine FROM information_schema.tables WHERE table_name = 'accounts_userprofile' +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('accounts', '0003_userprofile_bio', '2019-09-11 20:08:24.521784') +CREATE TABLE `django_content_type` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(100) NOT NULL, `app_label` varchar(100) NOT NULL, `model` varchar(100) NOT NULL) +SELECT engine FROM information_schema.tables WHERE table_name = 'django_content_type' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_content_type' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_content_type' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_content_type' +ALTER TABLE `django_content_type` ADD CONSTRAINT `django_content_type_app_label_model_76bd3d3b_uniq` UNIQUE (`app_label`, `model`) +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('contenttypes', '0001_initial', '2019-09-11 20:08:24.640638') +CREATE TABLE `django_admin_log` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `action_time` datetime(6) NOT NULL, `object_id` longtext NULL, `object_repr` varchar(200) NOT NULL, `action_flag` smallint UNSIGNED NOT NULL, `change_message` longtext NOT NULL, `content_type_id` integer NULL, `user_id` integer NOT NULL) +SELECT engine FROM information_schema.tables WHERE table_name = 'django_admin_log' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_admin_log' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_admin_log' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_admin_log' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_admin_log' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_admin_log' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_admin_log' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_admin_log' +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('admin', '0001_initial', '2019-09-11 20:08:24.711570') +ALTER TABLE `django_admin_log` ADD CONSTRAINT `django_admin_log_content_type_id_c4bce8eb_fk_django_co` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`) +ALTER TABLE `django_admin_log` ADD CONSTRAINT `django_admin_log_user_id_c564eba6_fk_accounts_userprofile_id` FOREIGN KEY (`user_id`) REFERENCES `accounts_userprofile` (`id`) +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('admin', '0002_logentry_remove_auto_add', '2019-09-11 20:08:24.993999') +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('admin', '0003_logentry_add_action_flag_choices', '2019-09-11 20:08:25.005500') +ALTER TABLE `django_content_type` MODIFY `name` varchar(100) NULL +set autocommit=0 +commit +set autocommit=1 +ALTER TABLE `django_content_type` DROP COLUMN `name` +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('contenttypes', '0002_remove_content_type_name', '2019-09-11 20:08:25.195532') +CREATE TABLE `auth_permission` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(50) NOT NULL, `content_type_id` integer NOT NULL, `codename` varchar(100) NOT NULL) +SELECT engine FROM information_schema.tables WHERE table_name = 'auth_permission' +SELECT engine FROM information_schema.tables WHERE table_name = 'auth_permission' +SELECT engine FROM information_schema.tables WHERE table_name = 'auth_permission' +SELECT engine FROM information_schema.tables WHERE table_name = 'auth_permission' +CREATE TABLE `auth_group` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(80) NOT NULL UNIQUE) +SELECT engine FROM information_schema.tables WHERE table_name = 'auth_group' +SELECT engine FROM information_schema.tables WHERE table_name = 'auth_group' +CREATE TABLE `auth_group_permissions` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `group_id` integer NOT NULL, `permission_id` integer NOT NULL) +SELECT engine FROM information_schema.tables WHERE table_name = 'auth_group_permissions' +SELECT engine FROM information_schema.tables WHERE table_name = 'auth_group_permissions' +SELECT engine FROM information_schema.tables WHERE table_name = 'auth_group_permissions' +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('auth', '0001_initial', '2019-09-11 20:08:25.380981') +ALTER TABLE `auth_permission` ADD CONSTRAINT `auth_permission_content_type_id_2f476e4b_fk_django_co` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`) +ALTER TABLE `auth_permission` ADD CONSTRAINT `auth_permission_content_type_id_codename_01ab375a_uniq` UNIQUE (`content_type_id`, `codename`) +ALTER TABLE `auth_group_permissions` ADD CONSTRAINT `auth_group_permissions_group_id_b120cbf9_fk_auth_group_id` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`) +ALTER TABLE `auth_group_permissions` ADD CONSTRAINT `auth_group_permissio_permission_id_84c5c92e_fk_auth_perm` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`) +ALTER TABLE `auth_group_permissions` ADD CONSTRAINT `auth_group_permissions_group_id_permission_id_0cd325b0_uniq` UNIQUE (`group_id`, `permission_id`) +ALTER TABLE `auth_permission` MODIFY `name` varchar(255) NOT NULL +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('auth', '0002_alter_permission_name_max_length', '2019-09-11 20:08:25.927116') +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('auth', '0003_alter_user_email_max_length', '2019-09-11 20:08:25.944751') +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('auth', '0004_alter_user_username_opts', '2019-09-11 20:08:25.954957') +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('auth', '0005_alter_user_last_login_null', '2019-09-11 20:08:25.966743') +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('auth', '0006_require_contenttypes_0002', '2019-09-11 20:08:25.970363') +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('auth', '0007_alter_validators_add_error_messages', '2019-09-11 20:08:25.977706') +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('auth', '0008_alter_user_username_max_length', '2019-09-11 20:08:25.988202') +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('auth', '0009_alter_user_last_name_max_length', '2019-09-11 20:08:25.996464') +ALTER TABLE `auth_group` MODIFY `name` varchar(150) NOT NULL +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('auth', '0010_alter_group_name_max_length', '2019-09-11 20:08:26.006057') +set autocommit=0 +commit +set autocommit=1 +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('auth', '0011_update_proxy_permissions', '2019-09-11 20:08:26.019693') +CREATE TABLE `post_post` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `title` varchar(250) NOT NULL, `body` longtext NOT NULL, `slug` varchar(8) NOT NULL UNIQUE, `pub_date` datetime(6) NOT NULL, `thumbnail` varchar(100) NOT NULL, `author_id` integer NOT NULL) +SELECT engine FROM information_schema.tables WHERE table_name = 'post_post' +SELECT engine FROM information_schema.tables WHERE table_name = 'post_post' +SELECT engine FROM information_schema.tables WHERE table_name = 'post_post' +SELECT engine FROM information_schema.tables WHERE table_name = 'post_post' +SELECT engine FROM information_schema.tables WHERE table_name = 'post_post' +SELECT engine FROM information_schema.tables WHERE table_name = 'post_post' +SELECT engine FROM information_schema.tables WHERE table_name = 'post_post' +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('post', '0001_initial', '2019-09-11 20:08:26.078601') +ALTER TABLE `post_post` ADD CONSTRAINT `post_post_author_id_99d134d5_fk_accounts_userprofile_id` FOREIGN KEY (`author_id`) REFERENCES `accounts_userprofile` (`id`) +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('post', '0002_auto_20180225_1142', '2019-09-11 20:08:26.223293') +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('post', '0003_auto_20180225_1154', '2019-09-11 20:08:26.231490') +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('post', '0004_auto_20180225_1256', '2019-09-11 20:08:26.241872') +CREATE TABLE `bookmark_post` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `obj_id` integer NOT NULL, `user_id` integer NOT NULL) +SELECT engine FROM information_schema.tables WHERE table_name = 'bookmark_post' +SELECT engine FROM information_schema.tables WHERE table_name = 'bookmark_post' +SELECT engine FROM information_schema.tables WHERE table_name = 'bookmark_post' +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('bookmarks', '0001_initial', '2019-09-11 20:08:26.303419') +ALTER TABLE `bookmark_post` ADD CONSTRAINT `bookmark_post_obj_id_cff442ed_fk_post_post_id` FOREIGN KEY (`obj_id`) REFERENCES `post_post` (`id`) +ALTER TABLE `bookmark_post` ADD CONSTRAINT `bookmark_post_user_id_ecbdae36_fk_accounts_userprofile_id` FOREIGN KEY (`user_id`) REFERENCES `accounts_userprofile` (`id`) +SELECT kc.`constraint_name`, kc.`column_name`, kc.`referenced_table_name`, kc.`referenced_column_name` FROM information_schema.key_column_usage AS kc WHERE kc.table_schema = DATABASE() AND kc.table_name = 'bookmark_post' ORDER BY kc.`ordinal_position` +SELECT c.constraint_name, c.constraint_type FROM information_schema.table_constraints AS c WHERE c.table_schema = DATABASE() AND c.table_name = 'bookmark_post' +SHOW INDEX FROM `bookmark_post` +ALTER TABLE `bookmark_post` DROP FOREIGN KEY `bookmark_post_obj_id_cff442ed_fk_post_post_id` +ALTER TABLE `bookmark_post` ADD CONSTRAINT `bookmark_post_obj_id_cff442ed_fk_post_post_id` FOREIGN KEY (`obj_id`) REFERENCES `post_post` (`id`) +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('bookmarks', '0002_auto_20180307_0102', '2019-09-11 20:08:26.751223') +CREATE TABLE `comments_comment` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `object_id` integer UNSIGNED NOT NULL, `content` longtext NOT NULL, `timestamp` datetime(6) NOT NULL, `content_type_id` integer NOT NULL, `user_id` integer NOT NULL) +SELECT engine FROM information_schema.tables WHERE table_name = 'comments_comment' +SELECT engine FROM information_schema.tables WHERE table_name = 'comments_comment' +SELECT engine FROM information_schema.tables WHERE table_name = 'comments_comment' +SELECT engine FROM information_schema.tables WHERE table_name = 'comments_comment' +SELECT engine FROM information_schema.tables WHERE table_name = 'comments_comment' +SELECT engine FROM information_schema.tables WHERE table_name = 'comments_comment' +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('comments', '0001_initial', '2019-09-11 20:08:26.827533') +ALTER TABLE `comments_comment` ADD CONSTRAINT `comments_comment_content_type_id_72fd5dbe_fk_django_co` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`) +ALTER TABLE `comments_comment` ADD CONSTRAINT `comments_comment_user_id_a1db4881_fk_accounts_userprofile_id` FOREIGN KEY (`user_id`) REFERENCES `accounts_userprofile` (`id`) +ALTER TABLE `comments_comment` ADD COLUMN `parent_comment_id` integer NULL +SELECT engine FROM information_schema.tables WHERE table_name = 'comments_comment' +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('comments', '0002_comment_parent_comment', '2019-09-11 20:08:27.232388') +ALTER TABLE `comments_comment` ADD CONSTRAINT `comments_comment_parent_comment_id_71289d4a_fk_comments_` FOREIGN KEY (`parent_comment_id`) REFERENCES `comments_comment` (`id`) +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('comments', '0003_auto_20180227_2159', '2019-09-11 20:08:27.408413') +CREATE TABLE `likesdislikes_likedislike` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `vote` smallint NOT NULL, `object_id` integer UNSIGNED NOT NULL, `content_type_id` integer NOT NULL, `user_id` integer NOT NULL) +SELECT engine FROM information_schema.tables WHERE table_name = 'likesdislikes_likedislike' +SELECT engine FROM information_schema.tables WHERE table_name = 'likesdislikes_likedislike' +SELECT engine FROM information_schema.tables WHERE table_name = 'likesdislikes_likedislike' +SELECT engine FROM information_schema.tables WHERE table_name = 'likesdislikes_likedislike' +SELECT engine FROM information_schema.tables WHERE table_name = 'likesdislikes_likedislike' +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('likesdislikes', '0001_initial', '2019-09-11 20:08:27.471337') +ALTER TABLE `likesdislikes_likedislike` ADD CONSTRAINT `likesdislikes_likedi_content_type_id_1bd751d8_fk_django_co` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`) +ALTER TABLE `likesdislikes_likedislike` ADD CONSTRAINT `likesdislikes_likedi_user_id_fbedd04e_fk_accounts_` FOREIGN KEY (`user_id`) REFERENCES `accounts_userprofile` (`id`) +CREATE TABLE `django_session` (`session_key` varchar(40) NOT NULL PRIMARY KEY, `session_data` longtext NOT NULL, `expire_date` datetime(6) NOT NULL) +SELECT engine FROM information_schema.tables WHERE table_name = 'django_session' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_session' +SELECT engine FROM information_schema.tables WHERE table_name = 'django_session' +SHOW FULL TABLES +INSERT INTO `django_migrations` (`app`, `name`, `applied`) VALUES ('sessions', '0001_initial', '2019-09-11 20:08:27.790749') +CREATE INDEX `django_session_expire_date_a5c62663` ON `django_session` (`expire_date`) +SHOW FULL TABLES +SELECT `django_migrations`.`app`, `django_migrations`.`name` FROM `django_migrations` +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'admin' +set autocommit=0 +INSERT INTO `django_content_type` (`app_label`, `model`) VALUES ('admin', 'logentry') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE (`django_content_type`.`app_label` = 'admin' AND `django_content_type`.`model` = 'logentry') +SELECT `auth_permission`.`content_type_id`, `auth_permission`.`codename` FROM `auth_permission` INNER JOIN `django_content_type` ON (`auth_permission`.`content_type_id` = `django_content_type`.`id`) WHERE `auth_permission`.`content_type_id` IN (1) ORDER BY `django_content_type`.`app_label` ASC, `django_content_type`.`model` ASC, `auth_permission`.`codename` ASC +set autocommit=0 +INSERT INTO `auth_permission` (`name`, `content_type_id`, `codename`) VALUES ('Can add log entry', 1, 'add_logentry'), ('Can change log entry', 1, 'change_logentry'), ('Can delete log entry', 1, 'delete_logentry'), ('Can view log entry', 1, 'view_logentry') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'admin' +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'auth' +set autocommit=0 +INSERT INTO `django_content_type` (`app_label`, `model`) VALUES ('auth', 'permission'), ('auth', 'group') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE (`django_content_type`.`app_label` = 'auth' AND `django_content_type`.`model` = 'permission') +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE (`django_content_type`.`app_label` = 'auth' AND `django_content_type`.`model` = 'group') +SELECT `auth_permission`.`content_type_id`, `auth_permission`.`codename` FROM `auth_permission` INNER JOIN `django_content_type` ON (`auth_permission`.`content_type_id` = `django_content_type`.`id`) WHERE `auth_permission`.`content_type_id` IN (2, 3) ORDER BY `django_content_type`.`app_label` ASC, `django_content_type`.`model` ASC, `auth_permission`.`codename` ASC +set autocommit=0 +INSERT INTO `auth_permission` (`name`, `content_type_id`, `codename`) VALUES ('Can add permission', 2, 'add_permission'), ('Can change permission', 2, 'change_permission'), ('Can delete permission', 2, 'delete_permission'), ('Can view permission', 2, 'view_permission'), ('Can add group', 3, 'add_group'), ('Can change group', 3, 'change_group'), ('Can delete group', 3, 'delete_group'), ('Can view group', 3, 'view_group') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'auth' +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'contenttypes' +set autocommit=0 +INSERT INTO `django_content_type` (`app_label`, `model`) VALUES ('contenttypes', 'contenttype') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE (`django_content_type`.`app_label` = 'contenttypes' AND `django_content_type`.`model` = 'contenttype') +SELECT `auth_permission`.`content_type_id`, `auth_permission`.`codename` FROM `auth_permission` INNER JOIN `django_content_type` ON (`auth_permission`.`content_type_id` = `django_content_type`.`id`) WHERE `auth_permission`.`content_type_id` IN (4) ORDER BY `django_content_type`.`app_label` ASC, `django_content_type`.`model` ASC, `auth_permission`.`codename` ASC +set autocommit=0 +INSERT INTO `auth_permission` (`name`, `content_type_id`, `codename`) VALUES ('Can add content type', 4, 'add_contenttype'), ('Can change content type', 4, 'change_contenttype'), ('Can delete content type', 4, 'delete_contenttype'), ('Can view content type', 4, 'view_contenttype') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'contenttypes' +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'sessions' +set autocommit=0 +INSERT INTO `django_content_type` (`app_label`, `model`) VALUES ('sessions', 'session') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE (`django_content_type`.`app_label` = 'sessions' AND `django_content_type`.`model` = 'session') +SELECT `auth_permission`.`content_type_id`, `auth_permission`.`codename` FROM `auth_permission` INNER JOIN `django_content_type` ON (`auth_permission`.`content_type_id` = `django_content_type`.`id`) WHERE `auth_permission`.`content_type_id` IN (5) ORDER BY `django_content_type`.`app_label` ASC, `django_content_type`.`model` ASC, `auth_permission`.`codename` ASC +set autocommit=0 +INSERT INTO `auth_permission` (`name`, `content_type_id`, `codename`) VALUES ('Can add session', 5, 'add_session'), ('Can change session', 5, 'change_session'), ('Can delete session', 5, 'delete_session'), ('Can view session', 5, 'view_session') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'sessions' +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'post' +set autocommit=0 +INSERT INTO `django_content_type` (`app_label`, `model`) VALUES ('post', 'post') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE (`django_content_type`.`app_label` = 'post' AND `django_content_type`.`model` = 'post') +SELECT `auth_permission`.`content_type_id`, `auth_permission`.`codename` FROM `auth_permission` INNER JOIN `django_content_type` ON (`auth_permission`.`content_type_id` = `django_content_type`.`id`) WHERE `auth_permission`.`content_type_id` IN (6) ORDER BY `django_content_type`.`app_label` ASC, `django_content_type`.`model` ASC, `auth_permission`.`codename` ASC +set autocommit=0 +INSERT INTO `auth_permission` (`name`, `content_type_id`, `codename`) VALUES ('Can add post', 6, 'add_post'), ('Can change post', 6, 'change_post'), ('Can delete post', 6, 'delete_post'), ('Can view post', 6, 'view_post') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'post' +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'accounts' +set autocommit=0 +INSERT INTO `django_content_type` (`app_label`, `model`) VALUES ('accounts', 'userprofile') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE (`django_content_type`.`app_label` = 'accounts' AND `django_content_type`.`model` = 'userprofile') +SELECT `auth_permission`.`content_type_id`, `auth_permission`.`codename` FROM `auth_permission` INNER JOIN `django_content_type` ON (`auth_permission`.`content_type_id` = `django_content_type`.`id`) WHERE `auth_permission`.`content_type_id` IN (7) ORDER BY `django_content_type`.`app_label` ASC, `django_content_type`.`model` ASC, `auth_permission`.`codename` ASC +set autocommit=0 +INSERT INTO `auth_permission` (`name`, `content_type_id`, `codename`) VALUES ('Can add user profile', 7, 'add_userprofile'), ('Can change user profile', 7, 'change_userprofile'), ('Can delete user profile', 7, 'delete_userprofile'), ('Can view user profile', 7, 'view_userprofile') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'accounts' +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'comments' +set autocommit=0 +INSERT INTO `django_content_type` (`app_label`, `model`) VALUES ('comments', 'comment') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE (`django_content_type`.`app_label` = 'comments' AND `django_content_type`.`model` = 'comment') +SELECT `auth_permission`.`content_type_id`, `auth_permission`.`codename` FROM `auth_permission` INNER JOIN `django_content_type` ON (`auth_permission`.`content_type_id` = `django_content_type`.`id`) WHERE `auth_permission`.`content_type_id` IN (8) ORDER BY `django_content_type`.`app_label` ASC, `django_content_type`.`model` ASC, `auth_permission`.`codename` ASC +set autocommit=0 +INSERT INTO `auth_permission` (`name`, `content_type_id`, `codename`) VALUES ('Can add comment', 8, 'add_comment'), ('Can change comment', 8, 'change_comment'), ('Can delete comment', 8, 'delete_comment'), ('Can view comment', 8, 'view_comment') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'comments' +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'likesdislikes' +set autocommit=0 +INSERT INTO `django_content_type` (`app_label`, `model`) VALUES ('likesdislikes', 'likedislike') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE (`django_content_type`.`app_label` = 'likesdislikes' AND `django_content_type`.`model` = 'likedislike') +SELECT `auth_permission`.`content_type_id`, `auth_permission`.`codename` FROM `auth_permission` INNER JOIN `django_content_type` ON (`auth_permission`.`content_type_id` = `django_content_type`.`id`) WHERE `auth_permission`.`content_type_id` IN (9) ORDER BY `django_content_type`.`app_label` ASC, `django_content_type`.`model` ASC, `auth_permission`.`codename` ASC +set autocommit=0 +INSERT INTO `auth_permission` (`name`, `content_type_id`, `codename`) VALUES ('Can add like dislike', 9, 'add_likedislike'), ('Can change like dislike', 9, 'change_likedislike'), ('Can delete like dislike', 9, 'delete_likedislike'), ('Can view like dislike', 9, 'view_likedislike') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'likesdislikes' +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'bookmarks' +set autocommit=0 +INSERT INTO `django_content_type` (`app_label`, `model`) VALUES ('bookmarks', 'bookmarkpost') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE (`django_content_type`.`app_label` = 'bookmarks' AND `django_content_type`.`model` = 'bookmarkpost') +SELECT `auth_permission`.`content_type_id`, `auth_permission`.`codename` FROM `auth_permission` INNER JOIN `django_content_type` ON (`auth_permission`.`content_type_id` = `django_content_type`.`id`) WHERE `auth_permission`.`content_type_id` IN (10) ORDER BY `django_content_type`.`app_label` ASC, `django_content_type`.`model` ASC, `auth_permission`.`codename` ASC +set autocommit=0 +INSERT INTO `auth_permission` (`name`, `content_type_id`, `codename`) VALUES ('Can add bookmark post', 10, 'add_bookmarkpost'), ('Can change bookmark post', 10, 'change_bookmarkpost'), ('Can delete bookmark post', 10, 'delete_bookmarkpost'), ('Can view bookmark post', 10, 'view_bookmarkpost') +commit +set autocommit=1 +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'bookmarks' +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'pagedown' +SELECT `django_content_type`.`id`, `django_content_type`.`app_label`, `django_content_type`.`model` FROM `django_content_type` WHERE `django_content_type`.`app_label` = 'pagedown' \ No newline at end of file diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/token.go b/internal/stackql-parser-fork/go/vt/sqlparser/token.go new file mode 100644 index 00000000..8869805e --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/token.go @@ -0,0 +1,1103 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "bytes" + "fmt" + "io" + "strings" + + "github.com/stackql/stackql-parser/go/bytes2" + "github.com/stackql/stackql-parser/go/sqltypes" + + "github.com/Masterminds/semver" +) + +const ( + defaultBufSize = 4096 + eofChar = 0x100 + literalDelim uint16 = '"' +) + +// Tokenizer is the struct used to generate SQL +// tokens for the parser. +type Tokenizer struct { + InStream io.Reader + AllowComments bool + SkipSpecialComments bool + SkipToEnd bool + lastChar uint16 + Position int + lastToken []byte + LastError error + posVarIndex int + ParseTree Statement + partialDDL *DDL + nesting int + multi bool + specialComment *Tokenizer + + buf []byte + bufPos int + bufSize int +} + +// NewStringTokenizer creates a new Tokenizer for the +// sql string. +func NewStringTokenizer(sql string) *Tokenizer { + buf := []byte(sql) + return &Tokenizer{ + buf: buf, + bufSize: len(buf), + } +} + +// NewTokenizer creates a new Tokenizer reading a sql +// string from the io.Reader. +func NewTokenizer(r io.Reader) *Tokenizer { + return &Tokenizer{ + InStream: r, + buf: make([]byte, defaultBufSize), + } +} + +// keywords is a map of mysql keywords that fall into two categories: +// 1) keywords considered reserved by MySQL +// 2) keywords for us to handle specially in sql.y +// +// Those marked as UNUSED are likely reserved keywords. We add them here so that +// when rewriting queries we can properly backtick quote them so they don't cause issues +// +// NOTE: If you add new keywords, add them also to the reserved_keywords or +// non_reserved_keywords grammar in sql.y -- this will allow the keyword to be used +// in identifiers. See the docs for each grammar to determine which one to put it into. +var keywords = map[string]int{ + "accessible": UNUSED, + "action": ACTION, + "add": ADD, + "against": AGAINST, + "all": ALL, + "alter": ALTER, + "analyze": ANALYZE, + "and": AND, + "as": AS, + "asc": ASC, + "asensitive": UNUSED, + "auth": AUTH, + "auto_increment": AUTO_INCREMENT, + "before": UNUSED, + "begin": BEGIN, + "between": BETWEEN, + "bigint": BIGINT, + "binary": BINARY, + "_binary": UNDERSCORE_BINARY, + "_utf8mb4": UNDERSCORE_UTF8MB4, + "_utf8": UNDERSCORE_UTF8, + "_latin1": UNDERSCORE_LATIN1, + "bit": BIT, + "blob": BLOB, + "bool": BOOL, + "boolean": BOOLEAN, + "both": UNUSED, + "by": BY, + "call": UNUSED, + "cascade": CASCADE, + "case": CASE, + "cast": CAST, + "change": UNUSED, + "char": CHAR, + "character": CHARACTER, + "charset": CHARSET, + "check": CHECK, + "collate": COLLATE, + "collation": COLLATION, + "column": COLUMN, + "columns": COLUMNS, + "comment": COMMENT_KEYWORD, + "committed": COMMITTED, + "commit": COMMIT, + "condition": UNUSED, + "constraint": CONSTRAINT, + "continue": UNUSED, + "convert": CONVERT, + "substr": SUBSTR, + "substring": SUBSTRING, + "create": CREATE, + "cross": CROSS, + "current": CURRENT, + "current_date": CURRENT_DATE, + "current_time": CURRENT_TIME, + "current_timestamp": CURRENT_TIMESTAMP, + "current_user": UNUSED, + "cursor": UNUSED, + "database": DATABASE, + "databases": DATABASES, + "day_hour": UNUSED, + "day_microsecond": UNUSED, + "day_minute": UNUSED, + "day_second": UNUSED, + "date": DATE, + "datetime": DATETIME, + "dec": UNUSED, + "decimal": DECIMAL, + "declare": UNUSED, + "default": DEFAULT, + "delayed": UNUSED, + "delete": DELETE, + "desc": DESC, + "describe": DESCRIBE, + "deterministic": UNUSED, + "distinct": DISTINCT, + "distinctrow": DISTINCTROW, + "div": DIV, + "double": DOUBLE, + "do": DO, + "drop": DROP, + "duplicate": DUPLICATE, + "each": UNUSED, + "else": ELSE, + "elseif": UNUSED, + "enclosed": UNUSED, + "end": END, + "engines": ENGINES, + "enum": ENUM, + "escape": ESCAPE, + "escaped": UNUSED, + "exec": EXEC, + "exists": EXISTS, + "exit": UNUSED, + "explain": EXPLAIN, + "expansion": EXPANSION, + "extended": EXTENDED, + "false": FALSE, + "fetch": UNUSED, + "fields": FIELDS, + "float": FLOAT_TYPE, + "float4": UNUSED, + "float8": UNUSED, + "flush": FLUSH, + "following": FOLLOWING, + "for": FOR, + "force": FORCE, + "foreign": FOREIGN, + "format": FORMAT, + "from": FROM, + "full": FULL, + "fulltext": FULLTEXT, + "generated": UNUSED, + "geometry": GEOMETRY, + "geometrycollection": GEOMETRYCOLLECTION, + "get": UNUSED, + "global": GLOBAL, + "grant": UNUSED, + "group": GROUP, + "group_concat": GROUP_CONCAT, + "having": HAVING, + "high_priority": UNUSED, + "hour_microsecond": UNUSED, + "hour_minute": UNUSED, + "hour_second": UNUSED, + "if": IF, + "ignore": IGNORE, + "in": IN, + "index": INDEX, + "indexes": INDEXES, + "infile": UNUSED, + "inout": UNUSED, + "inner": INNER, + "insensitive": UNUSED, + "insert": INSERT, + "int": INT, + "int1": UNUSED, + "int2": UNUSED, + "int3": UNUSED, + "int4": UNUSED, + "int8": UNUSED, + "integer": INTEGER, + "interactive": INTERACTIVE, + "interval": INTERVAL, + "into": INTO, + "io_after_gtids": UNUSED, + "is": IS, + "isolation": ISOLATION, + "iterate": UNUSED, + "join": JOIN, + "json": JSON, + "json_each": JSON_EACH, + "json_array_elements_text": JSON_ARRAY_ELEMENTS_TEXT, + "key": KEY, + "keys": KEYS, + "key_block_size": KEY_BLOCK_SIZE, + "kill": UNUSED, + "language": LANGUAGE, + "last_insert_id": LAST_INSERT_ID, + "leading": UNUSED, + "leave": UNUSED, + "left": LEFT, + "less": LESS, + "level": LEVEL, + "like": LIKE, + "list": LIST, + "limit": LIMIT, + "linear": UNUSED, + "lines": UNUSED, + "linestring": LINESTRING, + "load": UNUSED, + "localtime": LOCALTIME, + "localtimestamp": LOCALTIMESTAMP, + "lock": LOCK, + "login": LOGIN, + "long": UNUSED, + "longblob": LONGBLOB, + "longtext": LONGTEXT, + "loop": UNUSED, + "low_priority": UNUSED, + "master_bind": UNUSED, + "match": MATCH, + "materialized": MATERIALIZED, + "maxvalue": MAXVALUE, + "mediumblob": MEDIUMBLOB, + "mediumint": MEDIUMINT, + "mediumtext": MEDIUMTEXT, + "middleint": UNUSED, + "minute_microsecond": UNUSED, + "minute_second": UNUSED, + "mod": MOD, + "mode": MODE, + "modifies": UNUSED, + "multilinestring": MULTILINESTRING, + "multipoint": MULTIPOINT, + "multipolygon": MULTIPOLYGON, + "names": NAMES, + "nativequery": NATIVEQUERY, + "natural": NATURAL, + "nchar": NCHAR, + "next": NEXT, + "no": NO, + "not": NOT, + "no_write_to_binlog": UNUSED, + "null": NULL, + "numeric": NUMERIC, + "off": OFF, + "offset": OFFSET, + "on": ON, + "only": ONLY, + "optimize": OPTIMIZE, + "optimizer_costs": UNUSED, + "option": UNUSED, + "optionally": UNUSED, + "or": OR, + "order": ORDER, + "out": UNUSED, + "outer": OUTER, + "outfile": UNUSED, + "over": OVER, + "partition": PARTITION, + "plugins": PLUGINS, + "point": POINT, + "polygon": POLYGON, + "preceding": PRECEDING, + "precision": UNUSED, + "primary": PRIMARY, + "processlist": PROCESSLIST, + "procedure": PROCEDURE, + "pull": PULL, + "purge": PURGE, + "query": QUERY, + "range": RANGE, + "read": READ, + "reads": UNUSED, + "read_write": UNUSED, + "real": REAL, + "recursive": RECURSIVE, + "references": REFERENCES, + "refresh": REFRESH, + "registry": REGISTRY, + "regexp": REGEXP, + "release": RELEASE, + "rename": RENAME, + "reorganize": REORGANIZE, + "repair": REPAIR, + "repeat": UNUSED, + "repeatable": REPEATABLE, + "replace": REPLACE, + "require": UNUSED, + "resignal": UNUSED, + "restrict": RESTRICT, + "return": UNUSED, + "returning": RETURNING, + "revoke": REVOKE, + "right": RIGHT, + "rlike": REGEXP, + "rollback": ROLLBACK, + "row": ROW, + "rows": ROWS, + "sa": SA, + "savepoint": SAVEPOINT, + "schema": SCHEMA, + "second_microsecond": UNUSED, + "select": SELECT, + "sensitive": UNUSED, + "separator": SEPARATOR, + "sequence": SEQUENCE, + "serializable": SERIALIZABLE, + "serviceaccount": SERVICEACCOUNT, + "session": SESSION, + "set": SET, + "share": SHARE, + "show": SHOW, + "signal": UNUSED, + "signed": SIGNED, + "sleep": SLEEP, + "smallint": SMALLINT, + "spatial": SPATIAL, + "specific": UNUSED, + "sql": UNUSED, + "sqlexception": UNUSED, + "sqlstate": UNUSED, + "sqlwarning": UNUSED, + "sql_big_result": UNUSED, + "sql_cache": SQL_CACHE, + "sql_calc_found_rows": SQL_CALC_FOUND_ROWS, + "sql_no_cache": SQL_NO_CACHE, + "sql_small_result": UNUSED, + "ssl": UNUSED, + "stackql": STACKQL, + "start": START, + "starting": UNUSED, + "status": STATUS, + "stored": UNUSED, + "straight_join": STRAIGHT_JOIN, + "stream": STREAM, + "table": TABLE, + "tables": TABLES, + "terminated": UNUSED, + "temp": TEMP, + "temporary": TEMPORARY, + "than": THAN, + "then": THEN, + "text": TEXT, + "time": TIME, + "timestamp": TIMESTAMP, + "timestampadd": TIMESTAMPADD, + "timestampdiff": TIMESTAMPDIFF, + "tinyblob": TINYBLOB, + "tinyint": TINYINT, + "tinytext": TINYTEXT, + "to": TO, + "trailing": UNUSED, + "transaction": TRANSACTION, + "tree": TREE, + "traditional": TRADITIONAL, + "trigger": TRIGGER, + "true": TRUE, + "truncate": TRUNCATE, + "unbounded": UNBOUNDED, + "uncommitted": UNCOMMITTED, + "undo": UNUSED, + "union": UNION, + "unique": UNIQUE, + "unlock": UNLOCK, + "unsigned": UNSIGNED, + "update": UPDATE, + "usage": UNUSED, + "use": USE, + "using": USING, + "utc_date": UTC_DATE, + "utc_time": UTC_TIME, + "utc_timestamp": UTC_TIMESTAMP, + "values": VALUES, + "variables": VARIABLES, + "varbinary": VARBINARY, + "varchar": VARCHAR, + "varcharacter": UNUSED, + "varying": UNUSED, + "virtual": UNUSED, + "vindex": VINDEX, + "vindexes": VINDEXES, + "view": VIEW, + "vitess": VITESS, + "vitess_metadata": VITESS_METADATA, + "vschema": VSCHEMA, + "warnings": WARNINGS, + "when": WHEN, + "where": WHERE, + "while": UNUSED, + "with": WITH, + "work": WORK, + "write": WRITE, + "xor": XOR, + "year": YEAR, + "year_month": UNUSED, + "zerofill": ZEROFILL, +} + +// KeywordStrings contains the reverse mapping of token to keyword strings +var KeywordStrings = map[int]string{} + +func init() { + for str, id := range keywords { + if id == UNUSED { + continue + } + KeywordStrings[id] = strings.ToLower(str) + } +} + +// KeywordString returns the string corresponding to the given keyword +func KeywordString(id int) string { + str, ok := KeywordStrings[id] + if !ok { + return "" + } + return str +} + +// Lex returns the next token form the Tokenizer. +// This function is used by go yacc. +func (tkn *Tokenizer) Lex(lval *yySymType) int { + if tkn.SkipToEnd { + return tkn.skipStatement() + } + + typ, val := tkn.Scan() + for typ == COMMENT { + if tkn.AllowComments { + break + } + typ, val = tkn.Scan() + } + if typ == 0 || typ == ';' || typ == LEX_ERROR { + // If encounter end of statement or invalid token, + // we should not accept partially parsed DDLs. They + // should instead result in parser errors. See the + // Parse function to see how this is handled. + tkn.partialDDL = nil + } + lval.bytes = val + tkn.lastToken = val + return typ +} + +// PositionedErr holds context related to parser errors +type PositionedErr struct { + Err string + Pos int + Near []byte +} + +func (p PositionedErr) Error() string { + if p.Near != nil { + return fmt.Sprintf("%s at position %v near '%s'", p.Err, p.Pos, p.Near) + } + return fmt.Sprintf("%s at position %v", p.Err, p.Pos) +} + +// Error is called by go yacc if there's a parsing error. +func (tkn *Tokenizer) Error(err string) { + tkn.LastError = PositionedErr{Err: err, Pos: tkn.Position, Near: tkn.lastToken} + + // Try and re-sync to the next statement + tkn.skipStatement() +} + +// Scan scans the tokenizer for the next token and returns +// the token type and an optional value. +func (tkn *Tokenizer) Scan() (int, []byte) { + if tkn.specialComment != nil { + // Enter specialComment scan mode. + // for scanning such kind of comment: /*! MySQL-specific code */ + specialComment := tkn.specialComment + tok, val := specialComment.Scan() + if tok != 0 { + // return the specialComment scan result as the result + return tok, val + } + // leave specialComment scan mode after all stream consumed. + tkn.specialComment = nil + } + if tkn.lastChar == 0 { + tkn.next() + } + + tkn.skipBlank() + switch ch := tkn.lastChar; { + case ch == '@': + tokenID := AT_ID + tkn.next() + if tkn.lastChar == '@' { + tokenID = AT_AT_ID + tkn.next() + } + var tID int + var tBytes []byte + ch = tkn.lastChar + tkn.next() + if ch == literalDelim { + tID, tBytes = tkn.scanLiteralIdentifier() + } else { + tID, tBytes = tkn.scanIdentifier(byte(ch), true) + } + if tID == LEX_ERROR { + return tID, nil + } + return tokenID, tBytes + case isLetter(ch): + tkn.next() + if ch == 'X' || ch == 'x' { + if tkn.lastChar == '\'' { + tkn.next() + return tkn.scanHex() + } + } + if ch == 'B' || ch == 'b' { + if tkn.lastChar == '\'' { + tkn.next() + return tkn.scanBitLiteral() + } + } + if ch == 'v' { + if _, err := semver.NewVersion(tkn.peek()); err == nil { + return tkn.scanIdentifierPermissive(byte(ch), false) + } + } + return tkn.scanIdentifier(byte(ch), false) + case isDigit(ch): + return tkn.scanNumber(false) + case ch == ':': + return tkn.scanBindVar() + case ch == ';': + if tkn.multi { + // In multi mode, ';' is treated as EOF. So, we don't advance. + // Repeated calls to Scan will keep returning 0 until ParseNext + // forces the advance. + return 0, nil + } + tkn.next() + return ';', nil + case ch == eofChar: + return 0, nil + default: + tkn.next() + switch ch { + case '=', ',', '(', ')', '+', '*', '%', '^', '~': + return int(ch), nil + case '&': + if tkn.lastChar == '&' { + tkn.next() + return AND, nil + } + return int(ch), nil + case '|': + if tkn.lastChar == '|' { + tkn.next() + return OR, nil + } + return int(ch), nil + case '?': + tkn.posVarIndex++ + buf := new(bytes2.Buffer) + fmt.Fprintf(buf, ":v%d", tkn.posVarIndex) + return VALUE_ARG, buf.Bytes() + case '.': + if isDigit(tkn.lastChar) { + return tkn.scanNumber(true) + } + return int(ch), nil + case '/': + switch tkn.lastChar { + case '/': + tkn.next() + return tkn.scanCommentType1("//") + case '*': + tkn.next() + if tkn.lastChar == '!' && !tkn.SkipSpecialComments { + return tkn.scanMySQLSpecificComment() + } + return tkn.scanCommentType2() + default: + return int(ch), nil + } + case '#': + return tkn.scanCommentType1("#") + case '-': + switch tkn.lastChar { + case '-': + tkn.next() + return tkn.scanCommentType1("--") + case '>': + tkn.next() + if tkn.lastChar == '>' { + tkn.next() + return JSON_UNQUOTE_EXTRACT_OP, nil + } + return JSON_EXTRACT_OP, nil + } + return int(ch), nil + case '<': + switch tkn.lastChar { + case '>': + tkn.next() + return NE, nil + case '<': + tkn.next() + return SHIFT_LEFT, nil + case '=': + tkn.next() + switch tkn.lastChar { + case '>': + tkn.next() + return NULL_SAFE_EQUAL, nil + default: + return LE, nil + } + default: + return int(ch), nil + } + case '>': + switch tkn.lastChar { + case '=': + tkn.next() + return GE, nil + case '>': + tkn.next() + return SHIFT_RIGHT, nil + default: + return int(ch), nil + } + case '!': + if tkn.lastChar == '=' { + tkn.next() + return NE, nil + } + return int(ch), nil + case '\'': + return tkn.scanString(ch, STRING) + case literalDelim: + return tkn.scanLiteralIdentifier() + default: + return LEX_ERROR, []byte{byte(ch)} + } + } +} + +// skipStatement scans until end of statement. +func (tkn *Tokenizer) skipStatement() int { + tkn.SkipToEnd = false + for { + typ, _ := tkn.Scan() + if typ == 0 || typ == ';' || typ == LEX_ERROR { + return typ + } + } +} + +func (tkn *Tokenizer) skipBlank() { + ch := tkn.lastChar + for ch == ' ' || ch == '\n' || ch == '\r' || ch == '\t' { + tkn.next() + ch = tkn.lastChar + } +} + +func (tkn *Tokenizer) scanIdentifier(firstByte byte, isVariable bool) (int, []byte) { + buffer := &bytes2.Buffer{} + buffer.WriteByte(firstByte) + for isLetter(tkn.lastChar) || + isDigit(tkn.lastChar) || + tkn.lastChar == '@' || + (isVariable && isCarat(tkn.lastChar)) { + buffer.WriteByte(byte(tkn.lastChar)) + tkn.next() + } + lowered := bytes.ToLower(buffer.Bytes()) + loweredStr := string(lowered) + if keywordID, found := keywords[loweredStr]; found { + return keywordID, buffer.Bytes() + } + // dual must always be case-insensitive + if loweredStr == "dual" { + return ID, lowered + } + return ID, buffer.Bytes() +} + +func (tkn *Tokenizer) scanIdentifierPermissive(firstByte byte, isVariable bool) (int, []byte) { + buffer := &bytes2.Buffer{} + buffer.WriteByte(firstByte) + for isLetter(tkn.lastChar) || + isDigit(tkn.lastChar) || + tkn.lastChar == '@' || + tkn.lastChar == '.' || + (isVariable && isCarat(tkn.lastChar)) { + buffer.WriteByte(byte(tkn.lastChar)) + tkn.next() + } + lowered := bytes.ToLower(buffer.Bytes()) + loweredStr := string(lowered) + if keywordID, found := keywords[loweredStr]; found { + return keywordID, buffer.Bytes() + } + // dual must always be case-insensitive + if loweredStr == "dual" { + return ID, lowered + } + return ID, buffer.Bytes() +} + +func (tkn *Tokenizer) scanHex() (int, []byte) { + buffer := &bytes2.Buffer{} + tkn.scanMantissa(16, buffer) + if tkn.lastChar != '\'' { + return LEX_ERROR, buffer.Bytes() + } + tkn.next() + if buffer.Len()%2 != 0 { + return LEX_ERROR, buffer.Bytes() + } + return HEX, buffer.Bytes() +} + +func (tkn *Tokenizer) scanBitLiteral() (int, []byte) { + buffer := &bytes2.Buffer{} + tkn.scanMantissa(2, buffer) + if tkn.lastChar != '\'' { + return LEX_ERROR, buffer.Bytes() + } + tkn.next() + return BIT_LITERAL, buffer.Bytes() +} + +func (tkn *Tokenizer) scanLiteralIdentifier() (int, []byte) { + buffer := &bytes2.Buffer{} + backTickSeen := false + for { + if backTickSeen { + if tkn.lastChar != literalDelim { + break + } + backTickSeen = false + buffer.WriteByte(byte(literalDelim)) + tkn.next() + continue + } + // The previous char was not a backtick. + switch tkn.lastChar { + case literalDelim: + backTickSeen = true + case eofChar: + // Premature EOF. + return LEX_ERROR, buffer.Bytes() + default: + buffer.WriteByte(byte(tkn.lastChar)) + } + tkn.next() + } + if buffer.Len() == 0 { + return LEX_ERROR, buffer.Bytes() + } + return ID, buffer.Bytes() +} + +func (tkn *Tokenizer) scanBindVar() (int, []byte) { + buffer := &bytes2.Buffer{} + buffer.WriteByte(byte(tkn.lastChar)) + token := VALUE_ARG + tkn.next() + if tkn.lastChar == ':' { + token = LIST_ARG + buffer.WriteByte(byte(tkn.lastChar)) + tkn.next() + } + if !isLetter(tkn.lastChar) { + return LEX_ERROR, buffer.Bytes() + } + for isLetter(tkn.lastChar) || isDigit(tkn.lastChar) || tkn.lastChar == '.' { + buffer.WriteByte(byte(tkn.lastChar)) + tkn.next() + } + return token, buffer.Bytes() +} + +func (tkn *Tokenizer) scanMantissa(base int, buffer *bytes2.Buffer) { + for digitVal(tkn.lastChar) < base { + tkn.consumeNext(buffer) + } +} + +func (tkn *Tokenizer) scanNumber(seenDecimalPoint bool) (int, []byte) { + token := INTEGRAL + buffer := &bytes2.Buffer{} + if seenDecimalPoint { + token = FLOAT + buffer.WriteByte('.') + tkn.scanMantissa(10, buffer) + goto exponent + } + + // 0x construct. + if tkn.lastChar == '0' { + tkn.consumeNext(buffer) + if tkn.lastChar == 'x' || tkn.lastChar == 'X' { + token = HEXNUM + tkn.consumeNext(buffer) + tkn.scanMantissa(16, buffer) + goto exit + } + } + + tkn.scanMantissa(10, buffer) + + if tkn.lastChar == '.' { + token = FLOAT + tkn.consumeNext(buffer) + tkn.scanMantissa(10, buffer) + } + +exponent: + if tkn.lastChar == 'e' || tkn.lastChar == 'E' { + token = FLOAT + tkn.consumeNext(buffer) + if tkn.lastChar == '+' || tkn.lastChar == '-' { + tkn.consumeNext(buffer) + } + tkn.scanMantissa(10, buffer) + } + +exit: + // A letter cannot immediately follow a number. + if isLetter(tkn.lastChar) { + return LEX_ERROR, buffer.Bytes() + } + + return token, buffer.Bytes() +} + +func (tkn *Tokenizer) scanString(delim uint16, typ int) (int, []byte) { + var buffer bytes2.Buffer + for { + ch := tkn.lastChar + if ch == eofChar { + // Unterminated string. + return LEX_ERROR, buffer.Bytes() + } + + if ch != delim && ch != '\\' { + buffer.WriteByte(byte(ch)) + + // Scan ahead to the next interesting character. + start := tkn.bufPos + for ; tkn.bufPos < tkn.bufSize; tkn.bufPos++ { + ch = uint16(tkn.buf[tkn.bufPos]) + if ch == delim || ch == '\\' { + break + } + } + + buffer.Write(tkn.buf[start:tkn.bufPos]) + tkn.Position += (tkn.bufPos - start) + + if tkn.bufPos >= tkn.bufSize { + // Reached the end of the buffer without finding a delim or + // escape character. + tkn.next() + continue + } + + tkn.bufPos++ + tkn.Position++ + } + tkn.next() // Read one past the delim or escape character. + + if ch == '\\' { + if tkn.lastChar == eofChar { + // String terminates mid escape character. + return LEX_ERROR, buffer.Bytes() + } + if decodedChar := sqltypes.SQLDecodeMap[byte(tkn.lastChar)]; decodedChar == sqltypes.DontEscape { + ch = tkn.lastChar + } else { + ch = uint16(decodedChar) + } + + } else if ch == delim && tkn.lastChar != delim { + // Correctly terminated string, which is not a double delim. + break + } + + buffer.WriteByte(byte(ch)) + tkn.next() + } + + return typ, buffer.Bytes() +} + +func (tkn *Tokenizer) scanCommentType1(prefix string) (int, []byte) { + buffer := &bytes2.Buffer{} + buffer.WriteString(prefix) + for tkn.lastChar != eofChar { + if tkn.lastChar == '\n' { + tkn.consumeNext(buffer) + break + } + tkn.consumeNext(buffer) + } + return COMMENT, buffer.Bytes() +} + +func (tkn *Tokenizer) scanCommentType2() (int, []byte) { + buffer := &bytes2.Buffer{} + buffer.WriteString("/*") + for { + if tkn.lastChar == '*' { + tkn.consumeNext(buffer) + if tkn.lastChar == '/' { + tkn.consumeNext(buffer) + break + } + continue + } + if tkn.lastChar == eofChar { + return LEX_ERROR, buffer.Bytes() + } + tkn.consumeNext(buffer) + } + return COMMENT, buffer.Bytes() +} + +func (tkn *Tokenizer) scanMySQLSpecificComment() (int, []byte) { + buffer := &bytes2.Buffer{} + buffer.WriteString("/*!") + tkn.next() + for { + if tkn.lastChar == '*' { + tkn.consumeNext(buffer) + if tkn.lastChar == '/' { + tkn.consumeNext(buffer) + break + } + continue + } + if tkn.lastChar == eofChar { + return LEX_ERROR, buffer.Bytes() + } + tkn.consumeNext(buffer) + } + _, sql := ExtractMysqlComment(buffer.String()) + tkn.specialComment = NewStringTokenizer(sql) + return tkn.Scan() +} + +func (tkn *Tokenizer) consumeNext(buffer *bytes2.Buffer) { + if tkn.lastChar == eofChar { + // This should never happen. + panic("unexpected EOF") + } + buffer.WriteByte(byte(tkn.lastChar)) + tkn.next() +} + +func (tkn *Tokenizer) peek() string { + if tkn.bufPos < 2 { + return "" + } + bufStr := string(tkn.buf[tkn.bufPos-2:]) + if strings.Contains(bufStr, ";") { + return strings.Split(bufStr, ";")[0] + } + fields := strings.Fields(bufStr) + if len(fields) > 0 { + return fields[0] + } + var err error + for { + tkn.bufSize, err = tkn.InStream.Read(tkn.buf) + if err == io.EOF { + break + } + if err != io.EOF && err != nil { + tkn.LastError = err + break + } + bufStr = string(tkn.buf[tkn.bufPos:]) + if strings.Contains(bufStr, ";") { + return strings.Split(bufStr, ";")[0] + } + fields = strings.Fields(bufStr) + if len(fields) > 0 { + return fields[0] + } + } + return bufStr +} + +func (tkn *Tokenizer) next() { + if tkn.bufPos >= tkn.bufSize && tkn.InStream != nil { + // Try and refill the buffer + var err error + tkn.bufPos = 0 + if tkn.bufSize, err = tkn.InStream.Read(tkn.buf); err != io.EOF && err != nil { + tkn.LastError = err + } + } + + if tkn.bufPos >= tkn.bufSize { + if tkn.lastChar != eofChar { + tkn.Position++ + tkn.lastChar = eofChar + } + } else { + tkn.Position++ + tkn.lastChar = uint16(tkn.buf[tkn.bufPos]) + tkn.bufPos++ + } +} + +// reset clears any internal state. +func (tkn *Tokenizer) reset() { + tkn.ParseTree = nil + tkn.partialDDL = nil + tkn.specialComment = nil + tkn.posVarIndex = 0 + tkn.nesting = 0 + tkn.SkipToEnd = false +} + +func isLetter(ch uint16) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch == '$' +} + +func isCarat(ch uint16) bool { + return ch == '.' || ch == '\'' || ch == literalDelim +} + +func digitVal(ch uint16) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch) - '0' + case 'a' <= ch && ch <= 'f': + return int(ch) - 'a' + 10 + case 'A' <= ch && ch <= 'F': + return int(ch) - 'A' + 10 + } + return 16 // larger than any legal digit val +} + +func isDigit(ch uint16) bool { + return '0' <= ch && ch <= '9' +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/token_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/token_test.go new file mode 100644 index 00000000..cac25328 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/token_test.go @@ -0,0 +1,207 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "fmt" + "testing" +) + +func TestLiteralID(t *testing.T) { + testcases := []struct { + in string + id int + out string + }{{ + in: "`aa`", + id: ID, + out: "aa", + }, { + in: "```a```", + id: ID, + out: "`a`", + }, { + in: "`a``b`", + id: ID, + out: "a`b", + }, { + in: "`a``b`c", + id: ID, + out: "a`b", + }, { + in: "`a``b", + id: LEX_ERROR, + out: "a`b", + }, { + in: "`a``b``", + id: LEX_ERROR, + out: "a`b`", + }, { + in: "``", + id: LEX_ERROR, + out: "", + }, { + in: "@x", + id: AT_ID, + out: "x", + }, { + in: "@@x", + id: AT_AT_ID, + out: "x", + }, { + in: "@@`x y`", + id: AT_AT_ID, + out: "x y", + }, { + in: "@@`@x @y`", + id: AT_AT_ID, + out: "@x @y", + }} + + for _, tcase := range testcases { + tkn := NewStringTokenizer(tcase.in) + id, out := tkn.Scan() + if tcase.id != id || string(out) != tcase.out { + t.Errorf("Scan(%s): %d, %s, want %d, %s", tcase.in, id, out, tcase.id, tcase.out) + } + } +} + +func tokenName(id int) string { + if id == STRING { + return "STRING" + } else if id == LEX_ERROR { + return "LEX_ERROR" + } + return fmt.Sprintf("%d", id) +} + +func TestString(t *testing.T) { + testcases := []struct { + in string + id int + want string + }{{ + in: "''", + id: STRING, + want: "", + }, { + in: "''''", + id: STRING, + want: "'", + }, { + in: "'hello'", + id: STRING, + want: "hello", + }, { + in: "'\\n'", + id: STRING, + want: "\n", + }, { + in: "'\\nhello\\n'", + id: STRING, + want: "\nhello\n", + }, { + in: "'a''b'", + id: STRING, + want: "a'b", + }, { + in: "'a\\'b'", + id: STRING, + want: "a'b", + }, { + in: "'\\'", + id: LEX_ERROR, + want: "'", + }, { + in: "'", + id: LEX_ERROR, + want: "", + }, { + in: "'hello\\'", + id: LEX_ERROR, + want: "hello'", + }, { + in: "'hello", + id: LEX_ERROR, + want: "hello", + }, { + in: "'hello\\", + id: LEX_ERROR, + want: "hello", + }} + + for _, tcase := range testcases { + id, got := NewStringTokenizer(tcase.in).Scan() + if tcase.id != id || string(got) != tcase.want { + t.Errorf("Scan(%q) = (%s, %q), want (%s, %q)", tcase.in, tokenName(id), got, tokenName(tcase.id), tcase.want) + } + } +} + +func TestSplitStatement(t *testing.T) { + testcases := []struct { + in string + sql string + rem string + }{{ + in: "select * from table", + sql: "select * from table", + }, { + in: "select * from table; ", + sql: "select * from table", + rem: " ", + }, { + in: "select * from table; select * from table2;", + sql: "select * from table", + rem: " select * from table2;", + }, { + in: "select * from /* comment */ table;", + sql: "select * from /* comment */ table", + }, { + in: "select * from /* comment ; */ table;", + sql: "select * from /* comment ; */ table", + }, { + in: "select * from table where semi = ';';", + sql: "select * from table where semi = ';'", + }, { + in: "-- select * from table", + sql: "-- select * from table", + }, { + in: " ", + sql: " ", + }, { + in: "", + sql: "", + }} + + for _, tcase := range testcases { + sql, rem, err := SplitStatement(tcase.in) + if err != nil { + t.Errorf("EndOfStatementPosition(%s): ERROR: %v", tcase.in, err) + continue + } + + if tcase.sql != sql { + t.Errorf("EndOfStatementPosition(%s) got sql \"%s\" want \"%s\"", tcase.in, sql, tcase.sql) + } + + if tcase.rem != rem { + t.Errorf("EndOfStatementPosition(%s) got remainder \"%s\" want \"%s\"", tcase.in, rem, tcase.rem) + } + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/tracked_buffer.go b/internal/stackql-parser-fork/go/vt/sqlparser/tracked_buffer.go new file mode 100644 index 00000000..a6ee9a76 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/tracked_buffer.go @@ -0,0 +1,220 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "fmt" + "strings" +) + +// NodeFormatter defines the signature of a custom node formatter +// function that can be given to TrackedBuffer for code generation. +type NodeFormatter func(buf *TrackedBuffer, node SQLNode) + +// TrackedBuffer is used to rebuild a query from the ast. +// bindLocations keeps track of locations in the buffer that +// use bind variables for efficient future substitutions. +// nodeFormatter is the formatting function the buffer will +// use to format a node. By default(nil), it's FormatNode. +// But you can supply a different formatting function if you +// want to generate a query that's different from the default. +type TrackedBuffer struct { + *strings.Builder + bindLocations []bindLocation + nodeFormatter NodeFormatter + isDelimitCols bool +} + +// NewTrackedBuffer creates a new TrackedBuffer. +func NewTrackedBuffer(nodeFormatter NodeFormatter) *TrackedBuffer { + return &TrackedBuffer{ + Builder: new(strings.Builder), + nodeFormatter: nodeFormatter, + } +} + +func (buf *TrackedBuffer) IsDelimitCols() bool { + return buf.isDelimitCols +} + +func (buf *TrackedBuffer) WithDelimitCols(isDelimitCols bool) *TrackedBuffer { + buf.isDelimitCols = isDelimitCols + return buf +} + +// WriteNode function, initiates the writing of a single SQLNode tree by passing +// through to Myprintf with a default format string +func (buf *TrackedBuffer) WriteNode(node SQLNode) *TrackedBuffer { + buf.Myprintf("%v", node) + return buf +} + +// Myprintf mimics fmt.Fprintf(buf, ...), but limited to Node(%v), +// Node.Value(%s) and string(%s). It also allows a %a for a value argument, in +// which case it adds tracking info for future substitutions. +// It adds parens as needed to follow precedence rules when printing expressions +// +// The name must be something other than the usual Printf() to avoid "go vet" +// warnings due to our custom format specifiers. +// *** THIS METHOD SHOULD NOT BE USED FROM ast.go. USE astPrintf INSTEAD *** +func (buf *TrackedBuffer) Myprintf(format string, values ...interface{}) { + buf.astPrintf(nil, format, values...) +} + +func (buf *TrackedBuffer) AstPrintf(currentNode SQLNode, format string, values ...interface{}) { + buf.astPrintf(currentNode, format, values...) +} + +// astPrintf is for internal use by the ast structs +func (buf *TrackedBuffer) astPrintf(currentNode SQLNode, format string, values ...interface{}) { + currentExpr, checkParens := currentNode.(Expr) + if checkParens { + // expressions that have Precedence Syntactic will never need parens + checkParens = precedenceFor(currentExpr) != Syntactic + } + + end := len(format) + fieldnum := 0 + for i := 0; i < end; { + lasti := i + for i < end && format[i] != '%' { + i++ + } + if i > lasti { + buf.WriteString(format[lasti:i]) + } + if i >= end { + break + } + i++ // '%' + switch format[i] { + case 'c': + switch v := values[fieldnum].(type) { + case byte: + buf.WriteByte(v) + case rune: + buf.WriteRune(v) + default: + panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v)) + } + case 's': + switch v := values[fieldnum].(type) { + case []byte: + buf.Write(v) + case string: + buf.WriteString(v) + default: + panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v)) + } + case 'v': + value := values[fieldnum] + expr := getExpressionForParensEval(checkParens, value) + + if expr != nil { // + needParens := needParens(currentExpr, expr) + buf.printIf(needParens, "(") + buf.formatter(expr) + buf.printIf(needParens, ")") + } else { + buf.formatter(value.(SQLNode)) + } + + case 'a': + buf.WriteArg(values[fieldnum].(string)) + default: + panic("unexpected") + } + fieldnum++ + i++ + } +} + +func getExpressionForParensEval(checkParens bool, value interface{}) Expr { + if checkParens { + expr, isExpr := value.(Expr) + if isExpr { + return expr + } + } + return nil +} + +func (buf *TrackedBuffer) printIf(condition bool, text string) { + if condition { + buf.WriteString(text) + } +} + +func (buf *TrackedBuffer) formatter(node SQLNode) { + if buf.nodeFormatter == nil { + node.Format(buf) + } else { + buf.nodeFormatter(buf, node) + } +} + +func needParens(op, val Expr) bool { + if areBothISExpr(op, val) { + return true + } + + opBinding := precedenceFor(op) + valBinding := precedenceFor(val) + + return !(opBinding == Syntactic || valBinding == Syntactic) && valBinding > opBinding +} + +func areBothISExpr(op Expr, val Expr) bool { + _, isOpIS := op.(*IsExpr) + if isOpIS { + _, isValIS := val.(*IsExpr) + if isValIS { + // when using IS on an IS op, we need special handling + return true + } + } + return false +} + +// WriteArg writes a value argument into the buffer along with +// tracking information for future substitutions. arg must contain +// the ":" or "::" prefix. +func (buf *TrackedBuffer) WriteArg(arg string) { + buf.bindLocations = append(buf.bindLocations, bindLocation{ + offset: buf.Len(), + length: len(arg), + }) + buf.WriteString(arg) +} + +// ParsedQuery returns a ParsedQuery that contains bind +// locations for easy substitution. +func (buf *TrackedBuffer) ParsedQuery() *ParsedQuery { + return &ParsedQuery{Query: buf.String(), bindLocations: buf.bindLocations} +} + +// HasBindVars returns true if the parsed query uses bind vars. +func (buf *TrackedBuffer) HasBindVars() bool { + return len(buf.bindLocations) != 0 +} + +// BuildParsedQuery builds a ParsedQuery from the input. +func BuildParsedQuery(in string, vars ...interface{}) *ParsedQuery { + buf := NewTrackedBuffer(nil) + buf.Myprintf(in, vars...) + return buf.ParsedQuery() +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/truncate_query.go b/internal/stackql-parser-fork/go/vt/sqlparser/truncate_query.go new file mode 100644 index 00000000..a441c812 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/truncate_query.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "flag" +) + +var ( + // TruncateUILen truncate queries in debug UIs to the given length. 0 means unlimited. + TruncateUILen = flag.Int("sql-max-length-ui", 512, "truncate queries in debug UIs to the given length (default 512)") + + // TruncateErrLen truncate queries in error logs to the given length. 0 means unlimited. + TruncateErrLen = flag.Int("sql-max-length-errors", 0, "truncate queries in error logs to the given length (default unlimited)") +) + +func truncateQuery(query string, max int) string { + sql, comments := SplitMarginComments(query) + + if max == 0 || len(sql) <= max { + return comments.Leading + sql + comments.Trailing + } + + return comments.Leading + sql[:max-12] + " [TRUNCATED]" + comments.Trailing +} + +// TruncateForUI is used when displaying queries on various Vitess status pages +// to keep the pages small enough to load and render properly +func TruncateForUI(query string) string { + return truncateQuery(query, *TruncateUILen) +} + +// TruncateForLog is used when displaying queries as part of error logs +// to avoid overwhelming logging systems with potentially long queries and +// bind value data. +func TruncateForLog(query string) string { + return truncateQuery(query, *TruncateErrLen) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/ast_walker.go b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/ast_walker.go new file mode 100644 index 00000000..822fb6c4 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/ast_walker.go @@ -0,0 +1,130 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package visitorgen + +import ( + "go/ast" + "reflect" +) + +var _ ast.Visitor = (*walker)(nil) + +type walker struct { + result SourceFile +} + +// Walk walks the given AST and translates it to the simplified AST used by the next steps +func Walk(node ast.Node) *SourceFile { + var w walker + ast.Walk(&w, node) + return &w.result +} + +// Visit implements the ast.Visitor interface +func (w *walker) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.TypeSpec: + switch t2 := n.Type.(type) { + case *ast.InterfaceType: + w.append(&InterfaceDeclaration{ + name: n.Name.Name, + block: "", + }) + case *ast.StructType: + var fields []*Field + for _, f := range t2.Fields.List { + for _, name := range f.Names { + fields = append(fields, &Field{ + name: name.Name, + typ: sastType(f.Type), + }) + } + + } + w.append(&StructDeclaration{ + name: n.Name.Name, + fields: fields, + }) + case *ast.ArrayType: + w.append(&TypeAlias{ + name: n.Name.Name, + typ: &Array{inner: sastType(t2.Elt)}, + }) + case *ast.Ident: + w.append(&TypeAlias{ + name: n.Name.Name, + typ: &TypeString{t2.Name}, + }) + + default: + panic(reflect.TypeOf(t2)) + } + case *ast.FuncDecl: + if len(n.Recv.List) > 1 || len(n.Recv.List[0].Names) > 1 { + panic("don't know what to do!") + } + var f *Field + if len(n.Recv.List) == 1 { + r := n.Recv.List[0] + t := sastType(r.Type) + if len(r.Names) > 1 { + panic("don't know what to do!") + } + if len(r.Names) == 1 { + f = &Field{ + name: r.Names[0].Name, + typ: t, + } + } else { + f = &Field{ + name: "", + typ: t, + } + } + } + + w.append(&FuncDeclaration{ + receiver: f, + name: n.Name.Name, + block: "", + arguments: nil, + }) + } + + return w +} + +func (w *walker) append(line Sast) { + w.result.lines = append(w.result.lines, line) +} + +func sastType(e ast.Expr) Type { + switch n := e.(type) { + case *ast.StarExpr: + return &Ref{sastType(n.X)} + case *ast.Ident: + return &TypeString{n.Name} + case *ast.ArrayType: + return &Array{inner: sastType(n.Elt)} + case *ast.InterfaceType: + return &TypeString{"interface{}"} + case *ast.StructType: + return &TypeString{"struct{}"} + } + + panic(reflect.TypeOf(e)) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/ast_walker_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/ast_walker_test.go new file mode 100644 index 00000000..a4b01f70 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/ast_walker_test.go @@ -0,0 +1,239 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package visitorgen + +import ( + "go/parser" + "go/token" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stretchr/testify/require" +) + +func TestSingleInterface(t *testing.T) { + input := ` +package sqlparser + +type Nodeiface interface { + iNode() +} +` + + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "ast.go", input, 0) + require.NoError(t, err) + + result := Walk(ast) + expected := SourceFile{ + lines: []Sast{&InterfaceDeclaration{ + name: "Nodeiface", + block: "", + }}, + } + assert.Equal(t, expected.String(), result.String()) +} + +func TestEmptyStruct(t *testing.T) { + input := ` +package sqlparser + +type Empty struct {} +` + + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "ast.go", input, 0) + require.NoError(t, err) + + result := Walk(ast) + expected := SourceFile{ + lines: []Sast{&StructDeclaration{ + name: "Empty", + fields: []*Field{}, + }}, + } + assert.Equal(t, expected.String(), result.String()) +} + +func TestStructWithStringField(t *testing.T) { + input := ` +package sqlparser + +type Struct struct { + field string +} +` + + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "ast.go", input, 0) + require.NoError(t, err) + + result := Walk(ast) + expected := SourceFile{ + lines: []Sast{&StructDeclaration{ + name: "Struct", + fields: []*Field{{ + name: "field", + typ: &TypeString{typName: "string"}, + }}, + }}, + } + assert.Equal(t, expected.String(), result.String()) +} + +func TestStructWithDifferentTypes(t *testing.T) { + input := ` +package sqlparser + +type Struct struct { + field string + reference *string + array []string + arrayOfRef []*string +} +` + + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "ast.go", input, 0) + require.NoError(t, err) + + result := Walk(ast) + expected := SourceFile{ + lines: []Sast{&StructDeclaration{ + name: "Struct", + fields: []*Field{{ + name: "field", + typ: &TypeString{typName: "string"}, + }, { + name: "reference", + typ: &Ref{&TypeString{typName: "string"}}, + }, { + name: "array", + typ: &Array{&TypeString{typName: "string"}}, + }, { + name: "arrayOfRef", + typ: &Array{&Ref{&TypeString{typName: "string"}}}, + }}, + }}, + } + assert.Equal(t, expected.String(), result.String()) +} + +func TestStructWithTwoStringFieldInOneLine(t *testing.T) { + input := ` +package sqlparser + +type Struct struct { + left, right string +} +` + + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "ast.go", input, 0) + require.NoError(t, err) + + result := Walk(ast) + expected := SourceFile{ + lines: []Sast{&StructDeclaration{ + name: "Struct", + fields: []*Field{{ + name: "left", + typ: &TypeString{typName: "string"}, + }, { + name: "right", + typ: &TypeString{typName: "string"}, + }}, + }}, + } + assert.Equal(t, expected.String(), result.String()) +} + +func TestStructWithSingleMethod(t *testing.T) { + input := ` +package sqlparser + +type Empty struct {} + +func (*Empty) method() {} +` + + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "ast.go", input, 0) + require.NoError(t, err) + + result := Walk(ast) + expected := SourceFile{ + lines: []Sast{ + &StructDeclaration{ + name: "Empty", + fields: []*Field{}}, + &FuncDeclaration{ + receiver: &Field{ + name: "", + typ: &Ref{&TypeString{"Empty"}}, + }, + name: "method", + block: "", + arguments: []*Field{}, + }, + }, + } + assert.Equal(t, expected.String(), result.String()) +} + +func TestSingleArrayType(t *testing.T) { + input := ` +package sqlparser + +type Strings []string +` + + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "ast.go", input, 0) + require.NoError(t, err) + + result := Walk(ast) + expected := SourceFile{ + lines: []Sast{&TypeAlias{ + name: "Strings", + typ: &Array{&TypeString{"string"}}, + }}, + } + assert.Equal(t, expected.String(), result.String()) +} + +func TestSingleTypeAlias(t *testing.T) { + input := ` +package sqlparser + +type String string +` + + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "ast.go", input, 0) + require.NoError(t, err) + + result := Walk(ast) + expected := SourceFile{ + lines: []Sast{&TypeAlias{ + name: "String", + typ: &TypeString{"string"}, + }}, + } + assert.Equal(t, expected.String(), result.String()) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/main/main.go b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/main/main.go new file mode 100644 index 00000000..3401b136 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/main/main.go @@ -0,0 +1,164 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/parser" + "go/token" + "io/ioutil" + "os" + + "github.com/stackql/stackql-parser/go/exit" + "github.com/stackql/stackql-parser/go/vt/log" + + "github.com/stackql/stackql-parser/go/vt/sqlparser/visitorgen" +) + +var ( + inputFile = flag.String("input", "", "input file to use") + outputFile = flag.String("output", "", "output file") + compare = flag.Bool("compareOnly", false, "instead of writing to the output file, compare if the generated visitor is still valid for this ast.go") +) + +const usage = `Usage of visitorgen: + +go run /path/to/visitorgen/main -input=/path/to/ast.go -output=/path/to/rewriter.go +` + +func main() { + defer exit.Recover() + flag.Usage = printUsage + flag.Parse() + + if *inputFile == "" || *outputFile == "" { + printUsage() + exit.Return(1) + } + + fs := token.NewFileSet() + file, err := parser.ParseFile(fs, *inputFile, nil, parser.DeclarationErrors) + if err != nil { + log.Error(err) + exit.Return(1) + } + + astWalkResult := visitorgen.Walk(file) + vp := visitorgen.Transform(astWalkResult) + vd := visitorgen.ToVisitorPlan(vp) + + replacementMethods := visitorgen.EmitReplacementMethods(vd) + typeSwitch := visitorgen.EmitTypeSwitches(vd) + + b := &bytes.Buffer{} + fmt.Fprint(b, fileHeader) + fmt.Fprintln(b) + fmt.Fprintln(b, replacementMethods) + fmt.Fprint(b, applyHeader) + fmt.Fprintln(b, typeSwitch) + fmt.Fprintln(b, fileFooter) + + if *compare { + currentFile, err := ioutil.ReadFile(*outputFile) + if err != nil { + log.Error(err) + exit.Return(1) + } + if !bytes.Equal(b.Bytes(), currentFile) { + fmt.Println("rewriter needs to be re-generated: go generate " + *outputFile) + exit.Return(1) + } + } else { + err = ioutil.WriteFile(*outputFile, b.Bytes(), 0644) + if err != nil { + log.Error(err) + exit.Return(1) + } + } + +} + +func printUsage() { + os.Stderr.WriteString(usage) + os.Stderr.WriteString("\nOptions:\n") + flag.PrintDefaults() +} + +const fileHeader = `// Code generated by visitorgen/main/main.go. DO NOT EDIT. + +package sqlparser + +//go:generate go run ./visitorgen/main -input=ast.go -output=rewriter.go + +import ( + "reflect" +) + +type replacerFunc func(newNode, parent SQLNode) + +// application carries all the shared data so we can pass it around cheaply. +type application struct { + pre, post ApplyFunc + cursor Cursor +} +` + +const applyHeader = ` +// apply is where the visiting happens. Here is where we keep the big switch-case that will be used +// to do the actual visiting of SQLNodes +func (a *application) apply(parent, node SQLNode, replacer replacerFunc) { + if node == nil || isNilValue(node) { + return + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.replacer = replacer + a.cursor.node = node + a.cursor.parent = parent + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases is alphabetical) + switch n := node.(type) { + case nil: + ` + +const fileFooter = ` + default: + panic("unknown ast type " + reflect.TypeOf(node).String()) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +func isNilValue(i interface{}) bool { + valueOf := reflect.ValueOf(i) + kind := valueOf.Kind() + isNullable := kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice + return isNullable && valueOf.IsNil() +}` diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/sast.go b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/sast.go new file mode 100644 index 00000000..e46485e8 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/sast.go @@ -0,0 +1,178 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package visitorgen + +// simplified ast - when reading the golang ast of the ast.go file, we translate the golang ast objects +// to this much simpler format, that contains only the necessary information and no more +type ( + // SourceFile contains all important lines from an ast.go file + SourceFile struct { + lines []Sast + } + + // Sast or simplified AST, is a representation of the ast.go lines we are interested in + Sast interface { + toSastString() string + } + + // InterfaceDeclaration represents a declaration of an interface. This is used to keep track of which types + // need to be handled by the visitor framework + InterfaceDeclaration struct { + name, block string + } + + // TypeAlias is used whenever we see a `type XXX YYY` - XXX is the new name for YYY. + // Note that YYY could be an array or a reference + TypeAlias struct { + name string + typ Type + } + + // FuncDeclaration represents a function declaration. These are tracked to know which types implement interfaces. + FuncDeclaration struct { + receiver *Field + name, block string + arguments []*Field + } + + // StructDeclaration represents a struct. It contains the fields and their types + StructDeclaration struct { + name string + fields []*Field + } + + // Field is a field in a struct - a name with a type tuple + Field struct { + name string + typ Type + } + + // Type represents a type in the golang type system. Used to keep track of type we need to handle, + // and the types of fields. + Type interface { + toTypString() string + rawTypeName() string + } + + // TypeString is a raw type name, such as `string` + TypeString struct { + typName string + } + + // Ref is a reference to something, such as `*string` + Ref struct { + inner Type + } + + // Array is an array of things, such as `[]string` + Array struct { + inner Type + } +) + +var _ Sast = (*InterfaceDeclaration)(nil) +var _ Sast = (*StructDeclaration)(nil) +var _ Sast = (*FuncDeclaration)(nil) +var _ Sast = (*TypeAlias)(nil) + +var _ Type = (*TypeString)(nil) +var _ Type = (*Ref)(nil) +var _ Type = (*Array)(nil) + +// String returns a textual representation of the SourceFile. This is for testing purposed +func (t *SourceFile) String() string { + var result string + for _, l := range t.lines { + result += l.toSastString() + result += "\n" + } + + return result +} + +func (t *Ref) toTypString() string { + return "*" + t.inner.toTypString() +} + +func (t *Array) toTypString() string { + return "[]" + t.inner.toTypString() +} + +func (t *TypeString) toTypString() string { + return t.typName +} + +func (f *FuncDeclaration) toSastString() string { + var receiver string + if f.receiver != nil { + receiver = "(" + f.receiver.String() + ") " + } + var args string + for i, arg := range f.arguments { + if i > 0 { + args += ", " + } + args += arg.String() + } + + return "func " + receiver + f.name + "(" + args + ") {" + blockInNewLines(f.block) + "}" +} + +func (i *InterfaceDeclaration) toSastString() string { + return "type " + i.name + " interface {" + blockInNewLines(i.block) + "}" +} + +func (a *TypeAlias) toSastString() string { + return "type " + a.name + " " + a.typ.toTypString() +} + +func (s *StructDeclaration) toSastString() string { + var block string + for _, f := range s.fields { + block += "\t" + f.String() + "\n" + } + + return "type " + s.name + " struct {" + blockInNewLines(block) + "}" +} + +func blockInNewLines(block string) string { + if block == "" { + return "" + } + return "\n" + block + "\n" +} + +// String returns a string representation of a field +func (f *Field) String() string { + if f.name != "" { + return f.name + " " + f.typ.toTypString() + } + + return f.typ.toTypString() +} + +func (t *TypeString) rawTypeName() string { + return t.typName +} + +func (t *Ref) rawTypeName() string { + return t.inner.rawTypeName() +} + +func (t *Array) rawTypeName() string { + return t.inner.rawTypeName() +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/struct_producer.go b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/struct_producer.go new file mode 100644 index 00000000..1c293f30 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/struct_producer.go @@ -0,0 +1,253 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package visitorgen + +import ( + "fmt" + "sort" +) + +// VisitorData is the data needed to produce the output file +type ( + // VisitorItem represents something that needs to be added to the rewriter infrastructure + VisitorItem interface { + toFieldItemString() string + typeName() string + asSwitchCase() string + asReplMethod() string + getFieldName() string + } + + // SingleFieldItem is a single field in a struct + SingleFieldItem struct { + StructType, FieldType Type + FieldName string + } + + // ArrayFieldItem is an array field in a struct + ArrayFieldItem struct { + StructType, ItemType Type + FieldName string + } + + // ArrayItem is an array that implements SQLNode + ArrayItem struct { + StructType, ItemType Type + } + + // VisitorPlan represents all the output needed for the rewriter + VisitorPlan struct { + Switches []*SwitchCase // The cases for the big switch statement used to implement the visitor + } + + // SwitchCase is what we need to know to produce all the type switch cases in the visitor. + SwitchCase struct { + Type Type + Fields []VisitorItem + } +) + +var _ VisitorItem = (*SingleFieldItem)(nil) +var _ VisitorItem = (*ArrayItem)(nil) +var _ VisitorItem = (*ArrayFieldItem)(nil) +var _ sort.Interface = (*VisitorPlan)(nil) +var _ sort.Interface = (*SwitchCase)(nil) + +// ToVisitorPlan transforms the source information into a plan for the visitor code that needs to be produced +func ToVisitorPlan(input *SourceInformation) *VisitorPlan { + var output VisitorPlan + + for _, typ := range input.interestingTypes { + switchit := &SwitchCase{Type: typ} + stroct, isStruct := input.structs[typ.rawTypeName()] + if isStruct { + for _, f := range stroct.fields { + switchit.Fields = append(switchit.Fields, trySingleItem(input, f, typ)...) + } + } else { + itemType := input.getItemTypeOfArray(typ) + if itemType != nil && input.isSQLNode(itemType) { + switchit.Fields = append(switchit.Fields, &ArrayItem{ + StructType: typ, + ItemType: itemType, + }) + } + } + sort.Sort(switchit) + output.Switches = append(output.Switches, switchit) + } + sort.Sort(&output) + return &output +} + +func trySingleItem(input *SourceInformation, f *Field, typ Type) []VisitorItem { + if input.isSQLNode(f.typ) { + return []VisitorItem{&SingleFieldItem{ + StructType: typ, + FieldType: f.typ, + FieldName: f.name, + }} + } + + arrType, isArray := f.typ.(*Array) + if isArray && input.isSQLNode(arrType.inner) { + return []VisitorItem{&ArrayFieldItem{ + StructType: typ, + ItemType: arrType.inner, + FieldName: f.name, + }} + } + return []VisitorItem{} +} + +// String returns a string, used for testing +func (v *VisitorPlan) String() string { + var sb builder + for _, s := range v.Switches { + sb.appendF("Type: %v", s.Type.toTypString()) + for _, f := range s.Fields { + sb.appendF("\t%v", f.toFieldItemString()) + } + } + return sb.String() +} + +func (s *SingleFieldItem) toFieldItemString() string { + return fmt.Sprintf("single item: %v of type: %v", s.FieldName, s.FieldType.toTypString()) +} + +func (s *SingleFieldItem) asSwitchCase() string { + return fmt.Sprintf(` a.apply(node, n.%s, %s)`, s.FieldName, s.typeName()) +} + +func (s *SingleFieldItem) asReplMethod() string { + _, isRef := s.StructType.(*Ref) + + if isRef { + return fmt.Sprintf(`func %s(newNode, parent SQLNode) { + parent.(%s).%s = newNode.(%s) +}`, s.typeName(), s.StructType.toTypString(), s.FieldName, s.FieldType.toTypString()) + } + + return fmt.Sprintf(`func %s(newNode, parent SQLNode) { + tmp := parent.(%s) + tmp.%s = newNode.(%s) +}`, s.typeName(), s.StructType.toTypString(), s.FieldName, s.FieldType.toTypString()) + +} + +func (ai *ArrayItem) asReplMethod() string { + name := ai.typeName() + return fmt.Sprintf(`type %s int + +func (r *%s) replace(newNode, container SQLNode) { + container.(%s)[int(*r)] = newNode.(%s) +} + +func (r *%s) inc() { + *r++ +}`, name, name, ai.StructType.toTypString(), ai.ItemType.toTypString(), name) +} + +func (afi *ArrayFieldItem) asReplMethod() string { + name := afi.typeName() + return fmt.Sprintf(`type %s int + +func (r *%s) replace(newNode, container SQLNode) { + container.(%s).%s[int(*r)] = newNode.(%s) +} + +func (r *%s) inc() { + *r++ +}`, name, name, afi.StructType.toTypString(), afi.FieldName, afi.ItemType.toTypString(), name) +} + +func (s *SingleFieldItem) getFieldName() string { + return s.FieldName +} + +func (s *SingleFieldItem) typeName() string { + return "replace" + s.StructType.rawTypeName() + s.FieldName +} + +func (afi *ArrayFieldItem) toFieldItemString() string { + return fmt.Sprintf("array field item: %v.%v contains items of type %v", afi.StructType.toTypString(), afi.FieldName, afi.ItemType.toTypString()) +} + +func (ai *ArrayItem) toFieldItemString() string { + return fmt.Sprintf("array item: %v containing items of type %v", ai.StructType.toTypString(), ai.ItemType.toTypString()) +} + +func (ai *ArrayItem) getFieldName() string { + panic("Should not be called!") +} + +func (afi *ArrayFieldItem) getFieldName() string { + return afi.FieldName +} + +func (ai *ArrayItem) asSwitchCase() string { + return fmt.Sprintf(` replacer := %s(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + }`, ai.typeName()) +} + +func (afi *ArrayFieldItem) asSwitchCase() string { + return fmt.Sprintf(` replacer%s := %s(0) + replacer%sB := &replacer%s + for _, item := range n.%s { + a.apply(node, item, replacer%sB.replace) + replacer%sB.inc() + }`, afi.FieldName, afi.typeName(), afi.FieldName, afi.FieldName, afi.FieldName, afi.FieldName, afi.FieldName) +} + +func (ai *ArrayItem) typeName() string { + return "replace" + ai.StructType.rawTypeName() + "Items" +} + +func (afi *ArrayFieldItem) typeName() string { + return "replace" + afi.StructType.rawTypeName() + afi.FieldName +} +func (v *VisitorPlan) Len() int { + return len(v.Switches) +} + +func (v *VisitorPlan) Less(i, j int) bool { + return v.Switches[i].Type.rawTypeName() < v.Switches[j].Type.rawTypeName() +} + +func (v *VisitorPlan) Swap(i, j int) { + temp := v.Switches[i] + v.Switches[i] = v.Switches[j] + v.Switches[j] = temp +} +func (s *SwitchCase) Len() int { + return len(s.Fields) +} + +func (s *SwitchCase) Less(i, j int) bool { + return s.Fields[i].getFieldName() < s.Fields[j].getFieldName() +} + +func (s *SwitchCase) Swap(i, j int) { + temp := s.Fields[i] + s.Fields[i] = s.Fields[j] + s.Fields[j] = temp +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/struct_producer_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/struct_producer_test.go new file mode 100644 index 00000000..065b532a --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/struct_producer_test.go @@ -0,0 +1,423 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package visitorgen + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEmptyStructVisitor(t *testing.T) { + /* + type Node interface{} + type Struct struct {} + func (*Struct) iNode() {} + */ + + input := &SourceInformation{ + interestingTypes: map[string]Type{ + "*Struct": &Ref{&TypeString{"Struct"}}, + }, + interfaces: map[string]bool{ + "Node": true, + }, + structs: map[string]*StructDeclaration{ + "Struct": {name: "Struct", fields: []*Field{}}, + }, + typeAliases: map[string]*TypeAlias{}, + } + + result := ToVisitorPlan(input) + + expected := &VisitorPlan{ + Switches: []*SwitchCase{{ + Type: &Ref{&TypeString{"Struct"}}, + Fields: []VisitorItem{}, + }}, + } + + assert.Equal(t, expected.String(), result.String()) +} + +func TestStructWithSqlNodeField(t *testing.T) { + /* + type Node interface{} + type Struct struct { + Field Node + } + func (*Struct) iNode() {} + */ + input := &SourceInformation{ + interestingTypes: map[string]Type{ + "*Struct": &Ref{&TypeString{"Struct"}}, + }, + interfaces: map[string]bool{ + "Node": true, + }, + structs: map[string]*StructDeclaration{ + "Struct": {name: "Struct", fields: []*Field{ + {name: "Field", typ: &TypeString{"Node"}}, + }}, + }, + typeAliases: map[string]*TypeAlias{}, + } + + result := ToVisitorPlan(input) + + expected := &VisitorPlan{ + Switches: []*SwitchCase{{ + Type: &Ref{&TypeString{"Struct"}}, + Fields: []VisitorItem{&SingleFieldItem{ + StructType: &Ref{&TypeString{"Struct"}}, + FieldType: &TypeString{"Node"}, + FieldName: "Field", + }}, + }}, + } + + assert.Equal(t, expected.String(), result.String()) +} + +func TestStructWithStringField2(t *testing.T) { + /* + type Node interface{} + type Struct struct { + Field Node + } + func (*Struct) iNode() {} + */ + + input := &SourceInformation{ + interestingTypes: map[string]Type{ + "*Struct": &Ref{&TypeString{"Struct"}}, + }, + interfaces: map[string]bool{ + "Node": true, + }, + structs: map[string]*StructDeclaration{ + "Struct": {name: "Struct", fields: []*Field{ + {name: "Field", typ: &TypeString{"string"}}, + }}, + }, + typeAliases: map[string]*TypeAlias{}, + } + + result := ToVisitorPlan(input) + + expected := &VisitorPlan{ + Switches: []*SwitchCase{{ + Type: &Ref{&TypeString{"Struct"}}, + Fields: []VisitorItem{}, + }}, + } + + assert.Equal(t, expected.String(), result.String()) +} + +func TestArrayAsSqlNode(t *testing.T) { + /* + type NodeInterface interface { + iNode() + } + + func (*NodeArray) iNode{} + + type NodeArray []NodeInterface + */ + + input := &SourceInformation{ + interfaces: map[string]bool{"NodeInterface": true}, + interestingTypes: map[string]Type{ + "*NodeArray": &Ref{&TypeString{"NodeArray"}}}, + structs: map[string]*StructDeclaration{}, + typeAliases: map[string]*TypeAlias{ + "NodeArray": { + name: "NodeArray", + typ: &Array{&TypeString{"NodeInterface"}}, + }, + }, + } + + result := ToVisitorPlan(input) + + expected := &VisitorPlan{ + Switches: []*SwitchCase{{ + Type: &Ref{&TypeString{"NodeArray"}}, + Fields: []VisitorItem{&ArrayItem{ + StructType: &Ref{&TypeString{"NodeArray"}}, + ItemType: &TypeString{"NodeInterface"}, + }}, + }}, + } + + assert.Equal(t, expected.String(), result.String()) +} + +func TestStructWithStructField(t *testing.T) { + /* + type Node interface{} + type Struct struct { + Field *Struct + } + func (*Struct) iNode() {} + */ + + input := &SourceInformation{ + interestingTypes: map[string]Type{ + "*Struct": &Ref{&TypeString{"Struct"}}}, + structs: map[string]*StructDeclaration{ + "Struct": {name: "Struct", fields: []*Field{ + {name: "Field", typ: &Ref{&TypeString{"Struct"}}}, + }}, + }, + typeAliases: map[string]*TypeAlias{}, + } + + result := ToVisitorPlan(input) + + expected := &VisitorPlan{ + Switches: []*SwitchCase{{ + Type: &Ref{&TypeString{"Struct"}}, + Fields: []VisitorItem{&SingleFieldItem{ + StructType: &Ref{&TypeString{"Struct"}}, + FieldType: &Ref{&TypeString{"Struct"}}, + FieldName: "Field", + }}, + }}, + } + + assert.Equal(t, expected.String(), result.String()) +} + +func TestStructWithArrayOfNodes(t *testing.T) { + /* + type NodeInterface interface {} + type Struct struct { + Items []NodeInterface + } + + func (*Struct) iNode{} + */ + + input := &SourceInformation{ + interfaces: map[string]bool{ + "NodeInterface": true, + }, + interestingTypes: map[string]Type{ + "*Struct": &Ref{&TypeString{"Struct"}}}, + structs: map[string]*StructDeclaration{ + "Struct": {name: "Struct", fields: []*Field{ + {name: "Items", typ: &Array{&TypeString{"NodeInterface"}}}, + }}, + }, + typeAliases: map[string]*TypeAlias{}, + } + + result := ToVisitorPlan(input) + + expected := &VisitorPlan{ + Switches: []*SwitchCase{{ + Type: &Ref{&TypeString{"Struct"}}, + Fields: []VisitorItem{&ArrayFieldItem{ + StructType: &Ref{&TypeString{"Struct"}}, + ItemType: &TypeString{"NodeInterface"}, + FieldName: "Items", + }}, + }}, + } + + assert.Equal(t, expected.String(), result.String()) +} + +func TestStructWithArrayOfStrings(t *testing.T) { + /* + type NodeInterface interface {} + type Struct struct { + Items []string + } + + func (*Struct) iNode{} + */ + + input := &SourceInformation{ + interfaces: map[string]bool{ + "NodeInterface": true, + }, + interestingTypes: map[string]Type{ + "*Struct": &Ref{&TypeString{"Struct"}}}, + structs: map[string]*StructDeclaration{ + "Struct": {name: "Struct", fields: []*Field{ + {name: "Items", typ: &Array{&TypeString{"string"}}}, + }}, + }, + typeAliases: map[string]*TypeAlias{}, + } + + result := ToVisitorPlan(input) + + expected := &VisitorPlan{ + Switches: []*SwitchCase{{ + Type: &Ref{&TypeString{"Struct"}}, + Fields: []VisitorItem{}, + }}, + } + + assert.Equal(t, expected.String(), result.String()) +} + +func TestArrayOfStringsThatImplementSQLNode(t *testing.T) { + /* + type NodeInterface interface {} + type Struct []string + func (Struct) iNode{} + */ + + input := &SourceInformation{ + interfaces: map[string]bool{"NodeInterface": true}, + interestingTypes: map[string]Type{"Struct": &Ref{&TypeString{"Struct"}}}, + structs: map[string]*StructDeclaration{}, + typeAliases: map[string]*TypeAlias{ + "Struct": { + name: "Struct", + typ: &Array{&TypeString{"string"}}, + }, + }, + } + + result := ToVisitorPlan(input) + + expected := &VisitorPlan{ + Switches: []*SwitchCase{{ + Type: &Ref{&TypeString{"Struct"}}, + Fields: []VisitorItem{}, + }}, + } + + assert.Equal(t, expected.String(), result.String()) +} + +func TestSortingOfOutputs(t *testing.T) { + /* + type NodeInterface interface {} + type AStruct struct { + AField NodeInterface + BField NodeInterface + } + type BStruct struct { + CField NodeInterface + } + func (*AStruct) iNode{} + func (*BStruct) iNode{} + */ + + input := &SourceInformation{ + interfaces: map[string]bool{"NodeInterface": true}, + interestingTypes: map[string]Type{ + "AStruct": &Ref{&TypeString{"AStruct"}}, + "BStruct": &Ref{&TypeString{"BStruct"}}, + }, + structs: map[string]*StructDeclaration{ + "AStruct": {name: "AStruct", fields: []*Field{ + {name: "BField", typ: &TypeString{"NodeInterface"}}, + {name: "AField", typ: &TypeString{"NodeInterface"}}, + }}, + "BStruct": {name: "BStruct", fields: []*Field{ + {name: "CField", typ: &TypeString{"NodeInterface"}}, + }}, + }, + typeAliases: map[string]*TypeAlias{}, + } + + result := ToVisitorPlan(input) + + expected := &VisitorPlan{ + Switches: []*SwitchCase{ + {Type: &Ref{&TypeString{"AStruct"}}, + Fields: []VisitorItem{ + &SingleFieldItem{ + StructType: &Ref{&TypeString{"AStruct"}}, + FieldType: &TypeString{"NodeInterface"}, + FieldName: "AField", + }, + &SingleFieldItem{ + StructType: &Ref{&TypeString{"AStruct"}}, + FieldType: &TypeString{"NodeInterface"}, + FieldName: "BField", + }}}, + {Type: &Ref{&TypeString{"BStruct"}}, + Fields: []VisitorItem{ + &SingleFieldItem{ + StructType: &Ref{&TypeString{"BStruct"}}, + FieldType: &TypeString{"NodeInterface"}, + FieldName: "CField", + }}}}, + } + assert.Equal(t, expected.String(), result.String()) +} + +func TestAliasOfAlias(t *testing.T) { + /* + type NodeInterface interface { + iNode() + } + + type NodeArray []NodeInterface + type AliasOfAlias NodeArray + + func (NodeArray) iNode{} + func (AliasOfAlias) iNode{} + */ + + input := &SourceInformation{ + interfaces: map[string]bool{"NodeInterface": true}, + interestingTypes: map[string]Type{ + "NodeArray": &TypeString{"NodeArray"}, + "AliasOfAlias": &TypeString{"AliasOfAlias"}, + }, + structs: map[string]*StructDeclaration{}, + typeAliases: map[string]*TypeAlias{ + "NodeArray": { + name: "NodeArray", + typ: &Array{&TypeString{"NodeInterface"}}, + }, + "AliasOfAlias": { + name: "NodeArray", + typ: &TypeString{"NodeArray"}, + }, + }, + } + + result := ToVisitorPlan(input) + + expected := &VisitorPlan{ + Switches: []*SwitchCase{ + {Type: &TypeString{"AliasOfAlias"}, + Fields: []VisitorItem{&ArrayItem{ + StructType: &TypeString{"AliasOfAlias"}, + ItemType: &TypeString{"NodeInterface"}, + }}, + }, + {Type: &TypeString{"NodeArray"}, + Fields: []VisitorItem{&ArrayItem{ + StructType: &TypeString{"NodeArray"}, + ItemType: &TypeString{"NodeInterface"}, + }}, + }}, + } + assert.Equal(t, expected.String(), result.String()) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/transformer.go b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/transformer.go new file mode 100644 index 00000000..98129be8 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/transformer.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package visitorgen + +import "fmt" + +// Transform takes an input file and collects the information into an easier to consume format +func Transform(input *SourceFile) *SourceInformation { + interestingTypes := make(map[string]Type) + interfaces := make(map[string]bool) + structs := make(map[string]*StructDeclaration) + typeAliases := make(map[string]*TypeAlias) + + for _, l := range input.lines { + switch line := l.(type) { + case *FuncDeclaration: + interestingTypes[line.receiver.typ.toTypString()] = line.receiver.typ + case *StructDeclaration: + structs[line.name] = line + case *TypeAlias: + typeAliases[line.name] = line + case *InterfaceDeclaration: + interfaces[line.name] = true + } + } + + return &SourceInformation{ + interfaces: interfaces, + interestingTypes: interestingTypes, + structs: structs, + typeAliases: typeAliases, + } +} + +// SourceInformation contains the information from the ast.go file, but in a format that is easier to consume +type SourceInformation struct { + interestingTypes map[string]Type + interfaces map[string]bool + structs map[string]*StructDeclaration + typeAliases map[string]*TypeAlias +} + +func (v *SourceInformation) String() string { + var types string + for _, k := range v.interestingTypes { + types += k.toTypString() + "\n" + } + var structs string + for _, k := range v.structs { + structs += k.toSastString() + "\n" + } + var typeAliases string + for _, k := range v.typeAliases { + typeAliases += k.toSastString() + "\n" + } + + return fmt.Sprintf("Types to build visitor for:\n%s\nStructs with fields: \n%s\nTypeAliases with type: \n%s\n", types, structs, typeAliases) +} + +// getItemTypeOfArray will return nil if the given type is not pointing to a array type. +// If it is an array type, the type of it's items will be returned +func (v *SourceInformation) getItemTypeOfArray(typ Type) Type { + alias := v.typeAliases[typ.rawTypeName()] + if alias == nil { + return nil + } + arrTyp, isArray := alias.typ.(*Array) + if !isArray { + return v.getItemTypeOfArray(alias.typ) + } + return arrTyp.inner +} + +func (v *SourceInformation) isSQLNode(typ Type) bool { + _, isInteresting := v.interestingTypes[typ.toTypString()] + if isInteresting { + return true + } + _, isInterface := v.interfaces[typ.toTypString()] + return isInterface +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/transformer_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/transformer_test.go new file mode 100644 index 00000000..4a0849e9 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/transformer_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package visitorgen + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSimplestAst(t *testing.T) { + /* + type NodeInterface interface { + iNode() + } + + type NodeStruct struct {} + + func (*NodeStruct) iNode{} + */ + input := &SourceFile{ + lines: []Sast{ + &InterfaceDeclaration{ + name: "NodeInterface", + block: "// an interface lives here"}, + &StructDeclaration{ + name: "NodeStruct", + fields: []*Field{}}, + &FuncDeclaration{ + receiver: &Field{ + name: "", + typ: &Ref{&TypeString{"NodeStruct"}}, + }, + name: "iNode", + block: "", + arguments: []*Field{}}, + }, + } + + expected := &SourceInformation{ + interestingTypes: map[string]Type{ + "*NodeStruct": &Ref{&TypeString{"NodeStruct"}}}, + structs: map[string]*StructDeclaration{ + "NodeStruct": { + name: "NodeStruct", + fields: []*Field{}}}, + } + + assert.Equal(t, expected.String(), Transform(input).String()) +} + +func TestAstWithArray(t *testing.T) { + /* + type NodeInterface interface { + iNode() + } + + func (*NodeArray) iNode{} + + type NodeArray []NodeInterface + */ + input := &SourceFile{ + lines: []Sast{ + &InterfaceDeclaration{ + name: "NodeInterface"}, + &TypeAlias{ + name: "NodeArray", + typ: &Array{&TypeString{"NodeInterface"}}, + }, + &FuncDeclaration{ + receiver: &Field{ + name: "", + typ: &Ref{&TypeString{"NodeArray"}}, + }, + name: "iNode", + block: "", + arguments: []*Field{}}, + }, + } + + expected := &SourceInformation{ + interestingTypes: map[string]Type{ + "*NodeArray": &Ref{&TypeString{"NodeArray"}}}, + structs: map[string]*StructDeclaration{}, + typeAliases: map[string]*TypeAlias{ + "NodeArray": { + name: "NodeArray", + typ: &Array{&TypeString{"NodeInterface"}}, + }, + }, + } + + result := Transform(input) + + assert.Equal(t, expected.String(), result.String()) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitor_emitter.go b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitor_emitter.go new file mode 100644 index 00000000..889c05fe --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitor_emitter.go @@ -0,0 +1,76 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package visitorgen + +import ( + "fmt" + "strings" +) + +// EmitReplacementMethods is an anti-parser (a.k.a prettifier) - it takes a struct that is much like an AST, +// and produces a string from it. This method will produce the replacement methods that make it possible to +// replace objects in fields or in slices. +func EmitReplacementMethods(vd *VisitorPlan) string { + var sb builder + for _, s := range vd.Switches { + for _, k := range s.Fields { + sb.appendF(k.asReplMethod()) + sb.newLine() + } + } + + return sb.String() +} + +// EmitTypeSwitches is an anti-parser (a.k.a prettifier) - it takes a struct that is much like an AST, +// and produces a string from it. This method will produce the switch cases needed to cover the Vitess AST. +func EmitTypeSwitches(vd *VisitorPlan) string { + var sb builder + for _, s := range vd.Switches { + sb.newLine() + sb.appendF(" case %s:", s.Type.toTypString()) + for _, k := range s.Fields { + sb.appendF(k.asSwitchCase()) + } + } + + return sb.String() +} + +func (b *builder) String() string { + return strings.TrimSpace(b.sb.String()) +} + +type builder struct { + sb strings.Builder +} + +func (b *builder) appendF(format string, data ...interface{}) *builder { + _, err := b.sb.WriteString(fmt.Sprintf(format, data...)) + if err != nil { + panic(err) + } + b.newLine() + return b +} + +func (b *builder) newLine() { + _, err := b.sb.WriteString("\n") + if err != nil { + panic(err) + } +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitor_emitter_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitor_emitter_test.go new file mode 100644 index 00000000..94666daa --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitor_emitter_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package visitorgen + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSingleItem(t *testing.T) { + sfi := SingleFieldItem{ + StructType: &Ref{&TypeString{"Struct"}}, + FieldType: &TypeString{"string"}, + FieldName: "Field", + } + + expectedReplacer := `func replaceStructField(newNode, parent SQLNode) { + parent.(*Struct).Field = newNode.(string) +}` + + expectedSwitch := ` a.apply(node, n.Field, replaceStructField)` + require.Equal(t, expectedReplacer, sfi.asReplMethod()) + require.Equal(t, expectedSwitch, sfi.asSwitchCase()) +} + +func TestArrayFieldItem(t *testing.T) { + sfi := ArrayFieldItem{ + StructType: &Ref{&TypeString{"Struct"}}, + ItemType: &TypeString{"string"}, + FieldName: "Field", + } + + expectedReplacer := `type replaceStructField int + +func (r *replaceStructField) replace(newNode, container SQLNode) { + container.(*Struct).Field[int(*r)] = newNode.(string) +} + +func (r *replaceStructField) inc() { + *r++ +}` + + expectedSwitch := ` replacerField := replaceStructField(0) + replacerFieldB := &replacerField + for _, item := range n.Field { + a.apply(node, item, replacerFieldB.replace) + replacerFieldB.inc() + }` + require.Equal(t, expectedReplacer, sfi.asReplMethod()) + require.Equal(t, expectedSwitch, sfi.asSwitchCase()) +} + +func TestArrayItem(t *testing.T) { + sfi := ArrayItem{ + StructType: &Ref{&TypeString{"Struct"}}, + ItemType: &TypeString{"string"}, + } + + expectedReplacer := `type replaceStructItems int + +func (r *replaceStructItems) replace(newNode, container SQLNode) { + container.(*Struct)[int(*r)] = newNode.(string) +} + +func (r *replaceStructItems) inc() { + *r++ +}` + + expectedSwitch := ` replacer := replaceStructItems(0) + replacerRef := &replacer + for _, item := range n { + a.apply(node, item, replacerRef.replace) + replacerRef.inc() + }` + require.Equal(t, expectedReplacer, sfi.asReplMethod()) + require.Equal(t, expectedSwitch, sfi.asSwitchCase()) +} diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitorgen.go b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitorgen.go new file mode 100644 index 00000000..284f8c4d --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/visitorgen/visitorgen.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//Package visitorgen is responsible for taking the ast.go of Vitess and +//and producing visitor infrastructure for it. +// +//This is accomplished in a few steps. +//Step 1: Walk the AST and collect the interesting information into a format that is +// easy to consume for the next step. The output format is a *SourceFile, that +// contains the needed information in a format that is pretty close to the golang ast, +// but simplified +//Step 2: A SourceFile is packaged into a SourceInformation. SourceInformation is still +// concerned with the input ast - it's just an even more distilled and easy to +// consume format for the last step. This step is performed by the code in transformer.go. +//Step 3: Using the SourceInformation, the struct_producer.go code produces the final data structure +// used, a VisitorPlan. This is focused on the output - it contains a list of all fields or +// arrays that need to be handled by the visitor produced. +//Step 4: The VisitorPlan is lastly turned into a string that is written as the output of +// this whole process. +package visitorgen diff --git a/internal/stackql-parser-fork/go/vt/sqlparser/window_test.go b/internal/stackql-parser-fork/go/vt/sqlparser/window_test.go new file mode 100644 index 00000000..4a511120 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/sqlparser/window_test.go @@ -0,0 +1,66 @@ +package sqlparser + +import ( + "testing" +) + +func TestWindowFunctions(t *testing.T) { + tests := []struct { + name string + sql string + valid bool + }{ + { + name: "simple window function", + sql: "SELECT SUM(count) OVER () FROM t", + valid: true, + }, + { + name: "window function with ORDER BY", + sql: "SELECT RANK() OVER (ORDER BY count DESC) FROM t", + valid: true, + }, + { + name: "window function with PARTITION BY", + sql: "SELECT SUM(count) OVER (PARTITION BY category) FROM t", + valid: true, + }, + { + name: "window function with PARTITION BY and ORDER BY", + sql: "SELECT SUM(count) OVER (PARTITION BY category ORDER BY name) FROM t", + valid: true, + }, + { + name: "window function with frame", + sql: "SELECT SUM(count) OVER (ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM t", + valid: true, + }, + { + name: "complex window function query", + sql: "SELECT serviceName, COUNT(*) as service_count, SUM(COUNT(*)) OVER () as total_count FROM t GROUP BY serviceName", + valid: true, + }, + { + name: "ROW_NUMBER window function", + sql: "SELECT ROW_NUMBER() OVER (ORDER BY id) as rn FROM t", + valid: true, + }, + { + name: "multiple window functions", + sql: "SELECT SUM(x) OVER (), COUNT(*) OVER (ORDER BY y) FROM t", + valid: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + _, err := Parse(tc.sql) + if tc.valid && err != nil { + t.Errorf("expected valid SQL but got error: %v", err) + } + if !tc.valid && err == nil { + t.Errorf("expected invalid SQL but got success") + } + }) + } +} diff --git a/internal/stackql-parser-fork/go/vt/vterrors/LICENSE b/internal/stackql-parser-fork/go/vt/vterrors/LICENSE new file mode 100644 index 00000000..835ba3e7 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vterrors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/stackql-parser-fork/go/vt/vterrors/aggregate.go b/internal/stackql-parser-fork/go/vt/vterrors/aggregate.go new file mode 100644 index 00000000..70e8b291 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vterrors/aggregate.go @@ -0,0 +1,106 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vterrors + +import ( + "sort" + "strings" + + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" +) + +// A list of all vtrpcpb.Code, ordered by priority. These priorities are +// used when aggregating multiple errors in VtGate. +// Higher priority error codes are more urgent for users to see. They are +// prioritized based on the following question: assuming a scatter query produced multiple +// errors, which of the errors is the most likely to give the user useful information +// about why the query failed and how they should proceed? +const ( + // Informational errors. + PriorityOK = iota + PriorityCanceled + PriorityAlreadyExists + PriorityOutOfRange + // Potentially retryable errors. + PriorityUnavailable + PriorityDeadlineExceeded + PriorityAborted + PriorityFailedPrecondition + // Permanent errors. + PriorityResourceExhausted + PriorityUnknown + PriorityUnauthenticated + PriorityPermissionDenied + PriorityInvalidArgument + PriorityNotFound + PriorityUnimplemented + // Serious errors. + PriorityInternal + PriorityDataLoss +) + +var errorPriorities = map[vtrpcpb.Code]int{ + vtrpcpb.Code_OK: PriorityOK, + vtrpcpb.Code_CANCELED: PriorityCanceled, + vtrpcpb.Code_UNKNOWN: PriorityUnknown, + vtrpcpb.Code_INVALID_ARGUMENT: PriorityInvalidArgument, + vtrpcpb.Code_DEADLINE_EXCEEDED: PriorityDeadlineExceeded, + vtrpcpb.Code_NOT_FOUND: PriorityNotFound, + vtrpcpb.Code_ALREADY_EXISTS: PriorityAlreadyExists, + vtrpcpb.Code_PERMISSION_DENIED: PriorityPermissionDenied, + vtrpcpb.Code_UNAUTHENTICATED: PriorityUnauthenticated, + vtrpcpb.Code_RESOURCE_EXHAUSTED: PriorityResourceExhausted, + vtrpcpb.Code_FAILED_PRECONDITION: PriorityFailedPrecondition, + vtrpcpb.Code_ABORTED: PriorityAborted, + vtrpcpb.Code_OUT_OF_RANGE: PriorityOutOfRange, + vtrpcpb.Code_UNIMPLEMENTED: PriorityUnimplemented, + vtrpcpb.Code_INTERNAL: PriorityInternal, + vtrpcpb.Code_UNAVAILABLE: PriorityUnavailable, + vtrpcpb.Code_DATA_LOSS: PriorityDataLoss, +} + +// Aggregate aggregates several errors into a single one. +// The resulting error code will be the one with the highest +// priority as defined by the priority constants in this package. +func Aggregate(errors []error) error { + if len(errors) == 0 { + return nil + } + return New(aggregateCodes(errors), aggregateErrors(errors)) +} + +func aggregateCodes(errors []error) vtrpcpb.Code { + highCode := vtrpcpb.Code_OK + for _, e := range errors { + code := Code(e) + if errorPriorities[code] > errorPriorities[highCode] { + highCode = code + } + } + return highCode +} + +// ConcatenateErrors aggregates an array of errors into a single error by string concatenation. +func aggregateErrors(errs []error) string { + errStrs := make([]string, 0, len(errs)) + for _, e := range errs { + errStrs = append(errStrs, e.Error()) + } + // sort the error strings so we always have deterministic ordering + sort.Strings(errStrs) + return strings.Join(errStrs, "\n") +} diff --git a/internal/stackql-parser-fork/go/vt/vterrors/aggregate_test.go b/internal/stackql-parser-fork/go/vt/vterrors/aggregate_test.go new file mode 100644 index 00000000..5852fb44 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vterrors/aggregate_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vterrors + +import ( + "errors" + "fmt" + "testing" + + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" +) + +var errGeneric = "generic error" + +func errFromCode(c vtrpcpb.Code) error { + return New(c, errGeneric) +} + +func TestAggregateVtGateErrorCodes(t *testing.T) { + var testcases = []struct { + input []error + expected vtrpcpb.Code + }{ + { + // aggregation of no errors is a success code + input: nil, + expected: vtrpcpb.Code_OK, + }, + { + // single error code gets returned directly + input: []error{errFromCode(vtrpcpb.Code_INVALID_ARGUMENT)}, + expected: vtrpcpb.Code_INVALID_ARGUMENT, + }, + { + // aggregate two codes to the highest priority + input: []error{ + errFromCode(vtrpcpb.Code_UNAVAILABLE), + errFromCode(vtrpcpb.Code_INVALID_ARGUMENT), + }, + expected: vtrpcpb.Code_INVALID_ARGUMENT, + }, + { + // unknown errors map to the unknown code + input: []error{ + fmt.Errorf("unknown error"), + }, + expected: vtrpcpb.Code_UNKNOWN, + }, + } + for _, tc := range testcases { + out := aggregateCodes(tc.input) + if out != tc.expected { + t.Errorf("AggregateVtGateErrorCodes(%v) = %v \nwant: %v", + tc.input, out, tc.expected) + } + } +} + +func TestAggregateVtGateErrors(t *testing.T) { + var testcases = []struct { + input []error + expected error + }{ + { + input: nil, + expected: nil, + }, + { + input: []error{ + errFromCode(vtrpcpb.Code_UNAVAILABLE), + errFromCode(vtrpcpb.Code_INVALID_ARGUMENT), + }, + expected: New( + vtrpcpb.Code_INVALID_ARGUMENT, + aggregateErrors([]error{ + errors.New(errGeneric), + errors.New(errGeneric), + }), + ), + }, + } + for _, tc := range testcases { + out := Aggregate(tc.input) + if !Equals(out, tc.expected) { + t.Errorf("AggregateVtGateErrors(%+v) = %+v \nwant: %+v", + tc.input, out, tc.expected) + } + } +} diff --git a/internal/stackql-parser-fork/go/vt/vterrors/errors_test.go b/internal/stackql-parser-fork/go/vt/vterrors/errors_test.go new file mode 100644 index 00000000..2c104609 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vterrors/errors_test.go @@ -0,0 +1,307 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vterrors + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "testing" + + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" + + "golang.org/x/net/context" +) + +func TestWrapNil(t *testing.T) { + got := Wrap(nil, "no error") + if got != nil { + t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWrap(t *testing.T) { + tests := []struct { + err error + message string + wantMessage string + wantCode vtrpcpb.Code + }{ + {io.EOF, "read error", "read error: EOF", vtrpcpb.Code_UNKNOWN}, + {New(vtrpcpb.Code_ALREADY_EXISTS, "oops"), "client error", "client error: oops", vtrpcpb.Code_ALREADY_EXISTS}, + } + + for _, tt := range tests { + got := Wrap(tt.err, tt.message) + if got.Error() != tt.wantMessage { + t.Errorf("Wrap(%v, %q): got: [%v], want [%v]", tt.err, tt.message, got, tt.wantMessage) + } + if Code(got) != tt.wantCode { + t.Errorf("Wrap(%v, %v): got: [%v], want [%v]", tt.err, tt, Code(got), tt.wantCode) + } + } +} + +type nilError struct{} + +func (nilError) Error() string { return "nil error" } + +func TestRootCause(t *testing.T) { + x := New(vtrpcpb.Code_FAILED_PRECONDITION, "error") + tests := []struct { + err error + want error + }{{ + // nil error is nil + err: nil, + want: nil, + }, { + // explicit nil error is nil + err: (error)(nil), + want: nil, + }, { + // typed nil is nil + err: (*nilError)(nil), + want: (*nilError)(nil), + }, { + // uncaused error is unaffected + err: io.EOF, + want: io.EOF, + }, { + // caused error returns cause + err: Wrap(io.EOF, "ignored"), + want: io.EOF, + }, { + err: x, // return from errors.New + want: x, + }} + + for i, tt := range tests { + got := RootCause(tt.err) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want) + } + } +} + +func TestCause(t *testing.T) { + x := New(vtrpcpb.Code_FAILED_PRECONDITION, "error") + tests := []struct { + err error + want error + }{{ + // nil error is nil + err: nil, + want: nil, + }, { + // uncaused error is nil + err: io.EOF, + want: nil, + }, { + // caused error returns cause + err: Wrap(io.EOF, "ignored"), + want: io.EOF, + }, { + err: x, // return from errors.New + want: nil, + }} + + for i, tt := range tests { + got := Cause(tt.err) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want) + } + } +} + +func TestWrapfNil(t *testing.T) { + got := Wrapf(nil, "no error") + if got != nil { + t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWrapf(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"}, + {Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"}, + } + + for _, tt := range tests { + got := Wrapf(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) + } + } +} + +func TestErrorf(t *testing.T) { + tests := []struct { + err error + want string + }{ + {Errorf(vtrpcpb.Code_DATA_LOSS, "read error without format specifiers"), "read error without format specifiers"}, + {Errorf(vtrpcpb.Code_DATA_LOSS, "read error with %d format specifier", 1), "read error with 1 format specifier"}, + } + + for _, tt := range tests { + got := tt.err.Error() + if got != tt.want { + t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want) + } + } +} + +func innerMost() error { + return Wrap(io.ErrNoProgress, "oh noes") +} + +func middle() error { + return innerMost() +} + +func outer() error { + return middle() +} + +func TestStackFormat(t *testing.T) { + err := outer() + got := fmt.Sprintf("%v", err) + + assertContains(t, got, "innerMost", false) + assertContains(t, got, "middle", false) + assertContains(t, got, "outer", false) + + LogErrStacks = true + defer func() { LogErrStacks = false }() + got = fmt.Sprintf("%v", err) + assertContains(t, got, "innerMost", true) + assertContains(t, got, "middle", true) + assertContains(t, got, "outer", true) +} + +// errors.New, etc values are not expected to be compared by value +// but the change in errors#27 made them incomparable. Assert that +// various kinds of errors have a functional equality operator, even +// if the result of that equality is always false. +func TestErrorEquality(t *testing.T) { + vals := []error{ + nil, + io.EOF, + errors.New("EOF"), + New(vtrpcpb.Code_ALREADY_EXISTS, "EOF"), + Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "EOF"), + Wrap(io.EOF, "EOF"), + Wrapf(io.EOF, "EOF%d", 2), + } + + for i := range vals { + for j := range vals { + _ = vals[i] == vals[j] // mustn't panic + } + } +} + +func TestCreation(t *testing.T) { + testcases := []struct { + in, want vtrpcpb.Code + }{{ + in: vtrpcpb.Code_CANCELED, + want: vtrpcpb.Code_CANCELED, + }, { + in: vtrpcpb.Code_UNKNOWN, + want: vtrpcpb.Code_UNKNOWN, + }} + for _, tcase := range testcases { + if got := Code(New(tcase.in, "")); got != tcase.want { + t.Errorf("Code(New(%v)): %v, want %v", tcase.in, got, tcase.want) + } + if got := Code(Errorf(tcase.in, "")); got != tcase.want { + t.Errorf("Code(Errorf(%v)): %v, want %v", tcase.in, got, tcase.want) + } + } +} + +func TestCode(t *testing.T) { + testcases := []struct { + in error + want vtrpcpb.Code + }{{ + in: nil, + want: vtrpcpb.Code_OK, + }, { + in: errors.New("generic"), + want: vtrpcpb.Code_UNKNOWN, + }, { + in: New(vtrpcpb.Code_CANCELED, "generic"), + want: vtrpcpb.Code_CANCELED, + }, { + in: context.Canceled, + want: vtrpcpb.Code_CANCELED, + }, { + in: context.DeadlineExceeded, + want: vtrpcpb.Code_DEADLINE_EXCEEDED, + }} + for _, tcase := range testcases { + if got := Code(tcase.in); got != tcase.want { + t.Errorf("Code(%v): %v, want %v", tcase.in, got, tcase.want) + } + } +} + +func TestWrapping(t *testing.T) { + err1 := Errorf(vtrpcpb.Code_UNAVAILABLE, "foo") + err2 := Wrapf(err1, "bar") + err3 := Wrapf(err2, "baz") + errorWithoutStack := fmt.Sprintf("%v", err3) + + LogErrStacks = true + errorWithStack := fmt.Sprintf("%v", err3) + LogErrStacks = false + + assertEquals(t, err3.Error(), "baz: bar: foo") + assertContains(t, errorWithoutStack, "foo", true) + assertContains(t, errorWithoutStack, "bar", true) + assertContains(t, errorWithoutStack, "baz", true) + assertContains(t, errorWithoutStack, "TestWrapping", false) + + assertContains(t, errorWithStack, "foo", true) + assertContains(t, errorWithStack, "bar", true) + assertContains(t, errorWithStack, "baz", true) + assertContains(t, errorWithStack, "TestWrapping", true) + +} + +func assertContains(t *testing.T, s, substring string, contains bool) { + t.Helper() + if doesContain := strings.Contains(s, substring); doesContain != contains { + t.Errorf("string `%v` contains `%v`: %v, want %v", s, substring, doesContain, contains) + } +} + +func assertEquals(t *testing.T, a, b interface{}) { + if a != b { + t.Fatalf("expected [%s] to be equal to [%s]", a, b) + } +} diff --git a/internal/stackql-parser-fork/go/vt/vterrors/grpc.go b/internal/stackql-parser-fork/go/vt/vterrors/grpc.go new file mode 100644 index 00000000..64bff794 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vterrors/grpc.go @@ -0,0 +1,143 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vterrors + +import ( + "fmt" + "io" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" +) + +// This file contains functions to convert errors to and from gRPC codes. +// Use these methods to return an error through gRPC and still +// retain its code. + +// CodeToLegacyErrorCode maps a vtrpcpb.Code to a vtrpcpb.LegacyErrorCode. +func CodeToLegacyErrorCode(code vtrpcpb.Code) vtrpcpb.LegacyErrorCode { + switch code { + case vtrpcpb.Code_OK: + return vtrpcpb.LegacyErrorCode_SUCCESS_LEGACY + case vtrpcpb.Code_CANCELED: + return vtrpcpb.LegacyErrorCode_CANCELLED_LEGACY + case vtrpcpb.Code_UNKNOWN: + return vtrpcpb.LegacyErrorCode_UNKNOWN_ERROR_LEGACY + case vtrpcpb.Code_INVALID_ARGUMENT: + return vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY + case vtrpcpb.Code_DEADLINE_EXCEEDED: + return vtrpcpb.LegacyErrorCode_DEADLINE_EXCEEDED_LEGACY + case vtrpcpb.Code_ALREADY_EXISTS: + return vtrpcpb.LegacyErrorCode_INTEGRITY_ERROR_LEGACY + case vtrpcpb.Code_PERMISSION_DENIED: + return vtrpcpb.LegacyErrorCode_PERMISSION_DENIED_LEGACY + case vtrpcpb.Code_RESOURCE_EXHAUSTED: + return vtrpcpb.LegacyErrorCode_RESOURCE_EXHAUSTED_LEGACY + case vtrpcpb.Code_FAILED_PRECONDITION: + return vtrpcpb.LegacyErrorCode_QUERY_NOT_SERVED_LEGACY + case vtrpcpb.Code_ABORTED: + return vtrpcpb.LegacyErrorCode_NOT_IN_TX_LEGACY + case vtrpcpb.Code_INTERNAL: + return vtrpcpb.LegacyErrorCode_INTERNAL_ERROR_LEGACY + case vtrpcpb.Code_UNAVAILABLE: + // Legacy code assumes Unavailable errors are sent as Internal. + return vtrpcpb.LegacyErrorCode_INTERNAL_ERROR_LEGACY + case vtrpcpb.Code_UNAUTHENTICATED: + return vtrpcpb.LegacyErrorCode_UNAUTHENTICATED_LEGACY + default: + return vtrpcpb.LegacyErrorCode_UNKNOWN_ERROR_LEGACY + } +} + +// LegacyErrorCodeToCode maps a vtrpcpb.LegacyErrorCode to a gRPC vtrpcpb.Code. +func LegacyErrorCodeToCode(code vtrpcpb.LegacyErrorCode) vtrpcpb.Code { + switch code { + case vtrpcpb.LegacyErrorCode_SUCCESS_LEGACY: + return vtrpcpb.Code_OK + case vtrpcpb.LegacyErrorCode_CANCELLED_LEGACY: + return vtrpcpb.Code_CANCELED + case vtrpcpb.LegacyErrorCode_UNKNOWN_ERROR_LEGACY: + return vtrpcpb.Code_UNKNOWN + case vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY: + return vtrpcpb.Code_INVALID_ARGUMENT + case vtrpcpb.LegacyErrorCode_DEADLINE_EXCEEDED_LEGACY: + return vtrpcpb.Code_DEADLINE_EXCEEDED + case vtrpcpb.LegacyErrorCode_INTEGRITY_ERROR_LEGACY: + return vtrpcpb.Code_ALREADY_EXISTS + case vtrpcpb.LegacyErrorCode_PERMISSION_DENIED_LEGACY: + return vtrpcpb.Code_PERMISSION_DENIED + case vtrpcpb.LegacyErrorCode_RESOURCE_EXHAUSTED_LEGACY: + return vtrpcpb.Code_RESOURCE_EXHAUSTED + case vtrpcpb.LegacyErrorCode_QUERY_NOT_SERVED_LEGACY: + return vtrpcpb.Code_FAILED_PRECONDITION + case vtrpcpb.LegacyErrorCode_NOT_IN_TX_LEGACY: + return vtrpcpb.Code_ABORTED + case vtrpcpb.LegacyErrorCode_INTERNAL_ERROR_LEGACY: + // Legacy code sends internal error instead of Unavailable. + return vtrpcpb.Code_UNAVAILABLE + case vtrpcpb.LegacyErrorCode_TRANSIENT_ERROR_LEGACY: + return vtrpcpb.Code_UNAVAILABLE + case vtrpcpb.LegacyErrorCode_UNAUTHENTICATED_LEGACY: + return vtrpcpb.Code_UNAUTHENTICATED + default: + return vtrpcpb.Code_UNKNOWN + } +} + +// truncateError shortens errors because gRPC has a size restriction on them. +func truncateError(err error) string { + // For more details see: https://github.com/grpc/grpc-go/issues/443 + // The gRPC spec says "Clients may limit the size of Response-Headers, + // Trailers, and Trailers-Only, with a default of 8 KiB each suggested." + // Therefore, we assume 8 KiB minus some headroom. + GRPCErrorLimit := 8*1024 - 512 + if len(err.Error()) <= GRPCErrorLimit { + return err.Error() + } + truncateInfo := "[...] [remainder of the error is truncated because gRPC has a size limit on errors.]" + truncatedErr := err.Error()[:GRPCErrorLimit] + return fmt.Sprintf("%v %v", truncatedErr, truncateInfo) +} + +// ToGRPC returns an error as a gRPC error, with the appropriate error code. +func ToGRPC(err error) error { + if err == nil { + return nil + } + return status.Errorf(codes.Code(Code(err)), "%v", truncateError(err)) +} + +// FromGRPC returns a gRPC error as a vtError, translating between error codes. +// However, there are a few errors which are not translated and passed as they +// are. For example, io.EOF since our code base checks for this error to find +// out that a stream has finished. +func FromGRPC(err error) error { + if err == nil { + return nil + } + if err == io.EOF { + // Do not wrap io.EOF because we compare against it for finished streams. + return err + } + code := codes.Unknown + if s, ok := status.FromError(err); ok { + code = s.Code() + } + return New(vtrpcpb.Code(code), err.Error()) +} diff --git a/internal/stackql-parser-fork/go/vt/vterrors/proto3.go b/internal/stackql-parser-fork/go/vt/vterrors/proto3.go new file mode 100644 index 00000000..71cd5fe8 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vterrors/proto3.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vterrors + +import ( + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" +) + +// This file contains the necessary methods to send and receive errors +// as payloads of proto3 structures. It converts vtError to and from +// *vtrpcpb.RPCError. Use these methods when a RPC call can return both +// data and an error. + +// FromVTRPC recovers a vtError from a *vtrpcpb.RPCError (which is how vtError +// is transmitted across proto3 RPC boundaries). +func FromVTRPC(rpcErr *vtrpcpb.RPCError) error { + if rpcErr == nil { + return nil + } + code := rpcErr.Code + if code == vtrpcpb.Code_OK { + code = LegacyErrorCodeToCode(rpcErr.LegacyCode) + } + return New(code, rpcErr.Message) +} + +// ToVTRPC converts from vtError to a vtrpcpb.RPCError. +func ToVTRPC(err error) *vtrpcpb.RPCError { + if err == nil { + return nil + } + code := Code(err) + return &vtrpcpb.RPCError{ + LegacyCode: CodeToLegacyErrorCode(code), + Code: code, + Message: err.Error(), + } +} diff --git a/internal/stackql-parser-fork/go/vt/vterrors/proto3_test.go b/internal/stackql-parser-fork/go/vt/vterrors/proto3_test.go new file mode 100644 index 00000000..92cd4f76 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vterrors/proto3_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vterrors + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" +) + +func TestFromVtRPCError(t *testing.T) { + testcases := []struct { + in *vtrpcpb.RPCError + want error + }{{ + in: nil, + want: nil, + }, { + in: &vtrpcpb.RPCError{ + LegacyCode: vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY, + Message: "bad input", + }, + want: New(vtrpcpb.Code_INVALID_ARGUMENT, "bad input"), + }, { + in: &vtrpcpb.RPCError{ + LegacyCode: vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY, + Message: "bad input", + Code: vtrpcpb.Code_INVALID_ARGUMENT, + }, + want: New(vtrpcpb.Code_INVALID_ARGUMENT, "bad input"), + }, { + in: &vtrpcpb.RPCError{ + Message: "bad input", + Code: vtrpcpb.Code_INVALID_ARGUMENT, + }, + want: New(vtrpcpb.Code_INVALID_ARGUMENT, "bad input"), + }} + for _, tcase := range testcases { + got := FromVTRPC(tcase.in) + if !Equals(got, tcase.want) { + t.Errorf("FromVtRPCError(%v): [%v], want [%v]", tcase.in, got, tcase.want) + } + } +} + +func TestVtRPCErrorFromVtError(t *testing.T) { + testcases := []struct { + in error + want *vtrpcpb.RPCError + }{{ + in: nil, + want: nil, + }, { + in: New(vtrpcpb.Code_INVALID_ARGUMENT, "bad input"), + want: &vtrpcpb.RPCError{ + LegacyCode: vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY, + Message: "bad input", + Code: vtrpcpb.Code_INVALID_ARGUMENT, + }, + }} + for _, tcase := range testcases { + got := ToVTRPC(tcase.in) + if !proto.Equal(got, tcase.want) { + t.Errorf("VtRPCErrorFromVtError(%v): %v, want %v", tcase.in, got, tcase.want) + } + } +} diff --git a/internal/stackql-parser-fork/go/vt/vterrors/stack.go b/internal/stackql-parser-fork/go/vt/vterrors/stack.go new file mode 100644 index 00000000..ba926429 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vterrors/stack.go @@ -0,0 +1,163 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vterrors + +/* This file is copied from https://github.com/pkg/errors/blob/v0.8.0/stack.go */ + +import ( + "fmt" + "io" + "path" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +// Format format the stacktrace according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + + if s.Flag('#') { + fmt.Fprintf(s, "%#v", []Frame(st)) + } else { + for _, f := range st { + fmt.Fprintf(s, "\n%v", f) + } + } + + case 's': + fmt.Fprintf(s, "%s", []Frame(st)) + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} diff --git a/internal/stackql-parser-fork/go/vt/vterrors/vterrors.go b/internal/stackql-parser-fork/go/vt/vterrors/vterrors.go new file mode 100644 index 00000000..372a2d9a --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vterrors/vterrors.go @@ -0,0 +1,311 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package vterrors provides simple error handling primitives for Vitess +// +// In all Vitess code, errors should be propagated using vterrors.Wrapf() +// and not fmt.Errorf(). This makes sure that stacktraces are kept and +// propagated correctly. +// +// # New errors should be created using vterrors.New or vterrors.Errorf +// +// Vitess uses canonical error codes for error reporting. This is based +// on years of industry experience with error reporting. This idea is +// that errors should be classified into a small set of errors (10 or so) +// with very specific meaning. Each error has a code, and a message. When +// errors are passed around (even through RPCs), the code is +// propagated. To handle errors, only the code should be looked at (and +// not string-matching on the error message). +// +// Error codes are defined in /proto/vtrpc.proto. Along with an +// RPCError message that can be used to transmit errors through RPCs, in +// the message payloads. These codes match the names and numbers defined +// by gRPC. +// +// A standardized error implementation that allows you to build an error +// with an associated canonical code is also defined. +// While sending an error through gRPC, these codes are transmitted +// using gRPC's error propagation mechanism and decoded back to +// the original code on the other end. +// +// # Retrieving the cause of an error +// +// Using vterrors.Wrap constructs a stack of errors, adding context to the +// preceding error, instead of simply building up a string. +// Depending on the nature of the error it may be necessary to reverse the +// operation of errors.Wrap to retrieve the original error for inspection. +// Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by vterrors.Cause and vterrors.RootCause. +// +// - vterrors.Cause will find the immediate cause if one is available, or nil +// if the error is not a `causer` or if no cause is available. +// +// - vterrors.RootCause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.RootCause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// +// # Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error. If the error has a Cause it will be +// printed recursively +// %v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Most but not all of the code in this file was originally copied from +// https://github.com/pkg/errors/blob/v0.8.0/errors.go +package vterrors + +import ( + "flag" + "fmt" + "io" + + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" + + "golang.org/x/net/context" +) + +// LogErrStacks controls whether or not printing errors includes the +// embedded stack trace in the output. +var LogErrStacks bool + +func init() { + flag.BoolVar(&LogErrStacks, "log_err_stacks", false, "log stack traces for errors") +} + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(code vtrpcpb.Code, message string) error { + return &fundamental{ + msg: message, + code: code, + stack: callers(), + } +} + +// NewWithoutCode returns an error when no applicable error code is available +// It will record the stack trace when creating the error +func NewWithoutCode(message string) error { + return &fundamental{ + msg: message, + code: vtrpcpb.Code_UNKNOWN, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(code vtrpcpb.Code, format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + code: code, + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { + msg string + code vtrpcpb.Code + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + panicIfError(io.WriteString(s, "Code: "+f.code.String()+"\n")) + panicIfError(io.WriteString(s, f.msg+"\n")) + if LogErrStacks { + f.stack.Format(s, verb) + } + return + case 's': + panicIfError(io.WriteString(s, f.msg)) + case 'q': + panicIfError(fmt.Fprintf(s, "%q", f.msg)) + } +} + +// Code returns the error code if it's a vtError. +// If err is nil, it returns ok. +func Code(err error) vtrpcpb.Code { + if err == nil { + return vtrpcpb.Code_OK + } + if err, ok := err.(*fundamental); ok { + return err.code + } + + cause := Cause(err) + if cause != err && cause != nil { + // If we did not find an error code at the outer level, let's find the cause and check it's code + return Code(cause) + } + + // Handle some special cases. + switch err { + case context.Canceled: + return vtrpcpb.Code_CANCELED + case context.DeadlineExceeded: + return vtrpcpb.Code_DEADLINE_EXCEEDED + } + return vtrpcpb.Code_UNKNOWN +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + return &wrapping{ + cause: err, + msg: message, + stack: callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &wrapping{ + cause: err, + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +type wrapping struct { + cause error + msg string + stack *stack +} + +func (w *wrapping) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *wrapping) Cause() error { return w.cause } + +func (w *wrapping) Format(s fmt.State, verb rune) { + if rune('v') == verb { + panicIfError(fmt.Fprintf(s, "%v\n", w.Cause())) + panicIfError(io.WriteString(s, w.msg)) + if LogErrStacks { + w.stack.Format(s, verb) + } + return + } + + if rune('s') == verb || rune('q') == verb { + panicIfError(io.WriteString(s, w.Error())) + } +} + +// since we can't return an error, let's panic if something goes wrong here +func panicIfError(_ int, err error) { + if err != nil { + panic(err) + } +} + +// RootCause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func RootCause(err error) error { + for { + cause := Cause(err) + if cause == nil { + return err + } + err = cause + } +} + +// Cause will return the immediate cause, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, nil will be returned +func Cause(err error) error { + type causer interface { + Cause() error + } + + causerObj, ok := err.(causer) + if !ok { + return nil + } + + return causerObj.Cause() +} + +// Equals returns true iff the error message and the code returned by Code() +// are equal. +func Equals(a, b error) bool { + if a == nil && b == nil { + // Both are nil. + return true + } + + if a == nil || b == nil { + // One of the two is nil, since we know both are not nil. + return false + } + + return a.Error() == b.Error() && Code(a) == Code(b) +} + +// Print is meant to print the vtError object in test failures. +// For comparing two vterrors, use Equals() instead. +func Print(err error) string { + return fmt.Sprintf("%v: %v\n", Code(err), err.Error()) +} diff --git a/internal/stackql-parser-fork/go/vt/vtgate/evalengine/arithmetic.go b/internal/stackql-parser-fork/go/vt/vtgate/evalengine/arithmetic.go new file mode 100644 index 00000000..b3512063 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vtgate/evalengine/arithmetic.go @@ -0,0 +1,820 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "bytes" + "fmt" + + "github.com/stackql/stackql-parser/go/sqltypes" + + "strconv" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" + "github.com/stackql/stackql-parser/go/vt/vterrors" +) + +// numeric represents a numeric value extracted from +// a Value, used for arithmetic operations. +var zeroBytes = []byte("0") + +// Add adds two values together +// if v1 or v2 is null, then it returns null +func Add(v1, v2 sqltypes.Value) (sqltypes.Value, error) { + if v1.IsNull() || v2.IsNull() { + return sqltypes.NULL, nil + } + + lv1, err := newEvalResult(v1) + if err != nil { + return sqltypes.NULL, err + } + + lv2, err := newEvalResult(v2) + if err != nil { + return sqltypes.NULL, err + } + + lresult, err := addNumericWithError(lv1, lv2) + if err != nil { + return sqltypes.NULL, err + } + + return castFromNumeric(lresult, lresult.typ), nil +} + +// Subtract takes two values and subtracts them +func Subtract(v1, v2 sqltypes.Value) (sqltypes.Value, error) { + if v1.IsNull() || v2.IsNull() { + return sqltypes.NULL, nil + } + + lv1, err := newEvalResult(v1) + if err != nil { + return sqltypes.NULL, err + } + + lv2, err := newEvalResult(v2) + if err != nil { + return sqltypes.NULL, err + } + + lresult, err := subtractNumericWithError(lv1, lv2) + if err != nil { + return sqltypes.NULL, err + } + + return castFromNumeric(lresult, lresult.typ), nil +} + +// Multiply takes two values and multiplies it together +func Multiply(v1, v2 sqltypes.Value) (sqltypes.Value, error) { + if v1.IsNull() || v2.IsNull() { + return sqltypes.NULL, nil + } + + lv1, err := newEvalResult(v1) + if err != nil { + return sqltypes.NULL, err + } + lv2, err := newEvalResult(v2) + if err != nil { + return sqltypes.NULL, err + } + lresult, err := multiplyNumericWithError(lv1, lv2) + if err != nil { + return sqltypes.NULL, err + } + + return castFromNumeric(lresult, lresult.typ), nil +} + +// Divide (Float) for MySQL. Replicates behavior of "/" operator +func Divide(v1, v2 sqltypes.Value) (sqltypes.Value, error) { + if v1.IsNull() || v2.IsNull() { + return sqltypes.NULL, nil + } + + lv2AsFloat, err := ToFloat64(v2) + divisorIsZero := lv2AsFloat == 0 + + if divisorIsZero || err != nil { + return sqltypes.NULL, err + } + + lv1, err := newEvalResult(v1) + if err != nil { + return sqltypes.NULL, err + } + + lv2, err := newEvalResult(v2) + if err != nil { + return sqltypes.NULL, err + } + + lresult, err := divideNumericWithError(lv1, lv2) + if err != nil { + return sqltypes.NULL, err + } + + return castFromNumeric(lresult, lresult.typ), nil +} + +// NullsafeAdd adds two Values in a null-safe manner. A null value +// is treated as 0. If both values are null, then a null is returned. +// If both values are not null, a numeric value is built +// from each input: Signed->int64, Unsigned->uint64, Float->float64. +// Otherwise the 'best type fit' is chosen for the number: int64 or float64. +// Addition is performed by upgrading types as needed, or in case +// of overflow: int64->uint64, int64->float64, uint64->float64. +// Unsigned ints can only be added to positive ints. After the +// addition, if one of the input types was Decimal, then +// a Decimal is built. Otherwise, the final type of the +// result is preserved. +func NullsafeAdd(v1, v2 sqltypes.Value, resultType querypb.Type) sqltypes.Value { + if v1.IsNull() { + v1 = sqltypes.MakeTrusted(resultType, zeroBytes) + } + if v2.IsNull() { + v2 = sqltypes.MakeTrusted(resultType, zeroBytes) + } + + lv1, err := newEvalResult(v1) + if err != nil { + return sqltypes.NULL + } + lv2, err := newEvalResult(v2) + if err != nil { + return sqltypes.NULL + } + lresult := addNumeric(lv1, lv2) + + return castFromNumeric(lresult, resultType) +} + +// NullsafeCompare returns 0 if v1==v2, -1 if v1v2. +// NULL is the lowest value. If any value is +// numeric, then a numeric comparison is performed after +// necessary conversions. If none are numeric, then it's +// a simple binary comparison. Uncomparable values return an error. +func NullsafeCompare(v1, v2 sqltypes.Value) (int, error) { + // Based on the categorization defined for the types, + // we're going to allow comparison of the following: + // Null, isNumber, IsBinary. This will exclude IsQuoted + // types that are not Binary, and Expression. + if v1.IsNull() { + if v2.IsNull() { + return 0, nil + } + return -1, nil + } + if v2.IsNull() { + return 1, nil + } + if sqltypes.IsNumber(v1.Type()) || sqltypes.IsNumber(v2.Type()) { + lv1, err := newEvalResult(v1) + if err != nil { + return 0, err + } + lv2, err := newEvalResult(v2) + if err != nil { + return 0, err + } + return compareNumeric(lv1, lv2), nil + } + if isByteComparable(v1) && isByteComparable(v2) { + return bytes.Compare(v1.ToBytes(), v2.ToBytes()), nil + } + return 0, fmt.Errorf("types are not comparable: %v vs %v", v1.Type(), v2.Type()) +} + +// isByteComparable returns true if the type is binary or date/time. +func isByteComparable(v sqltypes.Value) bool { + if v.IsBinary() { + return true + } + switch v.Type() { + case sqltypes.Timestamp, sqltypes.Date, sqltypes.Time, sqltypes.Datetime: + return true + } + return false +} + +// Min returns the minimum of v1 and v2. If one of the +// values is NULL, it returns the other value. If both +// are NULL, it returns NULL. +func Min(v1, v2 sqltypes.Value) (sqltypes.Value, error) { + return minmax(v1, v2, true) +} + +// Max returns the maximum of v1 and v2. If one of the +// values is NULL, it returns the other value. If both +// are NULL, it returns NULL. +func Max(v1, v2 sqltypes.Value) (sqltypes.Value, error) { + return minmax(v1, v2, false) +} + +func minmax(v1, v2 sqltypes.Value, min bool) (sqltypes.Value, error) { + if v1.IsNull() { + return v2, nil + } + if v2.IsNull() { + return v1, nil + } + + n, err := NullsafeCompare(v1, v2) + if err != nil { + return sqltypes.NULL, err + } + + // XNOR construct. See tests. + v1isSmaller := n < 0 + if min == v1isSmaller { + return v1, nil + } + return v2, nil +} + +// Cast converts a Value to the target type. +func Cast(v sqltypes.Value, typ querypb.Type) (sqltypes.Value, error) { + if v.Type() == typ || v.IsNull() { + return v, nil + } + if sqltypes.IsSigned(typ) && v.IsSigned() { + return sqltypes.MakeTrusted(typ, v.ToBytes()), nil + } + if sqltypes.IsUnsigned(typ) && v.IsUnsigned() { + return sqltypes.MakeTrusted(typ, v.ToBytes()), nil + } + if (sqltypes.IsFloat(typ) || typ == sqltypes.Decimal) && (v.IsIntegral() || v.IsFloat() || v.Type() == sqltypes.Decimal) { + return sqltypes.MakeTrusted(typ, v.ToBytes()), nil + } + if sqltypes.IsQuoted(typ) && (v.IsIntegral() || v.IsFloat() || v.Type() == sqltypes.Decimal || v.IsQuoted()) { + return sqltypes.MakeTrusted(typ, v.ToBytes()), nil + } + + // Explicitly disallow Expression. + if v.Type() == sqltypes.Expression { + return sqltypes.NULL, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v cannot be cast to %v", v, typ) + } + + // If the above fast-paths were not possible, + // go through full validation. + return sqltypes.NewValue(typ, v.ToBytes()) +} + +// ToUint64 converts Value to uint64. +func ToUint64(v sqltypes.Value) (uint64, error) { + num, err := newIntegralNumeric(v) + if err != nil { + return 0, err + } + switch num.typ { + case sqltypes.Int64: + if num.ival < 0 { + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "negative number cannot be converted to unsigned: %d", num.ival) + } + return uint64(num.ival), nil + case sqltypes.Uint64: + return num.uval, nil + } + panic("unreachable") +} + +// ToInt64 converts Value to int64. +func ToInt64(v sqltypes.Value) (int64, error) { + num, err := newIntegralNumeric(v) + if err != nil { + return 0, err + } + switch num.typ { + case sqltypes.Int64: + return num.ival, nil + case sqltypes.Uint64: + ival := int64(num.uval) + if ival < 0 { + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsigned number overflows int64 value: %d", num.uval) + } + return ival, nil + } + panic("unreachable") +} + +// ToFloat64 converts Value to float64. +func ToFloat64(v sqltypes.Value) (float64, error) { + num, err := newEvalResult(v) + if err != nil { + return 0, err + } + switch num.typ { + case sqltypes.Int64: + return float64(num.ival), nil + case sqltypes.Uint64: + return float64(num.uval), nil + case sqltypes.Float64: + return num.fval, nil + } + + if sqltypes.IsText(num.typ) || sqltypes.IsBinary(num.typ) { + fval, err := strconv.ParseFloat(string(v.Raw()), 64) + if err != nil { + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + } + return fval, nil + } + + return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "cannot convert to float: %s", v.String()) +} + +// ToNative converts Value to a native go type. +// Decimal is returned as []byte. +func ToNative(v sqltypes.Value) (interface{}, error) { + var out interface{} + var err error + switch { + case v.Type() == sqltypes.Null: + // no-op + case v.IsSigned(): + return ToInt64(v) + case v.IsUnsigned(): + return ToUint64(v) + case v.IsFloat(): + return ToFloat64(v) + case v.IsQuoted() || v.Type() == sqltypes.Bit || v.Type() == sqltypes.Decimal: + out = v.ToBytes() + case v.Type() == sqltypes.Expression: + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v cannot be converted to a go type", v) + } + return out, err +} + +// newEvalResult parses a value and produces an evalResult containing the value +func newEvalResult(v sqltypes.Value) (evalResult, error) { + raw := v.Raw() + switch { + case v.IsBinary() || v.IsText(): + return evalResult{bytes: raw, typ: sqltypes.VarBinary}, nil + case v.IsSigned(): + ival, err := strconv.ParseInt(string(raw), 10, 64) + if err != nil { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + } + return evalResult{ival: ival, typ: sqltypes.Int64}, nil + case v.IsUnsigned(): + uval, err := strconv.ParseUint(string(raw), 10, 64) + if err != nil { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + } + return evalResult{uval: uval, typ: sqltypes.Uint64}, nil + case v.IsFloat() || v.Type() == sqltypes.Decimal: + fval, err := strconv.ParseFloat(string(raw), 64) + if err != nil { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + } + return evalResult{fval: fval, typ: sqltypes.Float64}, nil + } + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "this should not be reached. got %s", v.String()) +} + +// newIntegralNumeric parses a value and produces an Int64 or Uint64. +func newIntegralNumeric(v sqltypes.Value) (evalResult, error) { + str := v.ToString() + switch { + case v.IsSigned(): + ival, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + } + return evalResult{ival: ival, typ: sqltypes.Int64}, nil + case v.IsUnsigned(): + uval, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + } + return evalResult{uval: uval, typ: sqltypes.Uint64}, nil + } + + // For other types, do best effort. + if ival, err := strconv.ParseInt(str, 10, 64); err == nil { + return evalResult{ival: ival, typ: sqltypes.Int64}, nil + } + if uval, err := strconv.ParseUint(str, 10, 64); err == nil { + return evalResult{uval: uval, typ: sqltypes.Uint64}, nil + } + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: '%s'", str) +} + +func addNumeric(v1, v2 evalResult) evalResult { + v1, v2 = makeNumericAndprioritize(v1, v2) + switch v1.typ { + case sqltypes.Int64: + return intPlusInt(v1.ival, v2.ival) + case sqltypes.Uint64: + switch v2.typ { + case sqltypes.Int64: + return uintPlusInt(v1.uval, v2.ival) + case sqltypes.Uint64: + return uintPlusUint(v1.uval, v2.uval) + } + case sqltypes.Float64: + return floatPlusAny(v1.fval, v2) + } + panic("unreachable") +} + +func addNumericWithError(v1, v2 evalResult) (evalResult, error) { + v1, v2 = makeNumericAndprioritize(v1, v2) + switch v1.typ { + case sqltypes.Int64: + return intPlusIntWithError(v1.ival, v2.ival) + case sqltypes.Uint64: + switch v2.typ { + case sqltypes.Int64: + return uintPlusIntWithError(v1.uval, v2.ival) + case sqltypes.Uint64: + return uintPlusUintWithError(v1.uval, v2.uval) + } + case sqltypes.Float64: + return floatPlusAny(v1.fval, v2), nil + } + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid arithmetic between: %s %s", v1.Value().String(), v2.Value().String()) + +} + +func subtractNumericWithError(i1, i2 evalResult) (evalResult, error) { + v1 := makeNumeric(i1) + v2 := makeNumeric(i2) + switch v1.typ { + case sqltypes.Int64: + switch v2.typ { + case sqltypes.Int64: + return intMinusIntWithError(v1.ival, v2.ival) + case sqltypes.Uint64: + return intMinusUintWithError(v1.ival, v2.uval) + case sqltypes.Float64: + return anyMinusFloat(v1, v2.fval), nil + } + case sqltypes.Uint64: + switch v2.typ { + case sqltypes.Int64: + return uintMinusIntWithError(v1.uval, v2.ival) + case sqltypes.Uint64: + return uintMinusUintWithError(v1.uval, v2.uval) + case sqltypes.Float64: + return anyMinusFloat(v1, v2.fval), nil + } + case sqltypes.Float64: + return floatMinusAny(v1.fval, v2), nil + } + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid arithmetic between: %s %s", v1.Value().String(), v2.Value().String()) +} + +func multiplyNumericWithError(v1, v2 evalResult) (evalResult, error) { + v1, v2 = makeNumericAndprioritize(v1, v2) + switch v1.typ { + case sqltypes.Int64: + return intTimesIntWithError(v1.ival, v2.ival) + case sqltypes.Uint64: + switch v2.typ { + case sqltypes.Int64: + return uintTimesIntWithError(v1.uval, v2.ival) + case sqltypes.Uint64: + return uintTimesUintWithError(v1.uval, v2.uval) + } + case sqltypes.Float64: + return floatTimesAny(v1.fval, v2), nil + } + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid arithmetic between: %s %s", v1.Value().String(), v2.Value().String()) + +} + +func divideNumericWithError(i1, i2 evalResult) (evalResult, error) { + v1 := makeNumeric(i1) + v2 := makeNumeric(i2) + switch v1.typ { + case sqltypes.Int64: + return floatDivideAnyWithError(float64(v1.ival), v2) + + case sqltypes.Uint64: + return floatDivideAnyWithError(float64(v1.uval), v2) + + case sqltypes.Float64: + return floatDivideAnyWithError(v1.fval, v2) + } + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid arithmetic between: %s %s", v1.Value().String(), v2.Value().String()) +} + +// makeNumericAndprioritize reorders the input parameters +// to be Float64, Uint64, Int64. +func makeNumericAndprioritize(i1, i2 evalResult) (evalResult, evalResult) { + v1 := makeNumeric(i1) + v2 := makeNumeric(i2) + switch v1.typ { + case sqltypes.Int64: + if v2.typ == sqltypes.Uint64 || v2.typ == sqltypes.Float64 { + return v2, v1 + } + case sqltypes.Uint64: + if v2.typ == sqltypes.Float64 { + return v2, v1 + } + } + return v1, v2 +} + +func makeNumeric(v evalResult) evalResult { + if sqltypes.IsNumber(v.typ) { + return v + } + if ival, err := strconv.ParseInt(string(v.bytes), 10, 64); err == nil { + return evalResult{ival: ival, typ: sqltypes.Int64} + } + if fval, err := strconv.ParseFloat(string(v.bytes), 64); err == nil { + return evalResult{fval: fval, typ: sqltypes.Float64} + } + return evalResult{ival: 0, typ: sqltypes.Int64} +} + +func intPlusInt(v1, v2 int64) evalResult { + result := v1 + v2 + if v1 > 0 && v2 > 0 && result < 0 { + goto overflow + } + if v1 < 0 && v2 < 0 && result > 0 { + goto overflow + } + return evalResult{typ: sqltypes.Int64, ival: result} + +overflow: + return evalResult{typ: sqltypes.Float64, fval: float64(v1) + float64(v2)} +} + +func intPlusIntWithError(v1, v2 int64) (evalResult, error) { + result := v1 + v2 + if (result > v1) != (v2 > 0) { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in %v + %v", v1, v2) + } + return evalResult{typ: sqltypes.Int64, ival: result}, nil +} + +func intMinusIntWithError(v1, v2 int64) (evalResult, error) { + result := v1 - v2 + + if (result < v1) != (v2 > 0) { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in %v - %v", v1, v2) + } + return evalResult{typ: sqltypes.Int64, ival: result}, nil +} + +func intTimesIntWithError(v1, v2 int64) (evalResult, error) { + result := v1 * v2 + if v1 != 0 && result/v1 != v2 { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in %v * %v", v1, v2) + } + return evalResult{typ: sqltypes.Int64, ival: result}, nil + +} + +func intMinusUintWithError(v1 int64, v2 uint64) (evalResult, error) { + if v1 < 0 || v1 < int64(v2) { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v - %v", v1, v2) + } + return uintMinusUintWithError(uint64(v1), v2) +} + +func uintPlusInt(v1 uint64, v2 int64) evalResult { + return uintPlusUint(v1, uint64(v2)) +} + +func uintPlusIntWithError(v1 uint64, v2 int64) (evalResult, error) { + if v2 < 0 && v1 < uint64(v2) { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v + %v", v1, v2) + } + // convert to int -> uint is because for numeric operators (such as + or -) + // where one of the operands is an unsigned integer, the result is unsigned by default. + return uintPlusUintWithError(v1, uint64(v2)) +} + +func uintMinusIntWithError(v1 uint64, v2 int64) (evalResult, error) { + if int64(v1) < v2 && v2 > 0 { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v - %v", v1, v2) + } + // uint - (- int) = uint + int + if v2 < 0 { + return uintPlusIntWithError(v1, -v2) + } + return uintMinusUintWithError(v1, uint64(v2)) +} + +func uintTimesIntWithError(v1 uint64, v2 int64) (evalResult, error) { + if v2 < 0 || int64(v1) < 0 { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v * %v", v1, v2) + } + return uintTimesUintWithError(v1, uint64(v2)) +} + +func uintPlusUint(v1, v2 uint64) evalResult { + result := v1 + v2 + if result < v2 { + return evalResult{typ: sqltypes.Float64, fval: float64(v1) + float64(v2)} + } + return evalResult{typ: sqltypes.Uint64, uval: result} +} + +func uintPlusUintWithError(v1, v2 uint64) (evalResult, error) { + result := v1 + v2 + if result < v2 { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v + %v", v1, v2) + } + return evalResult{typ: sqltypes.Uint64, uval: result}, nil +} + +func uintMinusUintWithError(v1, v2 uint64) (evalResult, error) { + result := v1 - v2 + if v2 > v1 { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v - %v", v1, v2) + } + + return evalResult{typ: sqltypes.Uint64, uval: result}, nil +} + +func uintTimesUintWithError(v1, v2 uint64) (evalResult, error) { + result := v1 * v2 + if result < v2 || result < v1 { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v * %v", v1, v2) + } + return evalResult{typ: sqltypes.Uint64, uval: result}, nil +} + +func floatPlusAny(v1 float64, v2 evalResult) evalResult { + switch v2.typ { + case sqltypes.Int64: + v2.fval = float64(v2.ival) + case sqltypes.Uint64: + v2.fval = float64(v2.uval) + } + return evalResult{typ: sqltypes.Float64, fval: v1 + v2.fval} +} + +func floatMinusAny(v1 float64, v2 evalResult) evalResult { + switch v2.typ { + case sqltypes.Int64: + v2.fval = float64(v2.ival) + case sqltypes.Uint64: + v2.fval = float64(v2.uval) + } + return evalResult{typ: sqltypes.Float64, fval: v1 - v2.fval} +} + +func floatTimesAny(v1 float64, v2 evalResult) evalResult { + switch v2.typ { + case sqltypes.Int64: + v2.fval = float64(v2.ival) + case sqltypes.Uint64: + v2.fval = float64(v2.uval) + } + return evalResult{typ: sqltypes.Float64, fval: v1 * v2.fval} +} + +func floatDivideAnyWithError(v1 float64, v2 evalResult) (evalResult, error) { + switch v2.typ { + case sqltypes.Int64: + v2.fval = float64(v2.ival) + case sqltypes.Uint64: + v2.fval = float64(v2.uval) + } + result := v1 / v2.fval + divisorLessThanOne := v2.fval < 1 + resultMismatch := v2.fval*result != v1 + + if divisorLessThanOne && resultMismatch { + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT is out of range in %v / %v", v1, v2.fval) + } + + return evalResult{typ: sqltypes.Float64, fval: v1 / v2.fval}, nil +} + +func anyMinusFloat(v1 evalResult, v2 float64) evalResult { + switch v1.typ { + case sqltypes.Int64: + v1.fval = float64(v1.ival) + case sqltypes.Uint64: + v1.fval = float64(v1.uval) + } + return evalResult{typ: sqltypes.Float64, fval: v1.fval - v2} +} + +func castFromNumeric(v evalResult, resultType querypb.Type) sqltypes.Value { + switch { + case sqltypes.IsSigned(resultType): + switch v.typ { + case sqltypes.Int64: + return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, v.ival, 10)) + case sqltypes.Uint64: + return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, int64(v.uval), 10)) + case sqltypes.Float64: + return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, int64(v.fval), 10)) + } + case sqltypes.IsUnsigned(resultType): + switch v.typ { + case sqltypes.Uint64: + return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, v.uval, 10)) + case sqltypes.Int64: + return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, uint64(v.ival), 10)) + case sqltypes.Float64: + return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, uint64(v.fval), 10)) + } + case sqltypes.IsFloat(resultType) || resultType == sqltypes.Decimal: + switch v.typ { + case sqltypes.Int64: + return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, v.ival, 10)) + case sqltypes.Uint64: + return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, v.uval, 10)) + case sqltypes.Float64: + format := byte('g') + if resultType == sqltypes.Decimal { + format = 'f' + } + return sqltypes.MakeTrusted(resultType, strconv.AppendFloat(nil, v.fval, format, -1, 64)) + } + case resultType == sqltypes.VarChar || resultType == sqltypes.VarBinary || resultType == sqltypes.Binary || resultType == sqltypes.Text: + return sqltypes.MakeTrusted(resultType, v.bytes) + } + return sqltypes.NULL +} + +func compareNumeric(v1, v2 evalResult) int { + // Equalize the types. + switch v1.typ { + case sqltypes.Int64: + switch v2.typ { + case sqltypes.Uint64: + if v1.ival < 0 { + return -1 + } + v1 = evalResult{typ: sqltypes.Uint64, uval: uint64(v1.ival)} + case sqltypes.Float64: + v1 = evalResult{typ: sqltypes.Float64, fval: float64(v1.ival)} + } + case sqltypes.Uint64: + switch v2.typ { + case sqltypes.Int64: + if v2.ival < 0 { + return 1 + } + v2 = evalResult{typ: sqltypes.Uint64, uval: uint64(v2.ival)} + case sqltypes.Float64: + v1 = evalResult{typ: sqltypes.Float64, fval: float64(v1.uval)} + } + case sqltypes.Float64: + switch v2.typ { + case sqltypes.Int64: + v2 = evalResult{typ: sqltypes.Float64, fval: float64(v2.ival)} + case sqltypes.Uint64: + v2 = evalResult{typ: sqltypes.Float64, fval: float64(v2.uval)} + } + } + + // Both values are of the same type. + switch v1.typ { + case sqltypes.Int64: + switch { + case v1.ival == v2.ival: + return 0 + case v1.ival < v2.ival: + return -1 + } + case sqltypes.Uint64: + switch { + case v1.uval == v2.uval: + return 0 + case v1.uval < v2.uval: + return -1 + } + case sqltypes.Float64: + switch { + case v1.fval == v2.fval: + return 0 + case v1.fval < v2.fval: + return -1 + } + } + + // v1>v2 + return 1 +} diff --git a/internal/stackql-parser-fork/go/vt/vtgate/evalengine/arithmetic_test.go b/internal/stackql-parser-fork/go/vt/vtgate/evalengine/arithmetic_test.go new file mode 100644 index 00000000..aafe3d4a --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vtgate/evalengine/arithmetic_test.go @@ -0,0 +1,1482 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "strconv" + "testing" + + "github.com/stackql/stackql-parser/go/test/utils" + + "github.com/stretchr/testify/require" + + "github.com/stackql/stackql-parser/go/sqltypes" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" + "github.com/stackql/stackql-parser/go/vt/vterrors" +) + +func TestArithmetics(t *testing.T) { + type tcase struct { + v1, v2, out sqltypes.Value + err string + } + + tests := []struct { + operator string + f func(a, b sqltypes.Value) (sqltypes.Value, error) + cases []tcase + }{{ + operator: "-", + f: Subtract, + cases: []tcase{{ + // All Nulls + v1: sqltypes.NULL, + v2: sqltypes.NULL, + out: sqltypes.NULL, + }, { + // First value null. + v1: sqltypes.NewInt32(1), + v2: sqltypes.NULL, + out: sqltypes.NULL, + }, { + // Second value null. + v1: sqltypes.NULL, + v2: sqltypes.NewInt32(1), + out: sqltypes.NULL, + }, { + // case with negative value + v1: sqltypes.NewInt64(-1), + v2: sqltypes.NewInt64(-2), + out: sqltypes.NewInt64(1), + }, { + // testing for int64 overflow with min negative value + v1: sqltypes.NewInt64(math.MinInt64), + v2: sqltypes.NewInt64(1), + err: "BIGINT value is out of range in -9223372036854775808 - 1", + }, { + v1: sqltypes.NewUint64(4), + v2: sqltypes.NewInt64(5), + err: "BIGINT UNSIGNED value is out of range in 4 - 5", + }, { + // testing uint - int + v1: sqltypes.NewUint64(7), + v2: sqltypes.NewInt64(5), + out: sqltypes.NewUint64(2), + }, { + v1: sqltypes.NewUint64(math.MaxUint64), + v2: sqltypes.NewInt64(0), + out: sqltypes.NewUint64(math.MaxUint64), + }, { + // testing for int64 overflow + v1: sqltypes.NewInt64(math.MinInt64), + v2: sqltypes.NewUint64(0), + err: "BIGINT UNSIGNED value is out of range in -9223372036854775808 - 0", + }, { + v1: sqltypes.TestValue(querypb.Type_VARCHAR, "c"), + v2: sqltypes.NewInt64(1), + out: sqltypes.NewInt64(-1), + }, { + v1: sqltypes.NewUint64(1), + v2: sqltypes.TestValue(querypb.Type_VARCHAR, "c"), + out: sqltypes.NewUint64(1), + }, { + // testing for error for parsing float value to uint64 + v1: sqltypes.TestValue(querypb.Type_UINT64, "1.2"), + v2: sqltypes.NewInt64(2), + err: "strconv.ParseUint: parsing \"1.2\": invalid syntax", + }, { + // testing for error for parsing float value to uint64 + v1: sqltypes.NewUint64(2), + v2: sqltypes.TestValue(querypb.Type_UINT64, "1.2"), + err: "strconv.ParseUint: parsing \"1.2\": invalid syntax", + }, { + // uint64 - uint64 + v1: sqltypes.NewUint64(8), + v2: sqltypes.NewUint64(4), + out: sqltypes.NewUint64(4), + }, { + // testing for float subtraction: float - int + v1: sqltypes.NewFloat64(1.2), + v2: sqltypes.NewInt64(2), + out: sqltypes.NewFloat64(-0.8), + }, { + // testing for float subtraction: float - uint + v1: sqltypes.NewFloat64(1.2), + v2: sqltypes.NewUint64(2), + out: sqltypes.NewFloat64(-0.8), + }, { + v1: sqltypes.NewInt64(-1), + v2: sqltypes.NewUint64(2), + err: "BIGINT UNSIGNED value is out of range in -1 - 2", + }, { + v1: sqltypes.NewInt64(2), + v2: sqltypes.NewUint64(1), + out: sqltypes.NewUint64(1), + }, { + // testing int64 - float64 method + v1: sqltypes.NewInt64(-2), + v2: sqltypes.NewFloat64(1.0), + out: sqltypes.NewFloat64(-3.0), + }, { + // testing uint64 - float64 method + v1: sqltypes.NewUint64(1), + v2: sqltypes.NewFloat64(-2.0), + out: sqltypes.NewFloat64(3.0), + }, { + // testing uint - int to return uintplusint + v1: sqltypes.NewUint64(1), + v2: sqltypes.NewInt64(-2), + out: sqltypes.NewUint64(3), + }, { + // testing for float - float + v1: sqltypes.NewFloat64(1.2), + v2: sqltypes.NewFloat64(3.2), + out: sqltypes.NewFloat64(-2), + }, { + // testing uint - uint if v2 > v1 + v1: sqltypes.NewUint64(2), + v2: sqltypes.NewUint64(4), + err: "BIGINT UNSIGNED value is out of range in 2 - 4", + }, { + // testing uint - (- int) + v1: sqltypes.NewUint64(1), + v2: sqltypes.NewInt64(-2), + out: sqltypes.NewUint64(3), + }}, + }, { + operator: "+", + f: Add, + cases: []tcase{{ + // All Nulls + v1: sqltypes.NULL, + v2: sqltypes.NULL, + out: sqltypes.NULL, + }, { + // First value null. + v1: sqltypes.NewInt32(1), + v2: sqltypes.NULL, + out: sqltypes.NULL, + }, { + // Second value null. + v1: sqltypes.NULL, + v2: sqltypes.NewInt32(1), + out: sqltypes.NULL, + }, { + // case with negatives + v1: sqltypes.NewInt64(-1), + v2: sqltypes.NewInt64(-2), + out: sqltypes.NewInt64(-3), + }, { + // testing for overflow int64, result will be unsigned int + v1: sqltypes.NewInt64(math.MaxInt64), + v2: sqltypes.NewUint64(2), + out: sqltypes.NewUint64(9223372036854775809), + }, { + v1: sqltypes.NewInt64(-2), + v2: sqltypes.NewUint64(1), + err: "BIGINT UNSIGNED value is out of range in 1 + -2", + }, { + v1: sqltypes.NewInt64(math.MaxInt64), + v2: sqltypes.NewInt64(-2), + out: sqltypes.NewInt64(9223372036854775805), + }, { + // Normal case + v1: sqltypes.NewUint64(1), + v2: sqltypes.NewUint64(2), + out: sqltypes.NewUint64(3), + }, { + // testing for overflow uint64 + v1: sqltypes.NewUint64(math.MaxUint64), + v2: sqltypes.NewUint64(2), + err: "BIGINT UNSIGNED value is out of range in 18446744073709551615 + 2", + }, { + // int64 underflow + v1: sqltypes.NewInt64(math.MinInt64), + v2: sqltypes.NewInt64(-2), + err: "BIGINT value is out of range in -9223372036854775808 + -2", + }, { + // checking int64 max value can be returned + v1: sqltypes.NewInt64(math.MaxInt64), + v2: sqltypes.NewUint64(0), + out: sqltypes.NewUint64(9223372036854775807), + }, { + // testing whether uint64 max value can be returned + v1: sqltypes.NewUint64(math.MaxUint64), + v2: sqltypes.NewInt64(0), + out: sqltypes.NewUint64(math.MaxUint64), + }, { + v1: sqltypes.NewUint64(math.MaxInt64), + v2: sqltypes.NewInt64(1), + out: sqltypes.NewUint64(9223372036854775808), + }, { + v1: sqltypes.NewUint64(1), + v2: sqltypes.TestValue(querypb.Type_VARCHAR, "c"), + out: sqltypes.NewUint64(1), + }, { + v1: sqltypes.NewUint64(1), + v2: sqltypes.TestValue(querypb.Type_VARCHAR, "1.2"), + out: sqltypes.NewFloat64(2.2), + }, { + v1: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + v2: sqltypes.NewInt64(2), + err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + }, { + v1: sqltypes.NewInt64(2), + v2: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + }, { + // testing for uint64 overflow with max uint64 + int value + v1: sqltypes.NewUint64(math.MaxUint64), + v2: sqltypes.NewInt64(2), + err: "BIGINT UNSIGNED value is out of range in 18446744073709551615 + 2", + }}, + }, { + operator: "/", + f: Divide, + cases: []tcase{{ + //All Nulls + v1: sqltypes.NULL, + v2: sqltypes.NULL, + out: sqltypes.NULL, + }, { + // First value null. + v1: sqltypes.NULL, + v2: sqltypes.NewInt32(1), + out: sqltypes.NULL, + }, { + // Second value null. + v1: sqltypes.NewInt32(1), + v2: sqltypes.NULL, + out: sqltypes.NULL, + }, { + // Second arg 0 + v1: sqltypes.NewInt32(5), + v2: sqltypes.NewInt32(0), + out: sqltypes.NULL, + }, { + // Both arguments zero + v1: sqltypes.NewInt32(0), + v2: sqltypes.NewInt32(0), + out: sqltypes.NULL, + }, { + // case with negative value + v1: sqltypes.NewInt64(-1), + v2: sqltypes.NewInt64(-2), + out: sqltypes.NewFloat64(0.5000), + }, { + // float64 division by zero + v1: sqltypes.NewFloat64(2), + v2: sqltypes.NewFloat64(0), + out: sqltypes.NULL, + }, { + // Lower bound for int64 + v1: sqltypes.NewInt64(math.MinInt64), + v2: sqltypes.NewInt64(1), + out: sqltypes.NewFloat64(math.MinInt64), + }, { + // upper bound for uint64 + v1: sqltypes.NewUint64(math.MaxUint64), + v2: sqltypes.NewUint64(1), + out: sqltypes.NewFloat64(math.MaxUint64), + }, { + // testing for error in types + v1: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + v2: sqltypes.NewInt64(2), + err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + }, { + // testing for error in types + v1: sqltypes.NewInt64(2), + v2: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + }, { + // testing for uint/int + v1: sqltypes.NewUint64(4), + v2: sqltypes.NewInt64(5), + out: sqltypes.NewFloat64(0.8), + }, { + // testing for uint/uint + v1: sqltypes.NewUint64(1), + v2: sqltypes.NewUint64(2), + out: sqltypes.NewFloat64(0.5), + }, { + // testing for float64/int64 + v1: sqltypes.TestValue(querypb.Type_FLOAT64, "1.2"), + v2: sqltypes.NewInt64(-2), + out: sqltypes.NewFloat64(-0.6), + }, { + // testing for float64/uint64 + v1: sqltypes.TestValue(querypb.Type_FLOAT64, "1.2"), + v2: sqltypes.NewUint64(2), + out: sqltypes.NewFloat64(0.6), + }, { + // testing for overflow of float64 + v1: sqltypes.NewFloat64(math.MaxFloat64), + v2: sqltypes.NewFloat64(0.5), + err: "BIGINT is out of range in 1.7976931348623157e+308 / 0.5", + }}, + }, { + operator: "*", + f: Multiply, + cases: []tcase{{ + //All Nulls + v1: sqltypes.NULL, + v2: sqltypes.NULL, + out: sqltypes.NULL, + }, { + // First value null. + v1: sqltypes.NewInt32(1), + v2: sqltypes.NULL, + out: sqltypes.NULL, + }, { + // Second value null. + v1: sqltypes.NULL, + v2: sqltypes.NewInt32(1), + out: sqltypes.NULL, + }, { + // case with negative value + v1: sqltypes.NewInt64(-1), + v2: sqltypes.NewInt64(-2), + out: sqltypes.NewInt64(2), + }, { + // testing for int64 overflow with min negative value + v1: sqltypes.NewInt64(math.MinInt64), + v2: sqltypes.NewInt64(1), + out: sqltypes.NewInt64(math.MinInt64), + }, { + // testing for error in types + v1: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + v2: sqltypes.NewInt64(2), + err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + }, { + // testing for error in types + v1: sqltypes.NewInt64(2), + v2: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + }, { + // testing for uint*int + v1: sqltypes.NewUint64(4), + v2: sqltypes.NewInt64(5), + out: sqltypes.NewUint64(20), + }, { + // testing for uint*uint + v1: sqltypes.NewUint64(1), + v2: sqltypes.NewUint64(2), + out: sqltypes.NewUint64(2), + }, { + // testing for float64*int64 + v1: sqltypes.TestValue(querypb.Type_FLOAT64, "1.2"), + v2: sqltypes.NewInt64(-2), + out: sqltypes.NewFloat64(-2.4), + }, { + // testing for float64*uint64 + v1: sqltypes.TestValue(querypb.Type_FLOAT64, "1.2"), + v2: sqltypes.NewUint64(2), + out: sqltypes.NewFloat64(2.4), + }, { + // testing for overflow of int64 + v1: sqltypes.NewInt64(math.MaxInt64), + v2: sqltypes.NewInt64(2), + err: "BIGINT value is out of range in 9223372036854775807 * 2", + }, { + // testing for underflow of uint64*max.uint64 + v1: sqltypes.NewInt64(2), + v2: sqltypes.NewUint64(math.MaxUint64), + err: "BIGINT UNSIGNED value is out of range in 18446744073709551615 * 2", + }, { + v1: sqltypes.NewUint64(math.MaxUint64), + v2: sqltypes.NewUint64(1), + out: sqltypes.NewUint64(math.MaxUint64), + }, { + //Checking whether maxInt value can be passed as uint value + v1: sqltypes.NewUint64(math.MaxInt64), + v2: sqltypes.NewInt64(3), + err: "BIGINT UNSIGNED value is out of range in 9223372036854775807 * 3", + }}, + }} + + for _, test := range tests { + t.Run(test.operator, func(t *testing.T) { + for _, tcase := range test.cases { + name := fmt.Sprintf("%s%s%s", tcase.v1.String(), test.operator, tcase.v2.String()) + t.Run(name, func(t *testing.T) { + got, err := test.f(tcase.v1, tcase.v2) + if tcase.err == "" { + require.NoError(t, err) + require.Equal(t, tcase.out, got) + } else { + require.EqualError(t, err, tcase.err) + } + }) + } + }) + } +} + +func TestNullsafeAdd(t *testing.T) { + tcases := []struct { + v1, v2 sqltypes.Value + out sqltypes.Value + err error + }{{ + // All nulls. + v1: sqltypes.NULL, + v2: sqltypes.NULL, + out: sqltypes.NewInt64(0), + }, { + // First value null. + v1: sqltypes.NewInt32(1), + v2: sqltypes.NULL, + out: sqltypes.NewInt64(1), + }, { + // Second value null. + v1: sqltypes.NULL, + v2: sqltypes.NewInt32(1), + out: sqltypes.NewInt64(1), + }, { + // Normal case. + v1: sqltypes.NewInt64(1), + v2: sqltypes.NewInt64(2), + out: sqltypes.NewInt64(3), + }, { + // Make sure underlying error is returned for LHS. + v1: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + v2: sqltypes.NewInt64(2), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + }, { + // Make sure underlying error is returned for RHS. + v1: sqltypes.NewInt64(2), + v2: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + }, { + // Make sure underlying error is returned while adding. + v1: sqltypes.NewInt64(-1), + v2: sqltypes.NewUint64(2), + out: sqltypes.NewInt64(-9223372036854775808), + }, { + // Make sure underlying error is returned while converting. + v1: sqltypes.NewFloat64(1), + v2: sqltypes.NewFloat64(2), + out: sqltypes.NewInt64(3), + }} + for _, tcase := range tcases { + got := NullsafeAdd(tcase.v1, tcase.v2, querypb.Type_INT64) + + if !reflect.DeepEqual(got, tcase.out) { + t.Errorf("NullsafeAdd(%v, %v): %v, want %v", printValue(tcase.v1), printValue(tcase.v2), printValue(got), printValue(tcase.out)) + } + } +} + +func TestNullsafeCompare(t *testing.T) { + tcases := []struct { + v1, v2 sqltypes.Value + out int + err error + }{{ + // All nulls. + v1: sqltypes.NULL, + v2: sqltypes.NULL, + out: 0, + }, { + // LHS null. + v1: sqltypes.NULL, + v2: sqltypes.NewInt64(1), + out: -1, + }, { + // RHS null. + v1: sqltypes.NewInt64(1), + v2: sqltypes.NULL, + out: 1, + }, { + // LHS Text + v1: sqltypes.TestValue(querypb.Type_VARCHAR, "abcd"), + v2: sqltypes.TestValue(querypb.Type_VARCHAR, "abcd"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, "types are not comparable: VARCHAR vs VARCHAR"), + }, { + // Make sure underlying error is returned for LHS. + v1: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + v2: sqltypes.NewInt64(2), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + }, { + // Make sure underlying error is returned for RHS. + v1: sqltypes.NewInt64(2), + v2: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + }, { + // Numeric equal. + v1: sqltypes.NewInt64(1), + v2: sqltypes.NewUint64(1), + out: 0, + }, { + // Numeric unequal. + v1: sqltypes.NewInt64(1), + v2: sqltypes.NewUint64(2), + out: -1, + }, { + // Non-numeric equal + v1: sqltypes.TestValue(querypb.Type_VARBINARY, "abcd"), + v2: sqltypes.TestValue(querypb.Type_BINARY, "abcd"), + out: 0, + }, { + // Non-numeric unequal + v1: sqltypes.TestValue(querypb.Type_VARBINARY, "abcd"), + v2: sqltypes.TestValue(querypb.Type_BINARY, "bcde"), + out: -1, + }, { + // Date/Time types + v1: sqltypes.TestValue(querypb.Type_DATETIME, "1000-01-01 00:00:00"), + v2: sqltypes.TestValue(querypb.Type_BINARY, "1000-01-01 00:00:00"), + out: 0, + }, { + // Date/Time types + v1: sqltypes.TestValue(querypb.Type_DATETIME, "2000-01-01 00:00:00"), + v2: sqltypes.TestValue(querypb.Type_BINARY, "1000-01-01 00:00:00"), + out: 1, + }, { + // Date/Time types + v1: sqltypes.TestValue(querypb.Type_DATETIME, "1000-01-01 00:00:00"), + v2: sqltypes.TestValue(querypb.Type_BINARY, "2000-01-01 00:00:00"), + out: -1, + }} + for _, tcase := range tcases { + got, err := NullsafeCompare(tcase.v1, tcase.v2) + if !vterrors.Equals(err, tcase.err) { + t.Errorf("NullsafeCompare(%v, %v) error: %v, want %v", printValue(tcase.v1), printValue(tcase.v2), vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err != nil { + continue + } + + if got != tcase.out { + t.Errorf("NullsafeCompare(%v, %v): %v, want %v", printValue(tcase.v1), printValue(tcase.v2), got, tcase.out) + } + } +} + +func TestCast(t *testing.T) { + tcases := []struct { + typ querypb.Type + v sqltypes.Value + out sqltypes.Value + err error + }{{ + typ: querypb.Type_VARCHAR, + v: sqltypes.NULL, + out: sqltypes.NULL, + }, { + typ: querypb.Type_VARCHAR, + v: sqltypes.TestValue(querypb.Type_VARCHAR, "exact types"), + out: sqltypes.TestValue(querypb.Type_VARCHAR, "exact types"), + }, { + typ: querypb.Type_INT64, + v: sqltypes.TestValue(querypb.Type_INT32, "32"), + out: sqltypes.TestValue(querypb.Type_INT64, "32"), + }, { + typ: querypb.Type_INT24, + v: sqltypes.TestValue(querypb.Type_UINT64, "64"), + out: sqltypes.TestValue(querypb.Type_INT24, "64"), + }, { + typ: querypb.Type_INT24, + v: sqltypes.TestValue(querypb.Type_VARCHAR, "bad int"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, `strconv.ParseInt: parsing "bad int": invalid syntax`), + }, { + typ: querypb.Type_UINT64, + v: sqltypes.TestValue(querypb.Type_UINT32, "32"), + out: sqltypes.TestValue(querypb.Type_UINT64, "32"), + }, { + typ: querypb.Type_UINT24, + v: sqltypes.TestValue(querypb.Type_INT64, "64"), + out: sqltypes.TestValue(querypb.Type_UINT24, "64"), + }, { + typ: querypb.Type_UINT24, + v: sqltypes.TestValue(querypb.Type_INT64, "-1"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, `strconv.ParseUint: parsing "-1": invalid syntax`), + }, { + typ: querypb.Type_FLOAT64, + v: sqltypes.TestValue(querypb.Type_INT64, "64"), + out: sqltypes.TestValue(querypb.Type_FLOAT64, "64"), + }, { + typ: querypb.Type_FLOAT32, + v: sqltypes.TestValue(querypb.Type_FLOAT64, "64"), + out: sqltypes.TestValue(querypb.Type_FLOAT32, "64"), + }, { + typ: querypb.Type_FLOAT32, + v: sqltypes.TestValue(querypb.Type_DECIMAL, "1.24"), + out: sqltypes.TestValue(querypb.Type_FLOAT32, "1.24"), + }, { + typ: querypb.Type_FLOAT64, + v: sqltypes.TestValue(querypb.Type_VARCHAR, "1.25"), + out: sqltypes.TestValue(querypb.Type_FLOAT64, "1.25"), + }, { + typ: querypb.Type_FLOAT64, + v: sqltypes.TestValue(querypb.Type_VARCHAR, "bad float"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, `strconv.ParseFloat: parsing "bad float": invalid syntax`), + }, { + typ: querypb.Type_VARCHAR, + v: sqltypes.TestValue(querypb.Type_INT64, "64"), + out: sqltypes.TestValue(querypb.Type_VARCHAR, "64"), + }, { + typ: querypb.Type_VARBINARY, + v: sqltypes.TestValue(querypb.Type_FLOAT64, "64"), + out: sqltypes.TestValue(querypb.Type_VARBINARY, "64"), + }, { + typ: querypb.Type_VARBINARY, + v: sqltypes.TestValue(querypb.Type_DECIMAL, "1.24"), + out: sqltypes.TestValue(querypb.Type_VARBINARY, "1.24"), + }, { + typ: querypb.Type_VARBINARY, + v: sqltypes.TestValue(querypb.Type_VARCHAR, "1.25"), + out: sqltypes.TestValue(querypb.Type_VARBINARY, "1.25"), + }, { + typ: querypb.Type_VARCHAR, + v: sqltypes.TestValue(querypb.Type_VARBINARY, "valid string"), + out: sqltypes.TestValue(querypb.Type_VARCHAR, "valid string"), + }, { + typ: querypb.Type_VARCHAR, + v: sqltypes.TestValue(sqltypes.Expression, "bad string"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "EXPRESSION(bad string) cannot be cast to VARCHAR"), + }} + for _, tcase := range tcases { + got, err := Cast(tcase.v, tcase.typ) + if !vterrors.Equals(err, tcase.err) { + t.Errorf("Cast(%v) error: %v, want %v", tcase.v, vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err != nil { + continue + } + + if !reflect.DeepEqual(got, tcase.out) { + t.Errorf("Cast(%v): %v, want %v", tcase.v, got, tcase.out) + } + } +} + +func TestToUint64(t *testing.T) { + tcases := []struct { + v sqltypes.Value + out uint64 + err error + }{{ + v: sqltypes.TestValue(querypb.Type_VARCHAR, "abcd"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 'abcd'"), + }, { + v: sqltypes.NewInt64(-1), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "negative number cannot be converted to unsigned: -1"), + }, { + v: sqltypes.NewInt64(1), + out: 1, + }, { + v: sqltypes.NewUint64(1), + out: 1, + }} + for _, tcase := range tcases { + got, err := ToUint64(tcase.v) + if !vterrors.Equals(err, tcase.err) { + t.Errorf("ToUint64(%v) error: %v, want %v", tcase.v, vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err != nil { + continue + } + + if got != tcase.out { + t.Errorf("ToUint64(%v): %v, want %v", tcase.v, got, tcase.out) + } + } +} + +func TestToInt64(t *testing.T) { + tcases := []struct { + v sqltypes.Value + out int64 + err error + }{{ + v: sqltypes.TestValue(querypb.Type_VARCHAR, "abcd"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 'abcd'"), + }, { + v: sqltypes.NewUint64(18446744073709551615), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unsigned number overflows int64 value: 18446744073709551615"), + }, { + v: sqltypes.NewInt64(1), + out: 1, + }, { + v: sqltypes.NewUint64(1), + out: 1, + }} + for _, tcase := range tcases { + got, err := ToInt64(tcase.v) + if !vterrors.Equals(err, tcase.err) { + t.Errorf("ToInt64(%v) error: %v, want %v", tcase.v, vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err != nil { + continue + } + + if got != tcase.out { + t.Errorf("ToInt64(%v): %v, want %v", tcase.v, got, tcase.out) + } + } +} + +func TestToFloat64(t *testing.T) { + tcases := []struct { + v sqltypes.Value + out float64 + err error + }{{ + v: sqltypes.TestValue(querypb.Type_VARCHAR, "abcd"), + out: 0, + }, { + v: sqltypes.TestValue(querypb.Type_VARCHAR, "1.2"), + out: 1.2, + }, { + v: sqltypes.NewInt64(1), + out: 1, + }, { + v: sqltypes.NewUint64(1), + out: 1, + }, { + v: sqltypes.NewFloat64(1.2), + out: 1.2, + }, { + v: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + }} + for _, tcase := range tcases { + t.Run(tcase.v.String(), func(t *testing.T) { + got, err := ToFloat64(tcase.v) + if tcase.err != nil { + require.EqualError(t, err, tcase.err.Error()) + } else { + require.Equal(t, tcase.out, got) + } + }) + } +} + +func TestToNative(t *testing.T) { + testcases := []struct { + in sqltypes.Value + out interface{} + }{{ + in: sqltypes.NULL, + out: nil, + }, { + in: sqltypes.TestValue(querypb.Type_INT8, "1"), + out: int64(1), + }, { + in: sqltypes.TestValue(querypb.Type_INT16, "1"), + out: int64(1), + }, { + in: sqltypes.TestValue(querypb.Type_INT24, "1"), + out: int64(1), + }, { + in: sqltypes.TestValue(querypb.Type_INT32, "1"), + out: int64(1), + }, { + in: sqltypes.TestValue(querypb.Type_INT64, "1"), + out: int64(1), + }, { + in: sqltypes.TestValue(querypb.Type_UINT8, "1"), + out: uint64(1), + }, { + in: sqltypes.TestValue(querypb.Type_UINT16, "1"), + out: uint64(1), + }, { + in: sqltypes.TestValue(querypb.Type_UINT24, "1"), + out: uint64(1), + }, { + in: sqltypes.TestValue(querypb.Type_UINT32, "1"), + out: uint64(1), + }, { + in: sqltypes.TestValue(querypb.Type_UINT64, "1"), + out: uint64(1), + }, { + in: sqltypes.TestValue(querypb.Type_FLOAT32, "1"), + out: float64(1), + }, { + in: sqltypes.TestValue(querypb.Type_FLOAT64, "1"), + out: float64(1), + }, { + in: sqltypes.TestValue(querypb.Type_TIMESTAMP, "2012-02-24 23:19:43"), + out: []byte("2012-02-24 23:19:43"), + }, { + in: sqltypes.TestValue(querypb.Type_DATE, "2012-02-24"), + out: []byte("2012-02-24"), + }, { + in: sqltypes.TestValue(querypb.Type_TIME, "23:19:43"), + out: []byte("23:19:43"), + }, { + in: sqltypes.TestValue(querypb.Type_DATETIME, "2012-02-24 23:19:43"), + out: []byte("2012-02-24 23:19:43"), + }, { + in: sqltypes.TestValue(querypb.Type_YEAR, "1"), + out: uint64(1), + }, { + in: sqltypes.TestValue(querypb.Type_DECIMAL, "1"), + out: []byte("1"), + }, { + in: sqltypes.TestValue(querypb.Type_TEXT, "a"), + out: []byte("a"), + }, { + in: sqltypes.TestValue(querypb.Type_BLOB, "a"), + out: []byte("a"), + }, { + in: sqltypes.TestValue(querypb.Type_VARCHAR, "a"), + out: []byte("a"), + }, { + in: sqltypes.TestValue(querypb.Type_VARBINARY, "a"), + out: []byte("a"), + }, { + in: sqltypes.TestValue(querypb.Type_CHAR, "a"), + out: []byte("a"), + }, { + in: sqltypes.TestValue(querypb.Type_BINARY, "a"), + out: []byte("a"), + }, { + in: sqltypes.TestValue(querypb.Type_BIT, "1"), + out: []byte("1"), + }, { + in: sqltypes.TestValue(querypb.Type_ENUM, "a"), + out: []byte("a"), + }, { + in: sqltypes.TestValue(querypb.Type_SET, "a"), + out: []byte("a"), + }} + for _, tcase := range testcases { + v, err := ToNative(tcase.in) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(v, tcase.out) { + t.Errorf("%v.ToNative = %#v, want %#v", tcase.in, v, tcase.out) + } + } + + // Test Expression failure. + _, err := ToNative(sqltypes.TestValue(querypb.Type_EXPRESSION, "aa")) + want := vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "EXPRESSION(aa) cannot be converted to a go type") + if !vterrors.Equals(err, want) { + t.Errorf("ToNative(EXPRESSION): %v, want %v", vterrors.Print(err), vterrors.Print(want)) + } +} + +var mustMatch = utils.MustMatchFn( + []interface{}{ // types with unexported fields + evalResult{}, + }, + []string{}, // ignored fields +) + +func TestNewNumeric(t *testing.T) { + tcases := []struct { + v sqltypes.Value + out evalResult + err error + }{{ + v: sqltypes.NewInt64(1), + out: evalResult{typ: querypb.Type_INT64, ival: 1}, + }, { + v: sqltypes.NewUint64(1), + out: evalResult{typ: querypb.Type_UINT64, uval: 1}, + }, { + v: sqltypes.NewFloat64(1), + out: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + }, { + // For non-number type, Int64 is the default. + v: sqltypes.TestValue(querypb.Type_VARCHAR, "1"), + out: evalResult{typ: querypb.Type_INT64, ival: 1}, + }, { + // If Int64 can't work, we use Float64. + v: sqltypes.TestValue(querypb.Type_VARCHAR, "1.2"), + out: evalResult{typ: querypb.Type_FLOAT64, fval: 1.2}, + }, { + // Only valid Int64 allowed if type is Int64. + v: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + }, { + // Only valid Uint64 allowed if type is Uint64. + v: sqltypes.TestValue(querypb.Type_UINT64, "1.2"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseUint: parsing \"1.2\": invalid syntax"), + }, { + // Only valid Float64 allowed if type is Float64. + v: sqltypes.TestValue(querypb.Type_FLOAT64, "abcd"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseFloat: parsing \"abcd\": invalid syntax"), + }, { + v: sqltypes.TestValue(querypb.Type_VARCHAR, "abcd"), + out: evalResult{typ: querypb.Type_FLOAT64, fval: 0}, + }} + for _, tcase := range tcases { + got, err := newEvalResult(tcase.v) + if !vterrors.Equals(err, tcase.err) { + t.Errorf("newEvalResult(%s) error: %v, want %v", printValue(tcase.v), vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err == nil { + continue + } + + mustMatch(t, tcase.out, got, "newEvalResult") + } +} + +func TestNewIntegralNumeric(t *testing.T) { + tcases := []struct { + v sqltypes.Value + out evalResult + err error + }{{ + v: sqltypes.NewInt64(1), + out: evalResult{typ: querypb.Type_INT64, ival: 1}, + }, { + v: sqltypes.NewUint64(1), + out: evalResult{typ: querypb.Type_UINT64, uval: 1}, + }, { + v: sqltypes.NewFloat64(1), + out: evalResult{typ: querypb.Type_INT64, ival: 1}, + }, { + // For non-number type, Int64 is the default. + v: sqltypes.TestValue(querypb.Type_VARCHAR, "1"), + out: evalResult{typ: querypb.Type_INT64, ival: 1}, + }, { + // If Int64 can't work, we use Uint64. + v: sqltypes.TestValue(querypb.Type_VARCHAR, "18446744073709551615"), + out: evalResult{typ: querypb.Type_UINT64, uval: 18446744073709551615}, + }, { + // Only valid Int64 allowed if type is Int64. + v: sqltypes.TestValue(querypb.Type_INT64, "1.2"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + }, { + // Only valid Uint64 allowed if type is Uint64. + v: sqltypes.TestValue(querypb.Type_UINT64, "1.2"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseUint: parsing \"1.2\": invalid syntax"), + }, { + v: sqltypes.TestValue(querypb.Type_VARCHAR, "abcd"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 'abcd'"), + }} + for _, tcase := range tcases { + got, err := newIntegralNumeric(tcase.v) + if err != nil && !vterrors.Equals(err, tcase.err) { + t.Errorf("newIntegralNumeric(%s) error: %v, want %v", printValue(tcase.v), vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err == nil { + continue + } + + mustMatch(t, tcase.out, got, "newIntegralNumeric") + } +} + +func TestAddNumeric(t *testing.T) { + tcases := []struct { + v1, v2 evalResult + out evalResult + err error + }{{ + v1: evalResult{typ: querypb.Type_INT64, ival: 1}, + v2: evalResult{typ: querypb.Type_INT64, ival: 2}, + out: evalResult{typ: querypb.Type_INT64, ival: 3}, + }, { + v1: evalResult{typ: querypb.Type_INT64, ival: 1}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 2}, + out: evalResult{typ: querypb.Type_UINT64, uval: 3}, + }, { + v1: evalResult{typ: querypb.Type_INT64, ival: 1}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 2}, + out: evalResult{typ: querypb.Type_FLOAT64, fval: 3}, + }, { + v1: evalResult{typ: querypb.Type_UINT64, uval: 1}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 2}, + out: evalResult{typ: querypb.Type_UINT64, uval: 3}, + }, { + v1: evalResult{typ: querypb.Type_UINT64, uval: 1}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 2}, + out: evalResult{typ: querypb.Type_FLOAT64, fval: 3}, + }, { + v1: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 2}, + out: evalResult{typ: querypb.Type_FLOAT64, fval: 3}, + }, { + // Int64 overflow. + v1: evalResult{typ: querypb.Type_INT64, ival: 9223372036854775807}, + v2: evalResult{typ: querypb.Type_INT64, ival: 2}, + out: evalResult{typ: querypb.Type_FLOAT64, fval: 9223372036854775809}, + }, { + // Int64 underflow. + v1: evalResult{typ: querypb.Type_INT64, ival: -9223372036854775807}, + v2: evalResult{typ: querypb.Type_INT64, ival: -2}, + out: evalResult{typ: querypb.Type_FLOAT64, fval: -9223372036854775809}, + }, { + v1: evalResult{typ: querypb.Type_INT64, ival: -1}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 2}, + out: evalResult{typ: querypb.Type_FLOAT64, fval: 18446744073709551617}, + }, { + // Uint64 overflow. + v1: evalResult{typ: querypb.Type_UINT64, uval: 18446744073709551615}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 2}, + out: evalResult{typ: querypb.Type_FLOAT64, fval: 18446744073709551617}, + }} + for _, tcase := range tcases { + got := addNumeric(tcase.v1, tcase.v2) + + mustMatch(t, tcase.out, got, "addNumeric") + } +} + +func TestPrioritize(t *testing.T) { + ival := evalResult{typ: querypb.Type_INT64, ival: -1} + uval := evalResult{typ: querypb.Type_UINT64, uval: 1} + fval := evalResult{typ: querypb.Type_FLOAT64, fval: 1.2} + textIntval := evalResult{typ: querypb.Type_VARBINARY, bytes: []byte("-1")} + textFloatval := evalResult{typ: querypb.Type_VARBINARY, bytes: []byte("1.2")} + + tcases := []struct { + v1, v2 evalResult + out1, out2 evalResult + }{{ + v1: ival, + v2: uval, + out1: uval, + out2: ival, + }, { + v1: ival, + v2: fval, + out1: fval, + out2: ival, + }, { + v1: uval, + v2: ival, + out1: uval, + out2: ival, + }, { + v1: uval, + v2: fval, + out1: fval, + out2: uval, + }, { + v1: fval, + v2: ival, + out1: fval, + out2: ival, + }, { + v1: fval, + v2: uval, + out1: fval, + out2: uval, + }, { + v1: textIntval, + v2: ival, + out1: ival, + out2: ival, + }, { + v1: ival, + v2: textFloatval, + out1: fval, + out2: ival, + }} + for _, tcase := range tcases { + t.Run(tcase.v1.Value().String()+" - "+tcase.v2.Value().String(), func(t *testing.T) { + got1, got2 := makeNumericAndprioritize(tcase.v1, tcase.v2) + mustMatch(t, tcase.out1, got1, "makeNumericAndprioritize") + mustMatch(t, tcase.out2, got2, "makeNumericAndprioritize") + }) + } +} + +func TestCastFromNumeric(t *testing.T) { + tcases := []struct { + typ querypb.Type + v evalResult + out sqltypes.Value + err error + }{{ + typ: querypb.Type_INT64, + v: evalResult{typ: querypb.Type_INT64, ival: 1}, + out: sqltypes.NewInt64(1), + }, { + typ: querypb.Type_INT64, + v: evalResult{typ: querypb.Type_UINT64, uval: 1}, + out: sqltypes.NewInt64(1), + }, { + typ: querypb.Type_INT64, + v: evalResult{typ: querypb.Type_FLOAT64, fval: 1.2e-16}, + out: sqltypes.NewInt64(0), + }, { + typ: querypb.Type_UINT64, + v: evalResult{typ: querypb.Type_INT64, ival: 1}, + out: sqltypes.NewUint64(1), + }, { + typ: querypb.Type_UINT64, + v: evalResult{typ: querypb.Type_UINT64, uval: 1}, + out: sqltypes.NewUint64(1), + }, { + typ: querypb.Type_UINT64, + v: evalResult{typ: querypb.Type_FLOAT64, fval: 1.2e-16}, + out: sqltypes.NewUint64(0), + }, { + typ: querypb.Type_FLOAT64, + v: evalResult{typ: querypb.Type_INT64, ival: 1}, + out: sqltypes.TestValue(querypb.Type_FLOAT64, "1"), + }, { + typ: querypb.Type_FLOAT64, + v: evalResult{typ: querypb.Type_UINT64, uval: 1}, + out: sqltypes.TestValue(querypb.Type_FLOAT64, "1"), + }, { + typ: querypb.Type_FLOAT64, + v: evalResult{typ: querypb.Type_FLOAT64, fval: 1.2e-16}, + out: sqltypes.TestValue(querypb.Type_FLOAT64, "1.2e-16"), + }, { + typ: querypb.Type_DECIMAL, + v: evalResult{typ: querypb.Type_INT64, ival: 1}, + out: sqltypes.TestValue(querypb.Type_DECIMAL, "1"), + }, { + typ: querypb.Type_DECIMAL, + v: evalResult{typ: querypb.Type_UINT64, uval: 1}, + out: sqltypes.TestValue(querypb.Type_DECIMAL, "1"), + }, { + // For float, we should not use scientific notation. + typ: querypb.Type_DECIMAL, + v: evalResult{typ: querypb.Type_FLOAT64, fval: 1.2e-16}, + out: sqltypes.TestValue(querypb.Type_DECIMAL, "0.00000000000000012"), + }} + for _, tcase := range tcases { + got := castFromNumeric(tcase.v, tcase.typ) + + if !reflect.DeepEqual(got, tcase.out) { + t.Errorf("castFromNumeric(%v, %v): %v, want %v", tcase.v, tcase.typ, printValue(got), printValue(tcase.out)) + } + } +} + +func TestCompareNumeric(t *testing.T) { + tcases := []struct { + v1, v2 evalResult + out int + }{{ + v1: evalResult{typ: querypb.Type_INT64, ival: 1}, + v2: evalResult{typ: querypb.Type_INT64, ival: 1}, + out: 0, + }, { + v1: evalResult{typ: querypb.Type_INT64, ival: 1}, + v2: evalResult{typ: querypb.Type_INT64, ival: 2}, + out: -1, + }, { + v1: evalResult{typ: querypb.Type_INT64, ival: 2}, + v2: evalResult{typ: querypb.Type_INT64, ival: 1}, + out: 1, + }, { + // Special case. + v1: evalResult{typ: querypb.Type_INT64, ival: -1}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 1}, + out: -1, + }, { + v1: evalResult{typ: querypb.Type_INT64, ival: 1}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 1}, + out: 0, + }, { + v1: evalResult{typ: querypb.Type_INT64, ival: 1}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 2}, + out: -1, + }, { + v1: evalResult{typ: querypb.Type_INT64, ival: 2}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 1}, + out: 1, + }, { + v1: evalResult{typ: querypb.Type_INT64, ival: 1}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + out: 0, + }, { + v1: evalResult{typ: querypb.Type_INT64, ival: 1}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 2}, + out: -1, + }, { + v1: evalResult{typ: querypb.Type_INT64, ival: 2}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + out: 1, + }, { + // Special case. + v1: evalResult{typ: querypb.Type_UINT64, uval: 1}, + v2: evalResult{typ: querypb.Type_INT64, ival: -1}, + out: 1, + }, { + v1: evalResult{typ: querypb.Type_UINT64, uval: 1}, + v2: evalResult{typ: querypb.Type_INT64, ival: 1}, + out: 0, + }, { + v1: evalResult{typ: querypb.Type_UINT64, uval: 1}, + v2: evalResult{typ: querypb.Type_INT64, ival: 2}, + out: -1, + }, { + v1: evalResult{typ: querypb.Type_UINT64, uval: 2}, + v2: evalResult{typ: querypb.Type_INT64, ival: 1}, + out: 1, + }, { + v1: evalResult{typ: querypb.Type_UINT64, uval: 1}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 1}, + out: 0, + }, { + v1: evalResult{typ: querypb.Type_UINT64, uval: 1}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 2}, + out: -1, + }, { + v1: evalResult{typ: querypb.Type_UINT64, uval: 2}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 1}, + out: 1, + }, { + v1: evalResult{typ: querypb.Type_UINT64, uval: 1}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + out: 0, + }, { + v1: evalResult{typ: querypb.Type_UINT64, uval: 1}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 2}, + out: -1, + }, { + v1: evalResult{typ: querypb.Type_UINT64, uval: 2}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + out: 1, + }, { + v1: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + v2: evalResult{typ: querypb.Type_INT64, ival: 1}, + out: 0, + }, { + v1: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + v2: evalResult{typ: querypb.Type_INT64, ival: 2}, + out: -1, + }, { + v1: evalResult{typ: querypb.Type_FLOAT64, fval: 2}, + v2: evalResult{typ: querypb.Type_INT64, ival: 1}, + out: 1, + }, { + v1: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 1}, + out: 0, + }, { + v1: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 2}, + out: -1, + }, { + v1: evalResult{typ: querypb.Type_FLOAT64, fval: 2}, + v2: evalResult{typ: querypb.Type_UINT64, uval: 1}, + out: 1, + }, { + v1: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + out: 0, + }, { + v1: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 2}, + out: -1, + }, { + v1: evalResult{typ: querypb.Type_FLOAT64, fval: 2}, + v2: evalResult{typ: querypb.Type_FLOAT64, fval: 1}, + out: 1, + }} + for _, tcase := range tcases { + got := compareNumeric(tcase.v1, tcase.v2) + if got != tcase.out { + t.Errorf("equalNumeric(%v, %v): %v, want %v", tcase.v1, tcase.v2, got, tcase.out) + } + } +} + +func TestMin(t *testing.T) { + tcases := []struct { + v1, v2 sqltypes.Value + min sqltypes.Value + err error + }{{ + v1: sqltypes.NULL, + v2: sqltypes.NULL, + min: sqltypes.NULL, + }, { + v1: sqltypes.NewInt64(1), + v2: sqltypes.NULL, + min: sqltypes.NewInt64(1), + }, { + v1: sqltypes.NULL, + v2: sqltypes.NewInt64(1), + min: sqltypes.NewInt64(1), + }, { + v1: sqltypes.NewInt64(1), + v2: sqltypes.NewInt64(2), + min: sqltypes.NewInt64(1), + }, { + v1: sqltypes.NewInt64(2), + v2: sqltypes.NewInt64(1), + min: sqltypes.NewInt64(1), + }, { + v1: sqltypes.NewInt64(1), + v2: sqltypes.NewInt64(1), + min: sqltypes.NewInt64(1), + }, { + v1: sqltypes.TestValue(querypb.Type_VARCHAR, "aa"), + v2: sqltypes.TestValue(querypb.Type_VARCHAR, "aa"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, "types are not comparable: VARCHAR vs VARCHAR"), + }} + for _, tcase := range tcases { + v, err := Min(tcase.v1, tcase.v2) + if !vterrors.Equals(err, tcase.err) { + t.Errorf("Min error: %v, want %v", vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err != nil { + continue + } + + if !reflect.DeepEqual(v, tcase.min) { + t.Errorf("Min(%v, %v): %v, want %v", tcase.v1, tcase.v2, v, tcase.min) + } + } +} + +func TestMax(t *testing.T) { + tcases := []struct { + v1, v2 sqltypes.Value + max sqltypes.Value + err error + }{{ + v1: sqltypes.NULL, + v2: sqltypes.NULL, + max: sqltypes.NULL, + }, { + v1: sqltypes.NewInt64(1), + v2: sqltypes.NULL, + max: sqltypes.NewInt64(1), + }, { + v1: sqltypes.NULL, + v2: sqltypes.NewInt64(1), + max: sqltypes.NewInt64(1), + }, { + v1: sqltypes.NewInt64(1), + v2: sqltypes.NewInt64(2), + max: sqltypes.NewInt64(2), + }, { + v1: sqltypes.NewInt64(2), + v2: sqltypes.NewInt64(1), + max: sqltypes.NewInt64(2), + }, { + v1: sqltypes.NewInt64(1), + v2: sqltypes.NewInt64(1), + max: sqltypes.NewInt64(1), + }, { + v1: sqltypes.TestValue(querypb.Type_VARCHAR, "aa"), + v2: sqltypes.TestValue(querypb.Type_VARCHAR, "aa"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, "types are not comparable: VARCHAR vs VARCHAR"), + }} + for _, tcase := range tcases { + v, err := Max(tcase.v1, tcase.v2) + if !vterrors.Equals(err, tcase.err) { + t.Errorf("Max error: %v, want %v", vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err != nil { + continue + } + + if !reflect.DeepEqual(v, tcase.max) { + t.Errorf("Max(%v, %v): %v, want %v", tcase.v1, tcase.v2, v, tcase.max) + } + } +} + +func printValue(v sqltypes.Value) string { + return fmt.Sprintf("%v:%q", v.Type(), v.ToBytes()) +} + +// These benchmarks show that using existing ASCII representations +// for numbers is about 6x slower than using native representations. +// However, 229ns is still a negligible time compared to the cost of +// other operations. The additional complexity of introducing native +// types is currently not worth it. So, we'll stay with the existing +// ASCII representation for now. Using interfaces is more expensive +// than native representation of values. This is probably because +// interfaces also allocate memory, and also perform type assertions. +// Actual benchmark is based on NoNative. So, the numbers are similar. +// Date: 6/4/17 +// Version: go1.8 +// BenchmarkAddActual-8 10000000 263 ns/op +// BenchmarkAddNoNative-8 10000000 228 ns/op +// BenchmarkAddNative-8 50000000 40.0 ns/op +// BenchmarkAddGoInterface-8 30000000 52.4 ns/op +// BenchmarkAddGoNonInterface-8 2000000000 1.00 ns/op +// BenchmarkAddGo-8 2000000000 1.00 ns/op +func BenchmarkAddActual(b *testing.B) { + v1 := sqltypes.MakeTrusted(querypb.Type_INT64, []byte("1")) + v2 := sqltypes.MakeTrusted(querypb.Type_INT64, []byte("12")) + for i := 0; i < b.N; i++ { + v1 = NullsafeAdd(v1, v2, querypb.Type_INT64) + } +} + +func BenchmarkAddNoNative(b *testing.B) { + v1 := sqltypes.MakeTrusted(querypb.Type_INT64, []byte("1")) + v2 := sqltypes.MakeTrusted(querypb.Type_INT64, []byte("12")) + for i := 0; i < b.N; i++ { + iv1, _ := ToInt64(v1) + iv2, _ := ToInt64(v2) + v1 = sqltypes.MakeTrusted(querypb.Type_INT64, strconv.AppendInt(nil, iv1+iv2, 10)) + } +} + +func BenchmarkAddNative(b *testing.B) { + v1 := makeNativeInt64(1) + v2 := makeNativeInt64(12) + for i := 0; i < b.N; i++ { + iv1 := int64(binary.BigEndian.Uint64(v1.Raw())) + iv2 := int64(binary.BigEndian.Uint64(v2.Raw())) + v1 = makeNativeInt64(iv1 + iv2) + } +} + +func makeNativeInt64(v int64) sqltypes.Value { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, uint64(v)) + return sqltypes.MakeTrusted(querypb.Type_INT64, buf) +} + +func BenchmarkAddGoInterface(b *testing.B) { + var v1, v2 interface{} + v1 = int64(1) + v2 = int64(2) + for i := 0; i < b.N; i++ { + v1 = v1.(int64) + v2.(int64) + } +} + +func BenchmarkAddGoNonInterface(b *testing.B) { + v1 := evalResult{typ: querypb.Type_INT64, ival: 1} + v2 := evalResult{typ: querypb.Type_INT64, ival: 12} + for i := 0; i < b.N; i++ { + if v1.typ != querypb.Type_INT64 { + b.Error("type assertion failed") + } + if v2.typ != querypb.Type_INT64 { + b.Error("type assertion failed") + } + v1 = evalResult{typ: querypb.Type_INT64, ival: v1.ival + v2.ival} + } +} + +func BenchmarkAddGo(b *testing.B) { + v1 := int64(1) + v2 := int64(2) + for i := 0; i < b.N; i++ { + v1 += v2 + } +} diff --git a/internal/stackql-parser-fork/go/vt/vtgate/evalengine/expressions.go b/internal/stackql-parser-fork/go/vt/vtgate/evalengine/expressions.go new file mode 100644 index 00000000..d369267a --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vtgate/evalengine/expressions.go @@ -0,0 +1,294 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "fmt" + "strconv" + + "github.com/stackql/stackql-parser/go/sqltypes" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" + vtrpcpb "github.com/stackql/stackql-parser/go/vt/proto/vtrpc" + "github.com/stackql/stackql-parser/go/vt/vterrors" +) + +type ( + evalResult struct { + typ querypb.Type + ival int64 + uval uint64 + fval float64 + bytes []byte + } + //ExpressionEnv contains the environment that the expression + //evaluates in, such as the current row and bindvars + ExpressionEnv struct { + BindVars map[string]*querypb.BindVariable + Row []sqltypes.Value + } + + // EvalResult is used so we don't have to expose all parts of the private struct + EvalResult = evalResult + + // Expr is the interface that all evaluating expressions must implement + Expr interface { + Evaluate(env ExpressionEnv) (EvalResult, error) + Type(env ExpressionEnv) querypb.Type + String() string + } + + //BinaryExpr allows binary expressions to not have to evaluate child expressions - this is done by the BinaryOp + BinaryExpr interface { + Evaluate(left, right EvalResult) (EvalResult, error) + Type(left querypb.Type) querypb.Type + String() string + } + + // Expressions + Literal struct{ Val EvalResult } + BindVariable struct{ Key string } + Column struct{ Offset int } + BinaryOp struct { + Expr BinaryExpr + Left, Right Expr + } + + // Binary ops + Addition struct{} + Subtraction struct{} + Multiplication struct{} + Division struct{} +) + +// Value allows for retrieval of the value we expose for public consumption +func (e EvalResult) Value() sqltypes.Value { + return castFromNumeric(e, e.typ) +} + +// NewLiteralInt returns a literal expression +func NewLiteralInt(val []byte) (Expr, error) { + ival, err := strconv.ParseInt(string(val), 10, 64) + if err != nil { + return nil, err + } + return &Literal{evalResult{typ: sqltypes.Int64, ival: ival}}, nil +} + +// NewLiteralFloat returns a literal expression +func NewLiteralFloat(val []byte) (Expr, error) { + fval, err := strconv.ParseFloat(string(val), 64) + if err != nil { + return nil, err + } + return &Literal{evalResult{typ: sqltypes.Float64, fval: fval}}, nil +} + +// NewLiteralFloat returns a literal expression +func NewLiteralString(val []byte) (Expr, error) { + return &Literal{evalResult{typ: sqltypes.VarBinary, bytes: val}}, nil +} + +var _ Expr = (*Literal)(nil) +var _ Expr = (*BindVariable)(nil) +var _ Expr = (*BinaryOp)(nil) +var _ Expr = (*Column)(nil) + +var _ BinaryExpr = (*Addition)(nil) +var _ BinaryExpr = (*Subtraction)(nil) +var _ BinaryExpr = (*Multiplication)(nil) +var _ BinaryExpr = (*Division)(nil) + +// Evaluate implements the Expr interface +func (b *BinaryOp) Evaluate(env ExpressionEnv) (EvalResult, error) { + lVal, err := b.Left.Evaluate(env) + if err != nil { + return EvalResult{}, err + } + rVal, err := b.Right.Evaluate(env) + if err != nil { + return EvalResult{}, err + } + return b.Expr.Evaluate(lVal, rVal) +} + +// Evaluate implements the Expr interface +func (l *Literal) Evaluate(ExpressionEnv) (EvalResult, error) { + return l.Val, nil +} + +// Evaluate implements the Expr interface +func (b *BindVariable) Evaluate(env ExpressionEnv) (EvalResult, error) { + val, ok := env.BindVars[b.Key] + if !ok { + return EvalResult{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Bind variable not found") + } + return evaluateByType(val) +} + +// Evaluate implements the Expr interface +func (c *Column) Evaluate(env ExpressionEnv) (EvalResult, error) { + value := env.Row[c.Offset] + numeric, err := newEvalResult(value) + return numeric, err +} + +// Evaluate implements the BinaryOp interface +func (a *Addition) Evaluate(left, right EvalResult) (EvalResult, error) { + return addNumericWithError(left, right) +} + +// Evaluate implements the BinaryOp interface +func (s *Subtraction) Evaluate(left, right EvalResult) (EvalResult, error) { + return subtractNumericWithError(left, right) +} + +// Evaluate implements the BinaryOp interface +func (m *Multiplication) Evaluate(left, right EvalResult) (EvalResult, error) { + return multiplyNumericWithError(left, right) +} + +// Evaluate implements the BinaryOp interface +func (d *Division) Evaluate(left, right EvalResult) (EvalResult, error) { + return divideNumericWithError(left, right) +} + +// Type implements the BinaryExpr interface +func (a *Addition) Type(left querypb.Type) querypb.Type { + return left +} + +// Type implements the BinaryExpr interface +func (m *Multiplication) Type(left querypb.Type) querypb.Type { + return left +} + +// Type implements the BinaryExpr interface +func (d *Division) Type(querypb.Type) querypb.Type { + return sqltypes.Float64 +} + +// Type implements the BinaryExpr interface +func (s *Subtraction) Type(left querypb.Type) querypb.Type { + return left +} + +// Type implements the Expr interface +func (b *BinaryOp) Type(env ExpressionEnv) querypb.Type { + ltype := b.Left.Type(env) + rtype := b.Right.Type(env) + typ := mergeNumericalTypes(ltype, rtype) + return b.Expr.Type(typ) +} + +// Type implements the Expr interface +func (b *BindVariable) Type(env ExpressionEnv) querypb.Type { + e := env.BindVars + return e[b.Key].Type +} + +// Type implements the Expr interface +func (l *Literal) Type(ExpressionEnv) querypb.Type { + return l.Val.typ +} + +// Type implements the Expr interface +func (c *Column) Type(ExpressionEnv) querypb.Type { + return sqltypes.Float64 +} + +// String implements the BinaryExpr interface +func (d *Division) String() string { + return "/" +} + +// String implements the BinaryExpr interface +func (m *Multiplication) String() string { + return "*" +} + +// String implements the BinaryExpr interface +func (s *Subtraction) String() string { + return "-" +} + +// String implements the BinaryExpr interface +func (a *Addition) String() string { + return "+" +} + +// String implements the Expr interface +func (b *BinaryOp) String() string { + return b.Left.String() + " " + b.Expr.String() + " " + b.Right.String() +} + +// String implements the Expr interface +func (b *BindVariable) String() string { + return ":" + b.Key +} + +// String implements the Expr interface +func (l *Literal) String() string { + return l.Val.Value().String() +} + +// String implements the Expr interface +func (c *Column) String() string { + return fmt.Sprintf("[%d]", c.Offset) +} + +func mergeNumericalTypes(ltype, rtype querypb.Type) querypb.Type { + switch ltype { + case sqltypes.Int64: + if rtype == sqltypes.Uint64 || rtype == sqltypes.Float64 { + return rtype + } + case sqltypes.Uint64: + if rtype == sqltypes.Float64 { + return rtype + } + } + return ltype +} + +func evaluateByType(val *querypb.BindVariable) (EvalResult, error) { + switch val.Type { + case sqltypes.Int64: + ival, err := strconv.ParseInt(string(val.Value), 10, 64) + if err != nil { + ival = 0 + } + return evalResult{typ: sqltypes.Int64, ival: ival}, nil + case sqltypes.Uint64: + uval, err := strconv.ParseUint(string(val.Value), 10, 64) + if err != nil { + uval = 0 + } + return evalResult{typ: sqltypes.Uint64, uval: uval}, nil + case sqltypes.Float64: + fval, err := strconv.ParseFloat(string(val.Value), 64) + if err != nil { + fval = 0 + } + return evalResult{typ: sqltypes.Float64, fval: fval}, nil + case sqltypes.VarChar, sqltypes.Text, sqltypes.VarBinary: + return evalResult{typ: sqltypes.VarBinary, bytes: val.Value}, nil + case sqltypes.Null: + return evalResult{typ: sqltypes.Null}, nil + } + return evalResult{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Type is not supported: %s", val.Type.String()) +} diff --git a/internal/stackql-parser-fork/go/vt/vtgate/evalengine/expressions_test.go b/internal/stackql-parser-fork/go/vt/vtgate/evalengine/expressions_test.go new file mode 100644 index 00000000..872d9b10 --- /dev/null +++ b/internal/stackql-parser-fork/go/vt/vtgate/evalengine/expressions_test.go @@ -0,0 +1,107 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stackql/stackql-parser/go/sqltypes" + + querypb "github.com/stackql/stackql-parser/go/vt/proto/query" +) + +// more tests in go/sqlparser/expressions_test.go + +func TestBinaryOpTypes(t *testing.T) { + type testcase struct { + l, r, e querypb.Type + } + type ops struct { + op BinaryExpr + testcases []testcase + } + + tests := []ops{ + { + op: &Addition{}, + testcases: []testcase{ + {sqltypes.Int64, sqltypes.Int64, sqltypes.Int64}, + {sqltypes.Uint64, sqltypes.Int64, sqltypes.Uint64}, + {sqltypes.Float64, sqltypes.Int64, sqltypes.Float64}, + {sqltypes.Int64, sqltypes.Uint64, sqltypes.Int64}, + {sqltypes.Uint64, sqltypes.Uint64, sqltypes.Uint64}, + {sqltypes.Float64, sqltypes.Uint64, sqltypes.Float64}, + {sqltypes.Int64, sqltypes.Float64, sqltypes.Int64}, + {sqltypes.Uint64, sqltypes.Float64, sqltypes.Uint64}, + {sqltypes.Float64, sqltypes.Float64, sqltypes.Float64}, + }, + }, { + op: &Subtraction{}, + testcases: []testcase{ + {sqltypes.Int64, sqltypes.Int64, sqltypes.Int64}, + {sqltypes.Uint64, sqltypes.Int64, sqltypes.Uint64}, + {sqltypes.Float64, sqltypes.Int64, sqltypes.Float64}, + {sqltypes.Int64, sqltypes.Uint64, sqltypes.Int64}, + {sqltypes.Uint64, sqltypes.Uint64, sqltypes.Uint64}, + {sqltypes.Float64, sqltypes.Uint64, sqltypes.Float64}, + {sqltypes.Int64, sqltypes.Float64, sqltypes.Int64}, + {sqltypes.Uint64, sqltypes.Float64, sqltypes.Uint64}, + {sqltypes.Float64, sqltypes.Float64, sqltypes.Float64}, + }, + }, { + op: &Multiplication{}, + testcases: []testcase{ + {sqltypes.Int64, sqltypes.Int64, sqltypes.Int64}, + {sqltypes.Uint64, sqltypes.Int64, sqltypes.Uint64}, + {sqltypes.Float64, sqltypes.Int64, sqltypes.Float64}, + {sqltypes.Int64, sqltypes.Uint64, sqltypes.Int64}, + {sqltypes.Uint64, sqltypes.Uint64, sqltypes.Uint64}, + {sqltypes.Float64, sqltypes.Uint64, sqltypes.Float64}, + {sqltypes.Int64, sqltypes.Float64, sqltypes.Int64}, + {sqltypes.Uint64, sqltypes.Float64, sqltypes.Uint64}, + {sqltypes.Float64, sqltypes.Float64, sqltypes.Float64}, + }, + }, { + op: &Division{}, + testcases: []testcase{ + {sqltypes.Int64, sqltypes.Int64, sqltypes.Float64}, + {sqltypes.Uint64, sqltypes.Int64, sqltypes.Float64}, + {sqltypes.Float64, sqltypes.Int64, sqltypes.Float64}, + {sqltypes.Int64, sqltypes.Uint64, sqltypes.Float64}, + {sqltypes.Uint64, sqltypes.Uint64, sqltypes.Float64}, + {sqltypes.Float64, sqltypes.Uint64, sqltypes.Float64}, + {sqltypes.Int64, sqltypes.Float64, sqltypes.Float64}, + {sqltypes.Uint64, sqltypes.Float64, sqltypes.Float64}, + {sqltypes.Float64, sqltypes.Float64, sqltypes.Float64}, + }, + }, + } + + for _, op := range tests { + for _, tc := range op.testcases { + name := fmt.Sprintf("%s %s %s", tc.l.String(), reflect.TypeOf(op.op).String(), tc.r.String()) + t.Run(name, func(t *testing.T) { + result := op.op.Type(tc.l) + assert.Equal(t, tc.e, result) + }) + } + } +} diff --git a/internal/stackql-parser-fork/log/.gitignore b/internal/stackql-parser-fork/log/.gitignore new file mode 100644 index 00000000..c96a04f0 --- /dev/null +++ b/internal/stackql-parser-fork/log/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore \ No newline at end of file diff --git a/internal/stackql-parser-fork/misc/git/commit-msg b/internal/stackql-parser-fork/misc/git/commit-msg new file mode 100755 index 00000000..605d3d02 --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/commit-msg @@ -0,0 +1,20 @@ +#!/bin/bash + +# Runs any hooks in misc/git/commit-msg.*, and exits if any of them fail. +set -e + +# This is necessary because the Emacs extensions don't set GIT_DIR. +if [ -z "$GIT_DIR" ]; then + DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) + GIT_DIR="${DIR}/.." +fi + +# This is necessary because the Atom packages don't set GOPATH +if [ -z "$GOPATH" ]; then + GOPATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../../../../.." && pwd ) + export GOPATH +fi + +for hook in $GIT_DIR/../misc/git/commit-msg.*; do + $hook $@ +done diff --git a/internal/stackql-parser-fork/misc/git/commit-msg.bugnumber b/internal/stackql-parser-fork/misc/git/commit-msg.bugnumber new file mode 100755 index 00000000..05d6dada --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/commit-msg.bugnumber @@ -0,0 +1,65 @@ +#!/bin/bash + +# This script is run during "git commit" after the commit message was entered. +# +# If it does not find a BUG= line in this commit or previous new +# commits from this branch, it prompts the user to add one. +# refers to a Google internal bug number. Therefore, the script +# prompts only users who have @google.com in their Git configured email. + +bug_marker="BUG=" + +# Return early if a bug marker is already present: +# a) Check current commit message. +msg_file="$1" +if grep -qE "^${bug_marker}[0-9]{8,}$|\bb\/[0-9]{8,}\b" "$msg_file"; then + exit 0 +fi +# b) Check other commits in the branch as well. +if [[ -n "$(git log --no-merges -E --grep="^${bug_marker}[0-9]{8,}$|\bb\/[0-9]{8,}\b" master..)" ]]; then + exit 0 +fi + +# No bug number found. Ask user to input a bug number. + +# git doesn't give us access to user input, so let's steal it. +exec < /dev/tty +if [[ $? -ne 0 ]]; then + # non-interactive shell (e.g. called from Eclipse). Give up here. + exit 0 +fi + +git_email=$(git config --get user.email) +if [[ "$git_email" != *@google.com ]]; then + # This script applies only to internal developers. + exit 0 +fi + +echo "No ${bug_marker} marker was found in this or previous commits in this branch." +echo +echo "As an internal developer, please always try to add a bug number to your commits." +echo +while [[ -z "$bug" ]]; do + read -r -p 'You can enter a bug number now: [press enter to skip] ' + if [[ -z "$REPLY" ]]; then + bug="skipped" + break + fi + # Example: 28221285 + if [[ "$REPLY" =~ ^[0-9]{8,}$ ]]; then + bug="$REPLY" + break + fi + + echo "You entered an invalid bug number: $REPLY" + echo + echo "Please try again. Do not enter anything to skip this step." +done +if [[ "$bug" == "skipped" ]]; then + exit 0 +fi + +# Add the bug number to the commit message. +bug_marker_line="${bug_marker}$bug" +echo >> "$msg_file" +echo "$bug_marker_line" >> "$msg_file" diff --git a/internal/stackql-parser-fork/misc/git/commit-msg.signoff b/internal/stackql-parser-fork/misc/git/commit-msg.signoff new file mode 100755 index 00000000..cf9e4dc0 --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/commit-msg.signoff @@ -0,0 +1,63 @@ +#!/bin/bash + +# This script is run during "git commit" after the commit message was entered. +# +# If it does not find a Signed-off-by: line in this commit, +# it prints a message about using the -s flag and a link +# to an explanation of the DCO. +# +# If it detects an interactive session, it prompts the user +# to acknowledge signoff now, and adds the line if so. + +msg_file="$1" + +git_email="$(git config --get user.email)" +git_name="$(git config --get user.name)" +signoff="$(grep -E --max-count=1 "^Signed-off-by: " "$msg_file")" + +if [[ "$signoff" =~ Signed-off-by:\ (.*)\ \<(.*)\> ]]; then + if [[ "${BASH_REMATCH[1]}" == "${git_name}" && "${BASH_REMATCH[2]}" == "${git_email}" ]]; then + # Everything checks out! + exit 0 + fi +fi + +# No signoff found, or the email doesn't match. Print some instructions. +echo +echo "===================================================================" +echo "No 'Signed-off-by:' line was found, or it didn't match the" +echo "expected author: ${git_name} <${git_email}>" +echo +echo "This project uses a Developer Certificate of Origin" +echo "instead of a Contributor License Agreement." +echo "For more information, see: https://wiki.linuxfoundation.org/dco" +echo +echo "Please certify each contribution meets the requirements in the" +echo "'DCO' file in the root of this repository by committing with" +echo "the --signoff flag (or the short form: -s):" +echo +echo " git commit --signoff" + +# git doesn't give us access to user input, so let's steal it. +exec < /dev/tty +if [[ $? -ne 0 ]]; then + # non-interactive shell (e.g. called from Eclipse). Give up here. + exit 1 +fi + +# Offer to add the signoff line. +signoff="Signed-off-by: ${git_name} <${git_email}>" +echo +echo "Alternatively, you can acknowledge your signoff and continue below:" +echo +echo " ${signoff}" +echo +echo -n "Do you want to add the above signoff and continue? [y/N] " +read reply + +if [[ "${reply}" != "y" ]]; then + exit 1 +fi + +echo >> "${msg_file}" +echo "${signoff}" >> "${msg_file}" diff --git a/internal/stackql-parser-fork/misc/git/hooks/checkstyle b/internal/stackql-parser-fork/misc/git/hooks/checkstyle new file mode 100755 index 00000000..777dd512 --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/hooks/checkstyle @@ -0,0 +1,44 @@ +#!/bin/bash + +set -e + +function get_module() { + local path=$1; + while true; do + path=$(dirname $path); + if [ -f "$path/pom.xml" ]; then + echo "$path"; + return; + elif [[ "./" =~ "$path" ]]; then + return; + fi + done +} + +cd java; + +modules=(); + +for file in $(git diff --relative --name-only --cached \*.java); do + module=$(get_module "$file"); + if [ "" != "$module" ] \ + && [[ ! " ${modules[@]} " =~ " $module " ]]; then + modules+=("$module"); + fi +done; + +if [ ${#modules[@]} -eq 0 ]; then + exit; +fi + +modules_arg=$(printf ",%s" "${modules[@]}"); +modules_arg=${modules_arg:1}; + +export MAVEN_OPTS="-client + -XX:+TieredCompilation + -XX:TieredStopAtLevel=1 + -Xverify:none"; + +mvn -q -pl "$modules_arg" checkstyle:check; + +cd -; diff --git a/internal/stackql-parser-fork/misc/git/hooks/gofmt b/internal/stackql-parser-fork/misc/git/hooks/gofmt new file mode 100755 index 00000000..c1c56cd1 --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/hooks/gofmt @@ -0,0 +1,63 @@ +#!/bin/bash +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# git gofmt pre-commit hook +# +# To use, store as .git/hooks/pre-commit inside your repository and make sure +# it has execute permissions. +# +# This script does not handle file names that contain spaces. +gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '^go/.*\.go$') + +[ -z "$gofiles" ] && exit 0 +unformatted=$(gofmt -s -l $gofiles 2>&1) +[ -z "$unformatted" ] && exit 0 + +# Some files are not gofmt'd. Print command to fix them and fail. + +# Deduplicate files first in case a file has multiple errors. +files=$( + # Split the "gofmt" output on newlines only. + OLDIFS=$IFS + IFS=' +' + for line in $unformatted; do + # Strip everything after the first ':', including it. + # Example output for $line: + # go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go:241:60: expected ';', found 'IDENT' wg + echo ${line/:*/} + done | + # Remove duplicates. + sort -u + IFS=$OLDIFS +) + +echo >&2 +echo >&2 "Go files must be formatted with gofmt. Please run:" +echo >&2 +echo >&2 -n " gofmt -s -w" + +for f in $files; do + # Print " \" after the "gofmt" above and each filename (except for the last one). + echo >&2 " \\" + echo >&2 -n " $PWD/$f" +done +echo >&2 + +echo >&2 +echo >&2 "If gofmt fails and outputs errors, you have to fix them manually." +echo >&2 + +exit 1 diff --git a/internal/stackql-parser-fork/misc/git/hooks/goimports b/internal/stackql-parser-fork/misc/git/hooks/goimports new file mode 100755 index 00000000..786e7ab5 --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/hooks/goimports @@ -0,0 +1,38 @@ +#!/bin/bash +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# git goimports pre-commit hook +# +# To use, store as .git/hooks/pre-commit inside your repository and make sure +# it has execute permissions. +# +# This script does not handle file names that contain spaces. +gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '^go/.*\.go$') + +[ -z "$gofiles" ] && exit 0 +unformatted=$(goimports -l=true $gofiles 2>&1 | awk -F: '{print $1}') +[ -z "$unformatted" ] && exit 0 + +# Some files are not goimports'd. Print message and fail. + +echo >&2 "Go files must be formatted with goimports. Please run:" +echo >&2 +echo -n >&2 " goimports -w" +for fn in $unformatted; do + echo -n >&2 " $PWD/$fn" +done +echo + +exit 1 diff --git a/internal/stackql-parser-fork/misc/git/hooks/golangci-lint b/internal/stackql-parser-fork/misc/git/hooks/golangci-lint new file mode 100755 index 00000000..1d060ba0 --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/hooks/golangci-lint @@ -0,0 +1,36 @@ +#!/bin/bash +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Unfortunately golangci-lint does not work well on checking just modified files. +# We will enable it for everything here, but with most of the linters disabled. +# See: https://github.com/vitessio/vitess/issues/5503 + +gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '^go/.*\.go$' | grep -v '^go/vt/proto/' | grep -v 'go/vt/sqlparser/sql.go') + +# xargs -n1 because dirname on MacOS does not support multiple arguments. +gopackages=$(echo $gofiles | xargs -n1 dirname | sort -u) + +GOLANGCI_LINT=$(command -v golangci-lint >/dev/null 2>&1) +if [ $? -eq 1 ]; then + echo "Downloading golangci-lint..." + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.27.0 +fi + +#golangci-lint run --disable=ineffassign,unused,gosimple,staticcheck,errcheck,structcheck,varcheck,deadcode +for gopackage in $gopackages +do + echo "Linting $gopackage" + golangci-lint run --disable=errcheck --timeout=10m $gopackage +done diff --git a/internal/stackql-parser-fork/misc/git/hooks/golint b/internal/stackql-parser-fork/misc/git/hooks/golint new file mode 100755 index 00000000..a2a3382a --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/hooks/golint @@ -0,0 +1,79 @@ +#!/bin/bash +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# git golint pre-commit hook +# +# To use, store as .git/hooks/pre-commit inside your repository and make sure +# it has execute permissions. + +if [ -z "$GOPATH" ]; then + echo "ERROR: pre-commit hook for golint: \$GOPATH is empty. Please run 'source dev.env' to set the correct \$GOPATH." + exit 1 +fi + +# This script does not handle file names that contain spaces. +gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '^go/.*\.go$' | grep -v '^go/vt/proto/' | grep -v 'go/vt/sqlparser/sql.go') + +errors= + +# Run on one file at a time because a single invocation of golint +# with multiple files requires the files to all be in one package. +gofiles_with_warnings=() +for gofile in $gofiles +do + errcount=$(go run golang.org/x/lint/golint $gofile | wc -l) + if [ "$errcount" -gt "0" ]; then + errors=YES + echo "$errcount suggestions for:" + echo "go run golang.org/x/lint/golint $gofile" + gofiles_with_warnings+=($gofile) + fi +done + +[ -z "$errors" ] && exit 0 + +# git doesn't give us access to user input, so let's steal it. +exec < /dev/tty +if [[ $? -eq 0 ]]; then + # interactive shell. Prompt the user. + echo + echo "Lint suggestions were found. They're not enforced, but we're pausing" + echo "to let you know before they get clobbered in the scrollback buffer." + echo + read -r -p 'Press enter to cancel, "s" to step through the warnings or type "ack" to continue: ' + if [ "$REPLY" = "ack" ]; then + exit 0 + fi + if [ "$REPLY" = "s" ]; then + first_file="true" + for gofile in "${gofiles_with_warnings[@]}" + do + echo + if [ "$first_file" != "true" ]; then + echo "Press enter to show the warnings for the next file." + read + fi + go run golang.org/x/lint/golint $gofile + first_file="false" + done + fi +else + # non-interactive shell (e.g. called from Eclipse). Just display the errors. + for gofile in "${gofiles_with_warnings[@]}" + do + go run golang.org/x/lint/golint $gofile + done +fi +exit 1 diff --git a/internal/stackql-parser-fork/misc/git/hooks/govet b/internal/stackql-parser-fork/misc/git/hooks/govet new file mode 100755 index 00000000..72951834 --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/hooks/govet @@ -0,0 +1,54 @@ +#!/bin/bash +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# git go vet pre-commit hook +# +# To use, store as .git/hooks/pre-commit inside your repository and make sure +# it has execute permissions. + +if [ -z "$GOPATH" ]; then + echo "ERROR: pre-commit hook for go vet: \$GOPATH is empty. Please run 'source dev.env' to set the correct \$GOPATH." + exit 1 +fi + +# This script does not handle file names that contain spaces. +gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '^go/.*\.go$' | grep -v '^go/vt/proto/' | grep -v 'go/vt/sqlparser/sql.go') +if [ "$gofiles" = "" ]; then + exit 0 +fi + +# xargs -n1 because dirname on MacOS does not support multiple arguments. +gopackages=$(echo $gofiles | xargs -n1 dirname | sort -u) + +errors= + +# If any checks are found to be useless, they can be disabled here. +# See the output of "go doc cmd/vet" for a list of flags. +vetflags="" + +# Run on one package at a time +for gopackage in $gopackages +do + if ! go vet $vetflags "vitess.io/vitess/$gopackage" 2>&1; then + errors=YES + fi +done + +[ -z "$errors" ] && exit 0 + +echo +echo "Please fix the go vet warnings above. To disable certain checks, change vetflags in misc/git/hooks/govet." +exit 1 + diff --git a/internal/stackql-parser-fork/misc/git/hooks/shellcheck b/internal/stackql-parser-fork/misc/git/hooks/shellcheck new file mode 100755 index 00000000..af9d8f8b --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/hooks/shellcheck @@ -0,0 +1,48 @@ +#!/bin/bash +# This file is based on the Go linter precommit hook +# "golint". Therefore, both files are very similar. + +# This script does not handle file names that contain spaces. +shfiles=$(git diff --cached --name-only --diff-filter=ACM | grep '.*\.sh$') +if [ -z "$shfiles" ] ; then + # No .sh files modified. + exit 0 +fi + +if [ -z "$(command -v shellcheck)" ]; then + echo "shellcheck not found, please run: brew or apt-get install shellcheck" + exit 0 +fi + +errors= +for file in $shfiles +do + # The -e SC1090,SC1091 suppressing warnings about trying to find + # files imported with "source foo.sh". We only want to lint + # the files modified as part of this current diff. + errors+=$(shellcheck -e SC1090,SC1091 "$file" 2>&1) +done + +[ -z "$errors" ] && exit 0 + +# git doesn't give us access to user input, so let's steal it. +if exec < /dev/tty; then + # interactive shell. Prompt the user. + echo + echo "shellcheck suggestions were found. They're not enforced, but we're pausing" + echo "to let you know before they get clobbered in the scrollback buffer." + echo + read -r -p 'Press enter to cancel, "s" to show all warnings or type "ack" to continue: ' + if [ "$REPLY" = "ack" ]; then + exit 0 + fi + if [ "$REPLY" = "s" ]; then + echo + echo "$errors" + fi +else + # non-interactive shell (e.g. called from Eclipse). Just display the errors. + echo "$errors" +fi + +exit 1 diff --git a/internal/stackql-parser-fork/misc/git/hooks/staticcheck b/internal/stackql-parser-fork/misc/git/hooks/staticcheck new file mode 100755 index 00000000..f0ea8d5d --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/hooks/staticcheck @@ -0,0 +1,70 @@ +#!/bin/bash + +# git staticcheck pre-commit hook + +if [ -z "$GOPATH" ]; then + echo "ERROR: pre-commit hook for staticcheck: \$GOPATH is empty. Please run 'source dev.env' to set the correct \$GOPATH." + exit 1 +fi + +# This script does not handle file names that contain spaces. +# Exclude auto-generated files (from proto or yacc compile). +gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '^go/.*\.go$' | grep -v '^go/vt/proto/' | grep -v 'go/vt/sqlparser/sql.go') +if [ "$gofiles" = "" ]; then + exit 0 +fi + +# xargs -n1 because dirname on MacOS does not support multiple arguments. +gopackages=$(echo $gofiles | xargs -n1 dirname | sort -u) + +warnings= + +# Run on one package at a time +gopackages_with_warnings=() +for gopackage in $gopackages +do + warningcount="$(go run honnef.co/go/tools/cmd/staticcheck "vitess.io/vitess/$gopackage" | wc -l)" + if [ "$warningcount" -gt "0" ]; then + warnings=YES + echo "$warningcount reports for:" + echo "go run honnef.co/go/tools/cmd/staticcheck vitess.io/vitess/$gopackage" + gopackages_with_warnings+=($gopackage) + fi +done + +[ -z "$warnings" ] && exit 0 + +# git doesn't give us access to user input, so let's steal it. +exec < /dev/tty +if [[ $? -eq 0 ]]; then + # interactive shell. Prompt the user. + echo + echo "Suggestions from the go 'staticcheck' program were found." + echo "They're not enforced, but we're pausing to let you know" + echo "before they get clobbered in the scrollback buffer." + echo + read -r -p 'Press enter to cancel, "s" to step through the warnings or type "ack" to continue: ' + if [ "$REPLY" = "ack" ]; then + exit 0 + fi + if [ "$REPLY" = "s" ]; then + first_file="true" + for gopackage in "${gopackages_with_warnings[@]}" + do + echo + if [ "$first_file" != "true" ]; then + echo "Press enter to show the warnings for the next file." + read + fi + go run honnef.co/go/tools/cmd/staticcheck "vitess.io/vitess/$gopackage" + first_file="false" + done + fi +else + # non-interactive shell (e.g. called from Eclipse). Just display the warnings. + for gopackage in "${gopackages_with_warnings[@]}" + do + go run honnef.co/go/tools/cmd/staticcheck "vitess.io/vitess/$gopackage" + done +fi +exit 1 diff --git a/internal/stackql-parser-fork/misc/git/hooks/tslint b/internal/stackql-parser-fork/misc/git/hooks/tslint new file mode 100755 index 00000000..2256ce28 --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/hooks/tslint @@ -0,0 +1,56 @@ +#!/bin/bash +# +# Precommit hook which runs 'tslint' to lint TypeScript code. +# +# It gets only triggered when a file below $vtctld_web_src was changed. +vtctld_web="web/vtctld2" + +git diff --cached --name-only --diff-filter=ACM | grep -q "^${vtctld_web}/src" +if [ $? -ne 0 ]; then + # No potential TypeScript file changed. Return early. + exit 0 +fi + +if [ -z "$(which tslint)" ]; then + echo "tslint not found, please run: npm install -g tslint typescript" + exit 1 +fi + +cd $vtctld_web +if [[ $? != 0 ]]; then + echo "Failed to change to the vtctld web directory ($vtctld_web)" + exit 1 +fi + +# Check for lint errors. +# Suppress npm logs to avoid that it creates a npm-debug.log file in $CWD. +errors=$(npm --loglevel=silent run lint 2>&1) +if [ $? -eq 0 ]; then + # No lint errors. Return early. + exit 0 +fi + +# Ask the user how to proceed. + +# git doesn't give us access to user input, so let's steal it. +exec < /dev/tty +if [[ $? -eq 0 ]]; then + # interactive shell. Prompt the user. + echo + echo "tslint suggestions were found. They're not enforced, but we're pausing" + echo "to let you know before they get clobbered in the scrollback buffer." + echo + read -r -p 'Press enter to cancel, "s" to show all warnings or type "ack" to continue: ' + if [ "$REPLY" = "ack" ]; then + exit 0 + fi + if [ "$REPLY" = "s" ]; then + echo + echo "$errors" + fi +else + # non-interactive shell (e.g. called from Eclipse). Just display the errors. + echo "$errors" +fi + +exit 1 diff --git a/internal/stackql-parser-fork/misc/git/hooks/visitorgen b/internal/stackql-parser-fork/misc/git/hooks/visitorgen new file mode 100755 index 00000000..65c04d61 --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/hooks/visitorgen @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# this script, which should run before committing code, makes sure that the visitor is re-generated when the ast changes + +go run ./go/vt/sqlparser/visitorgen/main -compareOnly=true -input=go/vt/sqlparser/ast.go -output=go/vt/sqlparser/rewriter.go \ No newline at end of file diff --git a/internal/stackql-parser-fork/misc/git/pre-commit b/internal/stackql-parser-fork/misc/git/pre-commit new file mode 100755 index 00000000..84480d8c --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/pre-commit @@ -0,0 +1,20 @@ +#!/bin/bash + +# Runs any hooks in misc/git/hooks, and exits if any of them fail. +set -e + +# This is necessary because the Emacs extensions don't set GIT_DIR. +if [ -z "$GIT_DIR" ]; then + DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) + GIT_DIR="${DIR}/.." +fi + +# This is necessary because the Atom packages don't set GOPATH +if [ -z "$GOPATH" ]; then + GOPATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../../../../.." && pwd ) + export GOPATH +fi + +for hook in $GIT_DIR/../misc/git/hooks/*; do + $hook +done diff --git a/internal/stackql-parser-fork/misc/git/prepare-commit-msg.bugnumber b/internal/stackql-parser-fork/misc/git/prepare-commit-msg.bugnumber new file mode 100755 index 00000000..dc73f2b7 --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/prepare-commit-msg.bugnumber @@ -0,0 +1,44 @@ +#!/bin/bash + +# This script is run during "git commit" before the commit message editor +# is shown. +# +# It automatically adds a BUG= line to the default commit +# message if the branch name starts with "b". +# Note that refers to a Google internal bug number. + +branch="$(git rev-parse --abbrev-ref HEAD)" +# Examples: 28221285, b28221285, 28221285_feature, b28221285_feature +if [[ "$branch" =~ ^b?([0-9]{8,}) ]]; then + bug=${BASH_REMATCH[1]} +fi + +if [[ -z "$bug" ]]; then + # No bug found in branch name. Exit early. + exit 0 +fi + +bug_marker_line="BUG=$bug" +bug_url="b/${bug}" + +# Check current commit message (e.g. in case of an --amend). +msg_file="$1" +if grep -q "$bug_marker_line" "$msg_file"; then + exit 0 +fi + +# Check other commits in the branch as well. +if [[ -n "$(git log --no-merges -E --grep="^$bug_marker_line$|$bug_url" master..)" ]]; then + echo "Note: Bug number found in branch name ($bug) but not adding it to this commit message because previous commits already include it." + exit 0 +fi + +# Add the bug number to the commit message. +type="$2" +# TODO(mberlin): React on other types as well? +# https://git-scm.com/docs/githooks lists these types: +# template, merge, squash +if [[ -z "$type" || "$type" == "commit" || "$type" == "message" ]]; then + echo >> "$msg_file" + echo "$bug_marker_line" >> "$msg_file" +fi diff --git a/internal/stackql-parser-fork/misc/git/ps1 b/internal/stackql-parser-fork/misc/git/ps1 new file mode 100755 index 00000000..2ef136f8 --- /dev/null +++ b/internal/stackql-parser-fork/misc/git/ps1 @@ -0,0 +1,12 @@ +# Source this file in your shell to get the current git branch name in +# the prompt. + +# parse_git_branch echoes the name of the current branch followed by a +# space (if its not nil). +function parse_git_branch { + branch=$(git branch --no-color 2> /dev/null |grep '*'|awk '{print $2}') + if [ ! -z $branch ]; then + echo " $branch" + fi +} +PS1='\A [\j] (\u \[\e[1;34m\]\h\[\e[m\]):\w\[\e[0;31m\]$(parse_git_branch)\[\e[m\]\$ ' \ No newline at end of file diff --git a/internal/stackql-parser-fork/misc/gofmt-all b/internal/stackql-parser-fork/misc/gofmt-all new file mode 100755 index 00000000..7106ab3b --- /dev/null +++ b/internal/stackql-parser-fork/misc/gofmt-all @@ -0,0 +1,3 @@ +#!/bin/bash + +find . -name '*.go' -exec gofmt -s -w {} \; diff --git a/internal/stackql-parser-fork/misc/parse_cover.py b/internal/stackql-parser-fork/misc/parse_cover.py new file mode 100755 index 00000000..f028c0e3 --- /dev/null +++ b/internal/stackql-parser-fork/misc/parse_cover.py @@ -0,0 +1,45 @@ +#!/usr/bin/python + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# this is a small helper script to parse test coverage and display stats. +import re +import sys + +coverage_pattern = re.compile(r"coverage: (\d+).(\d+)% of statements") + +no_test_file_count = 0 +coverage_count = 0 +coverage_sum = 0.0 + +for line in sys.stdin: + print line, + sys.stdout.flush + + if line.find('[no test files]') != -1: + no_test_file_count += 1 + continue + + m = coverage_pattern.search(line) + if m != None: + coverage_count += 1 + coverage_sum += float(m.group(1) + "." + m.group(2)) + continue + +directories_covered = coverage_count * 100 / (no_test_file_count + coverage_count) +average_coverage = coverage_sum / coverage_count + +print "Directory test coverage: %u%%" % directories_covered +print "Average test coverage: %u%%" % int(average_coverage) diff --git a/internal/stackql-parser-fork/proto/README.md b/internal/stackql-parser-fork/proto/README.md new file mode 100644 index 00000000..50c62dbe --- /dev/null +++ b/internal/stackql-parser-fork/proto/README.md @@ -0,0 +1,54 @@ +# Vitess Protobuf Definitions + +This directory contains all Vitess protobuf definitions. + +Our protobuf messages are both used as wire format (e.g. `query.proto`) and for +storage (e.g. `topodata.proto`). + +RPC messages and service definitions are in separate files (e.g. `vtgate.proto` +and `vtgateservice.proto`) on purpose because our internal deployment does not +use gRPC. + +## Style Guide + +Before creating new messages or services, please make yourself familiar with the +style of the existing definitions first. + +Additionally, new definitions must adhere to the Google Cloud API Design Guide: +https://cloud.google.com/apis/design/ + +### Comments + +We are more strict than the Design Guide on the format for comments. Similar to +comments for Go types or fields, protobuf comments must start with the name. +For example: +```protobuf +// TabletAlias is a globally unique tablet identifier. +message TabletAlias { + // cell is the cell (or datacenter) the tablet is in. + string cell = 1; + ... +} +``` + +Note that the [Design Guide also has the following ask](https://cloud.google.com/apis/design/documentation#field_and_parameter_descriptions): + +> If the field value is required, input only, output only, it must be documented +> at the start of the field description. By default, all fields and parameters +> are optional. + +Here's an example which combines this ask with our stricter comments style: + +```protobuf +// ExecuteKeyspaceIdsRequest is the payload to ExecuteKeyspaceIds. +message ExecuteKeyspaceIdsRequest { + ... + // Required. keyspace to target the query to. + string keyspace = 4; + ... +} +``` + +Note that most of our existing files (as of March 2017) do not have e.g. +`"Required."` comments. Nonetheless, new files should follow this where +applicable. diff --git a/internal/stackql-parser-fork/proto/automation.proto b/internal/stackql-parser-fork/proto/automation.proto new file mode 100644 index 00000000..17e2aac2 --- /dev/null +++ b/internal/stackql-parser-fork/proto/automation.proto @@ -0,0 +1,97 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Protobuf data structures for the automation framework. + +// Messages (e.g. Task) are used both for checkpoint data and API access +// (e.g. retrieving the current status of a pending cluster operation). + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/automation"; + +package automation; + +enum ClusterOperationState { + UNKNOWN_CLUSTER_OPERATION_STATE = 0; + CLUSTER_OPERATION_NOT_STARTED = 1; + CLUSTER_OPERATION_RUNNING = 2; + CLUSTER_OPERATION_DONE = 3; +} + +message ClusterOperation { + string id = 1; + // TaskContainer are processed sequentially, one at a time. + repeated TaskContainer serial_tasks = 2; + // Cached value. This has to be re-evaluated e.g. after a checkpoint load because running tasks may have already finished. + ClusterOperationState state = 3; + // Error of the first task which failed. Set after state advanced to CLUSTER_OPERATION_DONE. If empty, all tasks succeeded. Cached value, see state above. + string error = 4; +} + +// TaskContainer holds one or more task which may be executed in parallel. +// "concurrency", if > 0, limits the amount of concurrently executed tasks. +message TaskContainer { + repeated Task parallel_tasks = 1; + int32 concurrency = 2; +} + +enum TaskState { + UNKNOWN_TASK_STATE = 0; + NOT_STARTED = 1; + RUNNING = 2; + DONE = 3; +} + +// Task represents a specific task which should be automatically executed. +message Task { + // Task specification. + string name = 1; + map parameters = 2; + + // Runtime data. + string id = 3; + TaskState state = 4; + // Set after state advanced to DONE. + string output = 5; + // Set after state advanced to DONE. If empty, the task did succeed. + string error = 6; +} + +message EnqueueClusterOperationRequest { + string name = 1; + map parameters = 2; +} + +message EnqueueClusterOperationResponse { + string id = 1; +} + +message GetClusterOperationStateRequest { + string id = 1; +} + +message GetClusterOperationStateResponse { + ClusterOperationState state = 1; +} + +message GetClusterOperationDetailsRequest { + string id = 1; +} + +message GetClusterOperationDetailsResponse { + // Full snapshot of the execution e.g. including output of each task. + ClusterOperation cluster_op = 2; +} diff --git a/internal/stackql-parser-fork/proto/automationservice.proto b/internal/stackql-parser-fork/proto/automationservice.proto new file mode 100644 index 00000000..bdf7ab02 --- /dev/null +++ b/internal/stackql-parser-fork/proto/automationservice.proto @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Protobuf service for the automation framework. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/automationservice"; + +package automationservice; + +import "automation.proto"; + +service Automation { + // Start a cluster operation. + rpc EnqueueClusterOperation(automation.EnqueueClusterOperationRequest) returns (automation.EnqueueClusterOperationResponse) {}; + + // TODO(mberlin): Polling this is bad. Implement a subscribe mechanism to wait for changes? + // Get all details of an active cluster operation. + rpc GetClusterOperationDetails(automation.GetClusterOperationDetailsRequest) returns (automation.GetClusterOperationDetailsResponse) {}; +} diff --git a/internal/stackql-parser-fork/proto/binlogdata.proto b/internal/stackql-parser-fork/proto/binlogdata.proto new file mode 100644 index 00000000..27888108 --- /dev/null +++ b/internal/stackql-parser-fork/proto/binlogdata.proto @@ -0,0 +1,422 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains all the types and servers necessary to make +// RPC calls to VtTablet for the binlog protocol, used by filtered +// replication only. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/binlogdata"; + +package binlogdata; + +import "vtrpc.proto"; +import "query.proto"; +import "topodata.proto"; + +// Charset is the per-statement charset info from a QUERY_EVENT binlog entry. +message Charset { + // @@session.character_set_client + int32 client = 1; + // @@session.collation_connection + int32 conn = 2; + // @@session.collation_server + int32 server = 3; +} + +// BinlogTransaction describes a transaction inside the binlogs. +// It is streamed by vttablet for filtered replication, used during resharding. +message BinlogTransaction { + message Statement { + enum Category { + BL_UNRECOGNIZED = 0; + BL_BEGIN = 1; + BL_COMMIT = 2; + BL_ROLLBACK = 3; + // BL_DML is deprecated. + BL_DML_DEPRECATED = 4; + BL_DDL = 5; + BL_SET = 6; + BL_INSERT = 7; + BL_UPDATE = 8; + BL_DELETE = 9; + } + + // what type of statement is this? + Category category = 1; + + // charset of this statement, if different from pre-negotiated default. + Charset charset = 2; + + // the sql + bytes sql = 3; + } + + // the statements in this transaction + repeated Statement statements = 1; + + // DEPRECATED (replaced by event_token): the timestamp of the statements. + // int64 timestamp = 2; + reserved 2; + + // DEPRECATED (replaced by event_token): the Transaction ID after + // this statement was applied. + // string transaction_id = 3; + reserved 3; + + // The Event Token for this event. + query.EventToken event_token = 4; +} + +// StreamKeyRangeRequest is the payload to StreamKeyRange +message StreamKeyRangeRequest { + // where to start + string position = 1; + + // what to get + topodata.KeyRange key_range = 2; + + // default charset on the player side + Charset charset = 3; +} + +// StreamKeyRangeResponse is the response from StreamKeyRange +message StreamKeyRangeResponse{ + BinlogTransaction binlog_transaction = 1; +} + +// StreamTablesRequest is the payload to StreamTables +message StreamTablesRequest { + // where to start + string position = 1; + + // what to get + repeated string tables = 2; + + // default charset on the player side + Charset charset = 3; +} + +// StreamTablesResponse is the response from StreamTables +message StreamTablesResponse { + BinlogTransaction binlog_transaction = 1; +} + +// Rule represents one rule in a Filter. +message Rule { + // Match can be a table name or a regular expression. + // If it starts with a '/', it's a regular expression. + // For example, "t" matches a table named "t", whereas + // "/t.*" matches all tables that begin with 't'. + string match = 1; + // Filter: If empty, all columns and rows of the matching tables + // are sent. If it's a keyrange like "-80", only rows that + // match the keyrange are sent. + // If Match is a table name instead of a regular expression, + // the Filter can also be a select expression like this: + // "select * from t", same as an empty Filter, or + // "select * from t where in_keyrange('-80')", same as "-80", or + // "select col1, col2 from t where in_keyrange(col1, 'hash', '-80'), or + // What is allowed in a select expression depends on whether + // it's a vstreamer or vreplication request. For more details, + // please refer to the specific package documentation. + // On the vreplication side, Filter can also accept a special + // "exclude" value, which will cause the matched tables + // to be excluded. + // TODO(sougou): support this on vstreamer side also. + string filter = 2; +} + +// Filter represents a list of ordered rules. The first +// match wins. +message Filter { + repeated Rule rules = 1; + enum FieldEventMode { + ERR_ON_MISMATCH = 0; + BEST_EFFORT = 1; + } + // FieldEventMode specifies the behavior if there is a mismatch + // between the current schema and the fields in the binlog. This + // can happen if the binlog position is before a DDL that would + // cause the fields to change. If vstreamer detects such + // an inconsistency, the behavior depends on the FieldEventMode. + // If the value is ERR_ON_MISMATCH (default), then it errors out. + // If it's BEST_EFFORT, it sends a field event with fake column + // names as "@1", "@2", etc. + FieldEventMode fieldEventMode = 2; +} + +// OnDDLAction lists the possible actions for DDLs. +enum OnDDLAction { + IGNORE = 0; + STOP = 1; + EXEC = 2; + EXEC_IGNORE = 3; +} + +// BinlogSource specifies the source and filter parameters for +// Filtered Replication. KeyRange and Tables are legacy. Filter +// is the new way to specify the filtering rules. +message BinlogSource { + // the source keyspace + string keyspace = 1; + + // the source shard + string shard = 2; + + // the source tablet type + topodata.TabletType tablet_type = 3; + + // KeyRange is set if the request is for a keyrange + topodata.KeyRange key_range = 4; + + // Tables is set if the request is for a list of tables + repeated string tables = 5; + + // Filter is set if we're using the generalized representation + // for the filter. + Filter filter = 6; + + // OnDdl specifies the action to be taken when a DDL is encountered. + OnDDLAction on_ddl = 7; + + // Source is an external mysql. This attribute should be set to the username + // to use in the connection + string external_mysql = 8; + + // StopAfterCopy specifies if vreplication should be stopped + // after copying is done. + bool stop_after_copy = 9; +} + +// VEventType enumerates the event types. Many of these types +// will not be encountered in RBR mode. +enum VEventType { + UNKNOWN = 0; + GTID = 1; + BEGIN = 2; + COMMIT = 3; + ROLLBACK = 4; + DDL = 5; + // INSERT, REPLACE, UPDATE, DELETE and SET will not be seen in RBR mode. + INSERT = 6; + REPLACE = 7; + UPDATE = 8; + DELETE = 9; + SET = 10; + // OTHER is a dummy event. If encountered, the current GTID must be + // recorded by the client to be able to resume. + OTHER = 11; + ROW = 12; + FIELD = 13; + // HEARTBEAT is sent if there is inactivity. If a client does not + // receive events beyond the hearbeat interval, it can assume that it's + // lost connection to the vstreamer. + HEARTBEAT = 14; + // VGTID is generated by VTGate's VStream that combines multiple + // GTIDs. + VGTID = 15; + JOURNAL = 16; + VERSION = 17; + LASTPK = 18; +} + +// RowChange represents one row change. +// If Before is set and not After, it's a delete. +// If After is set and not Before, it's an insert. +// If both are set, it's an update. +message RowChange { + query.Row before = 1; + query.Row after = 2; +} + +// RowEvent represent row events for one table. +message RowEvent { + string table_name = 1; + repeated RowChange row_changes = 2; +} + +// FieldEvent represents the field info for a table. +message FieldEvent { + string table_name = 1; + repeated query.Field fields = 2; +} + +// ShardGtid contains the GTID position for one shard. +// It's used in a request for requesting a starting position. +// It's used in a response to transmit the current position +// of a shard. It's also used in a Journal to indicate the +// list of targets and shard positions to migrate to. +message ShardGtid { + string keyspace = 1; + string shard = 2; + string gtid = 3; + repeated TableLastPK table_p_ks = 4; +} + +// A VGtid is a list of ShardGtids. +message VGtid { + repeated ShardGtid shard_gtids = 1; +} + +// KeyspaceShard represents a keyspace and shard. +message KeyspaceShard { + string keyspace = 1; + string shard = 2; +} + +// MigrationType specifies the type of migration for the Journal. +enum MigrationType { + TABLES = 0; + SHARDS = 1; +} + +// Journal contains the metadata for a journal event. +// The commit of a journal event indicates the point of no return +// for a migration. +message Journal { + // Id represents a unique journal id. + int64 id = 1; + MigrationType migration_type = 2; + // Tables is set if the journal represents a TABLES migration. + repeated string tables = 3; + // LocalPosition is the source position at which the migration happened. + string local_position = 4; + // ShardGtids is the list of targets to which the migration took place. + repeated ShardGtid shard_gtids = 5; + // Participants is the list of source participants for a migration. + // Every participant is expected to have an identical journal entry. + // While streaming, the client must wait for the journal entry to + // be received from all pariticipants, and then replace them with new + // streams specified by ShardGtid. + // If a stream does not have all participants, a consistent migration + // is not possible. + repeated KeyspaceShard participants = 6; + // SourceWorkflows is the list of workflows in the source shard that + // were migrated to the target. If a migration fails after a Journal + // is committed, this information is used to start the target streams + // that were created prior to the creation of the journal. + repeated string source_workflows = 7; +} + +// VEvent represents a vstream event. +// A FieldEvent is sent once for every table, just before +// the first event for that table. The client is expected +// to cache this information and match it against the RowEvent +// which contains the table name. +// A GTID event always precedes a commitable event, which can be +// COMMIT, DDL or OTHER. +// OTHER events are non-material events that have no additional metadata. +message VEvent { + VEventType type = 1; + // Timestamp is the binlog timestamp in seconds. + // The value should be ignored if 0. + int64 timestamp = 2; + // Gtid is set if the event type is GTID. + string gtid = 3; + // Ddl is set if the event type is DDL. + string ddl = 4; + // RowEvent is set if the event type is ROW. + RowEvent row_event = 5; + // FieldEvent is set if the event type is FIELD. + FieldEvent field_event = 6; + // Vgtid is set if the event type is VGTID. + // This event is only generated by VTGate's VStream function. + VGtid vgtid = 7; + // Journal is set if the event type is JOURNAL. + Journal journal = 8; + // Dml is set if the event type is INSERT, REPLACE, UPDATE or DELETE. + string dml = 9; + // CurrentTime specifies the current time when the message was sent. + // This can be used to compenssate for clock skew. + int64 current_time = 20; + // LastPK is the last PK for a table + LastPKEvent last_p_k_event = 21; +} + +message MinimalTable { + string name = 1; + repeated query.Field fields = 2; + repeated int64 p_k_columns = 3; +} + +message MinimalSchema { + repeated MinimalTable tables = 1; +} + +// VStreamRequest is the payload for VStreamer +message VStreamRequest { + vtrpc.CallerID effective_caller_id = 1; + query.VTGateCallerID immediate_caller_id = 2; + query.Target target = 3; + + string position = 4; + Filter filter = 5; + repeated TableLastPK table_last_p_ks = 6; +} + +// VStreamResponse is the response from VStreamer +message VStreamResponse { + repeated VEvent events = 1; +} + +// VStreamRowsRequest is the payload for VStreamRows +message VStreamRowsRequest { + vtrpc.CallerID effective_caller_id = 1; + query.VTGateCallerID immediate_caller_id = 2; + query.Target target = 3; + + string query = 4; + query.QueryResult lastpk = 5; +} + +// VStreamRowsResponse is the response from VStreamRows +message VStreamRowsResponse { + repeated query.Field fields = 1; + repeated query.Field pkfields = 2; + string gtid = 3; + repeated query.Row rows = 4; + query.Row lastpk = 5; +} + +message LastPKEvent { + TableLastPK table_last_p_k = 1; + bool completed = 2; +} + +message TableLastPK { + string table_name = 1; + query.QueryResult lastpk = 3; +} + +// VStreamResultsRequest is the payload for VStreamResults +// The ids match VStreamRows, in case we decide to merge the two. +// The ids match VStreamRows, in case we decide to merge the two. +message VStreamResultsRequest { + vtrpc.CallerID effective_caller_id = 1; + query.VTGateCallerID immediate_caller_id = 2; + query.Target target = 3; + + string query = 4; +} + +// VStreamResultsResponse is the response from VStreamResults +// The ids match VStreamRows, in case we decide to merge the two. +message VStreamResultsResponse { + repeated query.Field fields = 1; + string gtid = 3; + repeated query.Row rows = 4; +} diff --git a/internal/stackql-parser-fork/proto/binlogservice.proto b/internal/stackql-parser-fork/proto/binlogservice.proto new file mode 100644 index 00000000..6d919f52 --- /dev/null +++ b/internal/stackql-parser-fork/proto/binlogservice.proto @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains the UpdateStream service definition, necessary +// to make RPC calls to VtTablet for the binlog protocol, used by +// filtered replication only. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/binlogservice"; + +package binlogservice; + +import "binlogdata.proto"; + +// UpdateStream is the RPC version of binlog.UpdateStream. +service UpdateStream { + // StreamKeyRange returns the binlog transactions related to + // the specified Keyrange. + rpc StreamKeyRange(binlogdata.StreamKeyRangeRequest) returns (stream binlogdata.StreamKeyRangeResponse) {}; + + // StreamTables returns the binlog transactions related to + // the specified Tables. + rpc StreamTables(binlogdata.StreamTablesRequest) returns (stream binlogdata.StreamTablesResponse) {}; +} diff --git a/internal/stackql-parser-fork/proto/logutil.proto b/internal/stackql-parser-fork/proto/logutil.proto new file mode 100644 index 00000000..57186566 --- /dev/null +++ b/internal/stackql-parser-fork/proto/logutil.proto @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package contains the data structures for the logging service. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/logutil"; + +package logutil; + +import "vttime.proto"; + +// Level is the level of the log messages. +enum Level { + // The usual logging levels. + // Should be logged using logging facility. + INFO = 0; + WARNING = 1; + ERROR = 2; + + // For messages that may contains non-logging events. + // Should be logged to console directly. + CONSOLE = 3; +} + +// Event is a single logging event +message Event { + vttime.Time time = 1; + Level level = 2; + string file = 3; + int64 line = 4; + string value = 5; +} diff --git a/internal/stackql-parser-fork/proto/mysqlctl.proto b/internal/stackql-parser-fork/proto/mysqlctl.proto new file mode 100644 index 00000000..274f82e7 --- /dev/null +++ b/internal/stackql-parser-fork/proto/mysqlctl.proto @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains the service definition for making management API +// calls to mysqlctld. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/mysqlctl"; + +package mysqlctl; + +message StartRequest{ + repeated string mysqld_args = 1; +} + +message StartResponse{} + +message ShutdownRequest{ + bool wait_for_mysqld = 1; +} + +message ShutdownResponse{} + +message RunMysqlUpgradeRequest{} + +message RunMysqlUpgradeResponse{} + +message ReinitConfigRequest{} + +message ReinitConfigResponse{} + +message RefreshConfigRequest{} + +message RefreshConfigResponse{} + +// MysqlCtl is the service definition +service MysqlCtl { + rpc Start(StartRequest) returns (StartResponse) {}; + rpc Shutdown(ShutdownRequest) returns (ShutdownResponse) {}; + rpc RunMysqlUpgrade(RunMysqlUpgradeRequest) returns (RunMysqlUpgradeResponse) {}; + rpc ReinitConfig(ReinitConfigRequest) returns (ReinitConfigResponse) {}; + rpc RefreshConfig(RefreshConfigRequest) returns (RefreshConfigResponse) {}; +} diff --git a/internal/stackql-parser-fork/proto/query.proto b/internal/stackql-parser-fork/proto/query.proto new file mode 100644 index 00000000..7ad8ff13 --- /dev/null +++ b/internal/stackql-parser-fork/proto/query.proto @@ -0,0 +1,866 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains all the types necessary to make +// RPC calls to Vttablet. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/query"; + +package query; + +option java_package="io.vitess.proto"; + +import "topodata.proto"; +import "vtrpc.proto"; + +// Target describes what the client expects the tablet is. +// If the tablet does not match, an error is returned. +message Target { + string keyspace = 1; + string shard = 2; + topodata.TabletType tablet_type = 3; + // cell is used for routing queries between vtgate and vttablets. It + // is not used when Target is part of the Session sent by the client. + string cell = 4; +} + +// VTGateCallerID is sent by VTGate to VTTablet to describe the +// caller. If possible, this information is secure. For instance, +// if using unique certificates that guarantee that VTGate->VTTablet +// traffic cannot be spoofed, then VTTablet can trust this information, +// and VTTablet will use it for tablet ACLs, for instance. +// Because of this security guarantee, this is different than the CallerID +// structure, which is not secure at all, because it is provided +// by the Vitess client. +message VTGateCallerID { + string username = 1; + repeated string groups = 2; +} + +// EventToken is a structure that describes a point in time in a +// replication stream on one shard. The most recent known replication +// position can be retrieved from vttablet when executing a query. It +// is also sent with the replication streams from the binlog service. +message EventToken { + // timestamp is the MySQL timestamp of the statements. Seconds since Epoch. + int64 timestamp = 1; + + // The shard name that applied the statements. Note this is not set when + // streaming from a vttablet. It is only used on the client -> vtgate link. + string shard = 2; + + // The position on the replication stream after this statement was applied. + // It is not the transaction ID / GTID, but the position / GTIDSet. + string position = 3; +} + +// Flags sent from the MySQL C API +enum MySqlFlag { + EMPTY = 0; + NOT_NULL_FLAG = 1; + PRI_KEY_FLAG = 2; + UNIQUE_KEY_FLAG = 4; + MULTIPLE_KEY_FLAG = 8; + BLOB_FLAG = 16; + UNSIGNED_FLAG = 32; + ZEROFILL_FLAG = 64; + BINARY_FLAG = 128; + ENUM_FLAG = 256; + AUTO_INCREMENT_FLAG = 512; + TIMESTAMP_FLAG = 1024; + SET_FLAG = 2048; + NO_DEFAULT_VALUE_FLAG = 4096; + ON_UPDATE_NOW_FLAG = 8192; + NUM_FLAG = 32768; + PART_KEY_FLAG = 16384; + GROUP_FLAG = 32768; + UNIQUE_FLAG = 65536; + BINCMP_FLAG = 131072; + + option allow_alias = true; +} + +// Flag allows us to qualify types by their common properties. +enum Flag { + NONE = 0; + ISINTEGRAL = 256; + ISUNSIGNED = 512; + ISFLOAT = 1024; + ISQUOTED = 2048; + ISTEXT = 4096; + ISBINARY = 8192; +} + +// Type defines the various supported data types in bind vars +// and query results. +enum Type { + // NULL_TYPE specifies a NULL type. + NULL_TYPE = 0; + // INT8 specifies a TINYINT type. + // Properties: 1, IsNumber. + INT8 = 257; + // UINT8 specifies a TINYINT UNSIGNED type. + // Properties: 2, IsNumber, IsUnsigned. + UINT8 = 770; + // INT16 specifies a SMALLINT type. + // Properties: 3, IsNumber. + INT16 = 259; + // UINT16 specifies a SMALLINT UNSIGNED type. + // Properties: 4, IsNumber, IsUnsigned. + UINT16 = 772; + // INT24 specifies a MEDIUMINT type. + // Properties: 5, IsNumber. + INT24 = 261; + // UINT24 specifies a MEDIUMINT UNSIGNED type. + // Properties: 6, IsNumber, IsUnsigned. + UINT24 = 774; + // INT32 specifies a INTEGER type. + // Properties: 7, IsNumber. + INT32 = 263; + // UINT32 specifies a INTEGER UNSIGNED type. + // Properties: 8, IsNumber, IsUnsigned. + UINT32 = 776; + // INT64 specifies a BIGINT type. + // Properties: 9, IsNumber. + INT64 = 265; + // UINT64 specifies a BIGINT UNSIGNED type. + // Properties: 10, IsNumber, IsUnsigned. + UINT64 = 778; + // FLOAT32 specifies a FLOAT type. + // Properties: 11, IsFloat. + FLOAT32 = 1035; + // FLOAT64 specifies a DOUBLE or REAL type. + // Properties: 12, IsFloat. + FLOAT64 = 1036; + // TIMESTAMP specifies a TIMESTAMP type. + // Properties: 13, IsQuoted. + TIMESTAMP = 2061; + // DATE specifies a DATE type. + // Properties: 14, IsQuoted. + DATE = 2062; + // TIME specifies a TIME type. + // Properties: 15, IsQuoted. + TIME = 2063; + // DATETIME specifies a DATETIME type. + // Properties: 16, IsQuoted. + DATETIME = 2064; + // YEAR specifies a YEAR type. + // Properties: 17, IsNumber, IsUnsigned. + YEAR = 785; + // DECIMAL specifies a DECIMAL or NUMERIC type. + // Properties: 18, None. + DECIMAL = 18; + // TEXT specifies a TEXT type. + // Properties: 19, IsQuoted, IsText. + TEXT = 6163; + // BLOB specifies a BLOB type. + // Properties: 20, IsQuoted, IsBinary. + BLOB = 10260; + // VARCHAR specifies a VARCHAR type. + // Properties: 21, IsQuoted, IsText. + VARCHAR = 6165; + // VARBINARY specifies a VARBINARY type. + // Properties: 22, IsQuoted, IsBinary. + VARBINARY = 10262; + // CHAR specifies a CHAR type. + // Properties: 23, IsQuoted, IsText. + CHAR = 6167; + // BINARY specifies a BINARY type. + // Properties: 24, IsQuoted, IsBinary. + BINARY = 10264; + // BIT specifies a BIT type. + // Properties: 25, IsQuoted. + BIT = 2073; + // ENUM specifies an ENUM type. + // Properties: 26, IsQuoted. + ENUM = 2074; + // SET specifies a SET type. + // Properties: 27, IsQuoted. + SET = 2075; + // TUPLE specifies a tuple. This cannot + // be returned in a QueryResult, but it can + // be sent as a bind var. + // Properties: 28, None. + TUPLE = 28; + // GEOMETRY specifies a GEOMETRY type. + // Properties: 29, IsQuoted. + GEOMETRY = 2077; + // JSON specifies a JSON type. + // Properties: 30, IsQuoted. + JSON = 2078; + // EXPRESSION specifies a SQL expression. + // This type is for internal use only. + // Properties: 31, None. + EXPRESSION = 31; +} + +// Value represents a typed value. +message Value { + Type type = 1; + bytes value = 2; +} + +// BindVariable represents a single bind variable in a Query. +message BindVariable { + Type type = 1; + bytes value = 2; + // values are set if type is TUPLE. + repeated Value values = 3; +} + +// BoundQuery is a query with its bind variables +message BoundQuery { + // sql is the SQL query to execute + string sql = 1; + + // bind_variables is a map of all bind variables to expand in the query. + // nil values are not allowed. Use NULL_TYPE to express a NULL value. + map bind_variables = 2; +} + +// ExecuteOptions is passed around for all Execute calls. +message ExecuteOptions { + // 1 used to be exclude_field_names, which was replaced by + // IncludedFields enum below + // 2 used to be include_event_token + // 3 used to be compare_event_token + reserved 1, 2, 3; + + enum IncludedFields { + TYPE_AND_NAME = 0; + TYPE_ONLY = 1; + ALL = 2; + } + + // Controls what fields are returned in Field message responses from mysql, i.e. + // field name, table name, etc. This is an optimization for high-QPS queries where + // the client knows what it's getting + IncludedFields included_fields = 4; + + // client_rows_found specifies if rows_affected should return + // rows found instead of rows affected. Behavior is defined + // by MySQL's CLIENT_FOUND_ROWS flag. + bool client_found_rows = 5; + + enum Workload { + UNSPECIFIED = 0; + OLTP = 1; + OLAP = 2; + DBA = 3; + } + + // workload specifies the type of workload: + // OLTP: DMLs allowed, results have row count limit, and + // query timeouts are shorter. + // OLAP: DMLS not allowed, no limit on row count, timeouts + // can be as high as desired. + // DBA: no limit on rowcount or timeout, all queries allowed + // but intended for long DMLs and DDLs. + Workload workload = 6; + + // sql_select_limit sets an implicit limit on all select statements. Since + // vitess also sets a rowcount limit on queries, the smallest value wins. + int64 sql_select_limit = 8; + + enum TransactionIsolation { + DEFAULT = 0; + REPEATABLE_READ = 1; + READ_COMMITTED = 2; + READ_UNCOMMITTED = 3; + SERIALIZABLE = 4; + + // This is not an "official" transaction level but it will do a + // START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY + CONSISTENT_SNAPSHOT_READ_ONLY = 5; + + // This not an "official" transaction level, it will send queries to mysql + // without wrapping them in a transaction + AUTOCOMMIT = 6; + } + + TransactionIsolation transaction_isolation = 9; + + // skip_query_plan_cache specifies if the query plan should be cached by vitess. + // By default all query plans are cached. + bool skip_query_plan_cache = 10; +} + +// Field describes a single column returned by a query +message Field { + // name of the field as returned by mysql C API + string name = 1; + + // vitess-defined type. Conversion function is in sqltypes package. + Type type = 2; + + // Remaining fields from mysql C API. + // These fields are only populated when ExecuteOptions.included_fields + // is set to IncludedFields.ALL. + string table = 3; + string org_table = 4; + string database = 5; + string org_name = 6; + + // column_length is really a uint32. All 32 bits can be used. + uint32 column_length = 7; + + // charset is actually a uint16. Only the lower 16 bits are used. + uint32 charset = 8; + + // decimals is actually a uint8. Only the lower 8 bits are used. + uint32 decimals = 9; + + // flags is actually a uint16. Only the lower 16 bits are used. + uint32 flags = 10; +} + +// Row is a database row. +message Row { + // lengths contains the length of each value in values. + // A length of -1 means that the field is NULL. While + // reading values, you have to accummulate the length + // to know the offset where the next value begins in values. + repeated sint64 lengths = 1; + // values contains a concatenation of all values in the row. + bytes values = 2; +} + +// QueryResult is returned by Execute and ExecuteStream. +// +// As returned by Execute, len(fields) is always equal to len(row) +// (for each row in rows). +// +// As returned by StreamExecute, the first QueryResult has the fields +// set, and subsequent QueryResult have rows set. And as Execute, +// len(QueryResult[0].fields) is always equal to len(row) (for each +// row in rows for each QueryResult in QueryResult[1:]). +message QueryResult { + // This used to be ResultExtras. + reserved 5; + + repeated Field fields = 1; + uint64 rows_affected = 2; + uint64 insert_id = 3; + repeated Row rows = 4; +} + +// QueryWarning is used to convey out of band query execution warnings +// by storing in the vtgate.Session +message QueryWarning { + uint32 code = 1; + string message = 2; +} + +// StreamEvent describes a set of transformations that happened as a +// single transactional unit on a server. It is streamed back by the +// Update Stream calls. +message StreamEvent { + // One individual Statement in a transaction. + message Statement { + // The category of one statement. + enum Category { + Error = 0; + DML = 1; + DDL = 2; + } + Category category = 1; + + // table_name, primary_key_fields and primary_key_values are set for DML. + string table_name = 2; + repeated Field primary_key_fields = 3; + repeated Row primary_key_values = 4; + + // sql is set for all queries. + // FIXME(alainjobart) we may not need it for DMLs. + bytes sql = 5; + } + + // The statements in this transaction. + repeated Statement statements = 1; + + // The Event Token for this event. + EventToken event_token = 2; +} + +// ExecuteRequest is the payload to Execute +message ExecuteRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + BoundQuery query = 4; + int64 transaction_id = 5; + ExecuteOptions options = 6; + int64 reserved_id = 7; +} + +// ExecuteResponse is the returned value from Execute +message ExecuteResponse { + QueryResult result = 1; +} + +// ResultWithError represents a query response +// in the form of result or error but not both. +// TODO: To be used in ExecuteBatchResponse and BeginExecuteBatchResponse. +message ResultWithError { + // error contains an query level error, only set if result is unset. + vtrpc.RPCError error = 1; + + // result contains the query result, only set if error is unset. + query.QueryResult result = 2; +} + +// ExecuteBatchRequest is the payload to ExecuteBatch +message ExecuteBatchRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + repeated BoundQuery queries = 4; + bool as_transaction = 5; + int64 transaction_id = 6; + ExecuteOptions options = 7; +} + +// ExecuteBatchResponse is the returned value from ExecuteBatch +message ExecuteBatchResponse { + repeated QueryResult results = 1; +} + +// StreamExecuteRequest is the payload to StreamExecute +message StreamExecuteRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + BoundQuery query = 4; + ExecuteOptions options = 5; + int64 transaction_id = 6; +} + +// StreamExecuteResponse is the returned value from StreamExecute +message StreamExecuteResponse { + QueryResult result = 1; +} + +// BeginRequest is the payload to Begin +message BeginRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + ExecuteOptions options = 4; +} + +// BeginResponse is the returned value from Begin +message BeginResponse { + int64 transaction_id = 1; + topodata.TabletAlias tablet_alias = 2; +} + +// CommitRequest is the payload to Commit +message CommitRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + int64 transaction_id = 4; +} + +// CommitResponse is the returned value from Commit +message CommitResponse { + int64 reserved_id = 1; +} + +// RollbackRequest is the payload to Rollback +message RollbackRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + int64 transaction_id = 4; +} + +// RollbackResponse is the returned value from Rollback +message RollbackResponse { + int64 reserved_id = 1; +} + +// PrepareRequest is the payload to Prepare +message PrepareRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + int64 transaction_id = 4; + string dtid = 5; +} + +// PrepareResponse is the returned value from Prepare +message PrepareResponse {} + +// CommitPreparedRequest is the payload to CommitPrepared +message CommitPreparedRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + string dtid = 4; +} + +// CommitPreparedResponse is the returned value from CommitPrepared +message CommitPreparedResponse {} + +// RollbackPreparedRequest is the payload to RollbackPrepared +message RollbackPreparedRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + int64 transaction_id = 4; + string dtid = 5; +} + +// RollbackPreparedResponse is the returned value from RollbackPrepared +message RollbackPreparedResponse {} + +// CreateTransactionRequest is the payload to CreateTransaction +message CreateTransactionRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + string dtid = 4; + repeated Target participants = 5; +} + +// CreateTransactionResponse is the returned value from CreateTransaction +message CreateTransactionResponse {} + +// StartCommitRequest is the payload to StartCommit +message StartCommitRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + int64 transaction_id = 4; + string dtid = 5; +} + +// StartCommitResponse is the returned value from StartCommit +message StartCommitResponse {} + +// SetRollbackRequest is the payload to SetRollback +message SetRollbackRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + int64 transaction_id = 4; + string dtid = 5; +} + +// SetRollbackResponse is the returned value from SetRollback +message SetRollbackResponse {} + +// ConcludeTransactionRequest is the payload to ConcludeTransaction +message ConcludeTransactionRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + string dtid = 4; +} + +// ConcludeTransactionResponse is the returned value from ConcludeTransaction +message ConcludeTransactionResponse {} + +// ReadTransactionRequest is the payload to ReadTransaction +message ReadTransactionRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + string dtid = 4; +} + +// ReadTransactionResponse is the returned value from ReadTransaction +message ReadTransactionResponse { + TransactionMetadata metadata = 1; +} + +// BeginExecuteRequest is the payload to BeginExecute +message BeginExecuteRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + BoundQuery query = 4; + ExecuteOptions options = 5; + int64 reserved_id = 6; +} + +// BeginExecuteResponse is the returned value from BeginExecute +message BeginExecuteResponse { + // error contains an application level error if necessary. Note the + // transaction_id may be set, even when an error is returned, if the begin + // worked but the execute failed. + vtrpc.RPCError error = 1; + + QueryResult result = 2; + + // transaction_id might be non-zero even if an error is present. + int64 transaction_id = 3; + topodata.TabletAlias tablet_alias = 4; +} + +// BeginExecuteBatchRequest is the payload to BeginExecuteBatch +message BeginExecuteBatchRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + repeated BoundQuery queries = 4; + bool as_transaction = 5; + ExecuteOptions options = 6; +} + +// BeginExecuteBatchResponse is the returned value from BeginExecuteBatch +message BeginExecuteBatchResponse { + // error contains an application level error if necessary. Note the + // transaction_id may be set, even when an error is returned, if the begin + // worked but the execute failed. + vtrpc.RPCError error = 1; + + repeated QueryResult results = 2; + + // transaction_id might be non-zero even if an error is present. + int64 transaction_id = 3; + topodata.TabletAlias tablet_alias = 4; +} + +// MessageStreamRequest is the request payload for MessageStream. +message MessageStreamRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + // name is the message table name. + string name = 4; +} + +// MessageStreamResponse is a response for MessageStream. +message MessageStreamResponse { + QueryResult result = 1; +} + +// MessageAckRequest is the request payload for MessageAck. +message MessageAckRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + // name is the message table name. + string name = 4; + repeated Value ids = 5; +} + +// MessageAckResponse is the response for MessageAck. +message MessageAckResponse { + // result contains the result of the ack operation. + // Since this acts like a DML, only + // RowsAffected is returned in the result. + QueryResult result = 1; +} + +// ReserveExecuteRequest is the payload to ReserveExecute +message ReserveExecuteRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + BoundQuery query = 4; + int64 transaction_id = 5; + ExecuteOptions options = 6; + repeated string pre_queries = 7; +} + +// ReserveExecuteResponse is the returned value from ReserveExecute +message ReserveExecuteResponse { + vtrpc.RPCError error = 1; + QueryResult result = 2; + + // The following fields might be non-zero even if an error is present. + int64 reserved_id = 3; + topodata.TabletAlias tablet_alias = 4; +} + +// ReserveBeginExecuteRequest is the payload to ReserveBeginExecute +message ReserveBeginExecuteRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + BoundQuery query = 4; + ExecuteOptions options = 5; + repeated string pre_queries = 6; +} + +// ReserveBeginExecuteResponse is the returned value from ReserveBeginExecute +message ReserveBeginExecuteResponse { + // error contains an application level error if necessary. Note the + // transaction_id may be set, even when an error is returned, if the begin + // worked but the execute failed. + vtrpc.RPCError error = 1; + QueryResult result = 2; + // The following fields might be non-zero even if an error is present. + int64 transaction_id = 3; + int64 reserved_id = 4; + topodata.TabletAlias tablet_alias = 5; +} + +// ReleaseRequest is the payload to Release +message ReleaseRequest { + vtrpc.CallerID effective_caller_id = 1; + VTGateCallerID immediate_caller_id = 2; + Target target = 3; + int64 transaction_id = 4; + int64 reserved_id = 5; +} + +// ReleaseResponse is the returned value from Release +message ReleaseResponse { +} + +// StreamHealthRequest is the payload for StreamHealth +message StreamHealthRequest { +} + +// RealtimeStats contains information about the tablet status. +// It is only valid for a single tablet. +message RealtimeStats { + // health_error is the last error we got from health check, + // or empty is the server is healthy. This is used for subset selection, + // we do not send queries to servers that are not healthy. + string health_error = 1; + + // seconds_behind_master is populated for replicas only. It indicates + // how far behind on (MySQL) replication a replica currently is. It is used + // by clients for subset selection (so we don't try to send traffic + // to tablets that are too far behind). + // NOTE: This field must not be evaluated if "health_error" is not empty. + // TODO(mberlin): Let's switch it to int64 instead? + uint32 seconds_behind_master = 2; + + // bin_log_players_count is the number of currently running binlog players. + // if the value is 0, it means that filtered replication is currently not + // running on the tablet. If >0, filtered replication is running. + // NOTE: This field must not be evaluated if "health_error" is not empty. + int32 binlog_players_count = 3; + + // seconds_behind_master_filtered_replication is populated for the receiving + // master of an ongoing filtered replication only. + // It specifies how far the receiving master lags behind the sending master. + // NOTE: This field must not be evaluated if "health_error" is not empty. + // NOTE: This field must not be evaluated if "bin_log_players_count" is 0. + int64 seconds_behind_master_filtered_replication = 4; + + // cpu_usage is used for load-based balancing + double cpu_usage = 5; + + // qps is the average QPS (queries per second) rate in the last XX seconds + // where XX is usually 60 (See query_service_stats.go). + double qps = 6; +} + +// AggregateStats contains information about the health of a group of +// tablets for a Target. It is used to propagate stats from a vtgate +// to another, or from the Gateway layer of a vtgate to the routing +// layer. +message AggregateStats { + // healthy_tablet_count is the number of healthy tablets in the group. + int32 healthy_tablet_count = 1; + + // unhealthy_tablet_count is the number of unhealthy tablets in the group. + int32 unhealthy_tablet_count = 2; + + // seconds_behind_master_min is the minimum of the + // seconds_behind_master values of the healthy tablets. It is unset + // if the tablet type is master. + uint32 seconds_behind_master_min = 3; + + // seconds_behind_master_max is the maximum of the + // seconds_behind_master values of the healthy tablets. It is unset + // if the tablet type is master. + uint32 seconds_behind_master_max = 4; +} + +// StreamHealthResponse is streamed by StreamHealth on a regular basis. +// It is expected to be used between a vtgate and vttablet: +// - target describes the tablet. +// - realtime_stats is set. +// - aggregate_stats is not set (deprecated) +message StreamHealthResponse { + // target is the current server type. Only queries with that exact Target + // record will be accepted (the cell may not match, however). + Target target = 1; + + // serving is true iff the tablet is serving. A tablet may not be serving + // if filtered replication is enabled on a master for instance, + // or if a replica should not be used because the keyspace is being resharded. + bool serving = 2; + + // tablet_externally_reparented_timestamp can be interpreted as the + // last time we knew that this tablet was the MASTER of this shard + // (if StreamHealthResponse describes a group of tablets, between + // two vtgates, only one master will be present in the group, and + // this is this master's value). + // + // It is used by vtgate when determining the current MASTER of a shard. + // If vtgate sees more than one MASTER tablet, this timestamp is used + // as tiebreaker where the MASTER with the highest timestamp wins. + // Another usage of this timestamp is in go/vt/vtgate/buffer to detect the end + // of a reparent (failover) and stop buffering. + // + // In practice, this field is set to: + // a) the last time the RPC tabletmanager.TabletExternallyReparented was + // called on this tablet (usually done by an external failover tool e.g. + // Orchestrator). The failover tool can call this as long as we are the + // master i.e. even ages after the last reparent occurred. + // OR + // b) the last time an active reparent was executed through a vtctl command + // (InitShardMaster, PlannedReparentShard, EmergencyReparentShard) + // OR + // c) the last time vttablet was started and it initialized its tablet type + // as MASTER because it was recorded as the shard's current master in the + // topology (see go/vt/vttablet/tabletmanager/init_tablet.go) + // OR + // d) 0 if the vttablet was never a MASTER. + int64 tablet_externally_reparented_timestamp = 3; + + // realtime_stats contains information about the tablet status. + // It is only filled in if the information is about a tablet. + RealtimeStats realtime_stats = 4; + + reserved 6; + // Deprecated + // AggregateStats constrains information about the group of tablet status. + // It is only filled in if the information is about a group of tablets. + // AggregateStats aggregate_stats = 6; + + // tablet_alias is the alias of the sending tablet. The discovery/healthcheck.go + // code uses it to verify that it's talking to the correct tablet and that it + // hasn't changed in the meantime e.g. due to tablet restarts where ports or + // ips have been reused but assigned differently. + topodata.TabletAlias tablet_alias = 5; +} + +// TransactionState represents the state of a distributed transaction. +enum TransactionState { + UNKNOWN = 0; + PREPARE = 1; + COMMIT = 2; + ROLLBACK = 3; +} + +// TransactionMetadata contains the metadata for a distributed transaction. +message TransactionMetadata { + string dtid = 1; + TransactionState state = 2; + int64 time_created = 3; + repeated Target participants = 4; +} diff --git a/internal/stackql-parser-fork/proto/queryservice.proto b/internal/stackql-parser-fork/proto/queryservice.proto new file mode 100644 index 00000000..00b260eb --- /dev/null +++ b/internal/stackql-parser-fork/proto/queryservice.proto @@ -0,0 +1,104 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains the service VtTablet exposes for queries. + +syntax = "proto3"; + +package queryservice; +option go_package = "vitess.io/vitess/go/vt/proto/queryservice"; + +import "query.proto"; +import "binlogdata.proto"; + +// Query defines the tablet query service, implemented by vttablet. +service Query { + // Execute executes the specified SQL query (might be in a + // transaction context, if Query.transaction_id is set). + rpc Execute(query.ExecuteRequest) returns (query.ExecuteResponse) {}; + + // ExecuteBatch executes a list of queries, and returns the result + // for each query. + rpc ExecuteBatch(query.ExecuteBatchRequest) returns (query.ExecuteBatchResponse) {}; + + // StreamExecute executes a streaming query. Use this method if the + // query returns a large number of rows. The first QueryResult will + // contain the Fields, subsequent QueryResult messages will contain + // the rows. + rpc StreamExecute(query.StreamExecuteRequest) returns (stream query.StreamExecuteResponse) {}; + + // Begin a transaction. + rpc Begin(query.BeginRequest) returns (query.BeginResponse) {}; + + // Commit a transaction. + rpc Commit(query.CommitRequest) returns (query.CommitResponse) {}; + + // Rollback a transaction. + rpc Rollback(query.RollbackRequest) returns (query.RollbackResponse) {}; + + // Prepare preares a transaction. + rpc Prepare(query.PrepareRequest) returns (query.PrepareResponse) {}; + + // CommitPrepared commits a prepared transaction. + rpc CommitPrepared(query.CommitPreparedRequest) returns (query.CommitPreparedResponse) {}; + + // RollbackPrepared rolls back a prepared transaction. + rpc RollbackPrepared(query.RollbackPreparedRequest) returns (query.RollbackPreparedResponse) {}; + + // CreateTransaction creates the metadata for a 2pc transaction. + rpc CreateTransaction(query.CreateTransactionRequest) returns (query.CreateTransactionResponse) {}; + + // StartCommit initiates a commit for a 2pc transaction. + rpc StartCommit(query.StartCommitRequest) returns (query.StartCommitResponse) {}; + + // SetRollback marks the 2pc transaction for rollback. + rpc SetRollback(query.SetRollbackRequest) returns (query.SetRollbackResponse) {}; + + // ConcludeTransaction marks the 2pc transaction as resolved. + rpc ConcludeTransaction(query.ConcludeTransactionRequest) returns (query.ConcludeTransactionResponse) {}; + + // ReadTransaction returns the 2pc transaction info. + rpc ReadTransaction(query.ReadTransactionRequest) returns (query.ReadTransactionResponse) {}; + + // BeginExecute executes a begin and the specified SQL query. + rpc BeginExecute(query.BeginExecuteRequest) returns (query.BeginExecuteResponse) {}; + + // BeginExecuteBatch executes a begin and a list of queries. + rpc BeginExecuteBatch(query.BeginExecuteBatchRequest) returns (query.BeginExecuteBatchResponse) {}; + + // MessageStream streams messages from a message table. + rpc MessageStream(query.MessageStreamRequest) returns (stream query.MessageStreamResponse) {}; + + // MessageAck acks messages for a table. + rpc MessageAck(query.MessageAckRequest) returns (query.MessageAckResponse) {}; + + rpc ReserveExecute(query.ReserveExecuteRequest) returns (query.ReserveExecuteResponse) {}; + rpc ReserveBeginExecute(query.ReserveBeginExecuteRequest) returns (query.ReserveBeginExecuteResponse) {}; + rpc Release(query.ReleaseRequest) returns (query.ReleaseResponse) {}; + + // StreamHealth runs a streaming RPC to the tablet, that returns the + // current health of the tablet on a regular basis. + rpc StreamHealth(query.StreamHealthRequest) returns (stream query.StreamHealthResponse) {}; + + // VStream streams vreplication events. + rpc VStream(binlogdata.VStreamRequest) returns (stream binlogdata.VStreamResponse) {}; + + // VStreamRows streams rows from the specified starting point. + rpc VStreamRows(binlogdata.VStreamRowsRequest) returns (stream binlogdata.VStreamRowsResponse) {}; + + // VStreamResults streams results along with the gtid of the snapshot. + rpc VStreamResults(binlogdata.VStreamResultsRequest) returns (stream binlogdata.VStreamResultsResponse) {}; +} diff --git a/internal/stackql-parser-fork/proto/replicationdata.proto b/internal/stackql-parser-fork/proto/replicationdata.proto new file mode 100644 index 00000000..3a13ce4c --- /dev/null +++ b/internal/stackql-parser-fork/proto/replicationdata.proto @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file defines the replication related structures we use. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/replicationdata"; + +package replicationdata; + +// Status is the replication status for MySQL/MariaDB/File-based. Returned by a +// flavor-specific command and parsed into a Position and fields. +message Status { + string position = 1; + bool io_thread_running = 2; + bool sql_thread_running = 3; + uint32 seconds_behind_master = 4; + string master_host = 5; + int32 master_port = 6; + int32 master_connect_retry = 7; + // RelayLogPosition will be empty for flavors that do not support returning the full GTIDSet from the relay log, such as MariaDB. + string relay_log_position = 8; + string file_position = 9; + string file_relay_log_position = 10; + uint32 master_server_id = 11; + string master_uuid = 12; +} diff --git a/internal/stackql-parser-fork/proto/tableacl.proto b/internal/stackql-parser-fork/proto/tableacl.proto new file mode 100644 index 00000000..d041c78f --- /dev/null +++ b/internal/stackql-parser-fork/proto/tableacl.proto @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Table ACL proto definitions. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/tableacl"; + +package tableacl; + +// TableGroupSpec defines ACLs for a group of tables. +message TableGroupSpec { + string name = 1; + // either tables or a table name prefixes (if it ends in a %) + repeated string table_names_or_prefixes = 2; + repeated string readers = 3; + repeated string writers = 4; + repeated string admins = 5; +} + +message Config { + repeated TableGroupSpec table_groups = 1; +} diff --git a/internal/stackql-parser-fork/proto/tabletmanagerdata.proto b/internal/stackql-parser-fork/proto/tabletmanagerdata.proto new file mode 100644 index 00000000..784590ee --- /dev/null +++ b/internal/stackql-parser-fork/proto/tabletmanagerdata.proto @@ -0,0 +1,520 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains all the types and servers necessary to make +// RPC calls to VtTablet for the management API. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/tabletmanagerdata"; + +package tabletmanagerdata; + +import "query.proto"; +import "topodata.proto"; +import "replicationdata.proto"; +import "logutil.proto"; + +// +// Data structures +// + +message TableDefinition { + // the table name + string name = 1; + + // the SQL to run to create the table + string schema = 2; + + // the columns in the order that will be used to dump and load the data + repeated string columns = 3; + + // the primary key columns in the primary key order + repeated string primary_key_columns = 4; + + // type is either mysqlctl.TableBaseTable or mysqlctl.TableView + string type = 5; + + // how much space the data file takes. + uint64 data_length = 6; + + // approximate number of rows + uint64 row_count = 7; + + // column names along with their types. + // NOTE: this is a superset of columns. + repeated query.Field fields = 8; +} + +message SchemaDefinition { + string database_schema = 1; + repeated TableDefinition table_definitions = 2; + string version = 3; +} + +message SchemaChangeResult { + // before_schema holds the schema before each change. + SchemaDefinition before_schema = 1; + // after_schema holds the schema after each change. + SchemaDefinition after_schema = 2; +} + +// UserPermission describes a single row in the mysql.user table +// Primary key is Host+User +// PasswordChecksum is the crc64 of the password, for security reasons +message UserPermission { + string host = 1; + string user = 2; + uint64 password_checksum = 3; + map privileges = 4; +} + +// DbPermission describes a single row in the mysql.db table +// Primary key is Host+Db+User +message DbPermission { + string host = 1; + string db = 2; + string user = 3; + map privileges = 4; +} + +// Permissions have all the rows in mysql.{user,db} tables, +// (all rows are sorted by primary key) +message Permissions { + repeated UserPermission user_permissions = 1; + repeated DbPermission db_permissions = 2; +} + +// +// RPC payloads +// + +message PingRequest { + string payload = 1; +} + +message PingResponse { + string payload = 1; +} + +message SleepRequest { + // duration is in nanoseconds + int64 duration = 1; +} + +message SleepResponse { +} + +message ExecuteHookRequest { + string name = 1; + repeated string parameters = 2; + map extra_env = 3; +} + +message ExecuteHookResponse { + int64 exit_status = 1; + string stdout = 2; + string stderr = 3; +} + +message GetSchemaRequest { + repeated string tables = 1; + bool include_views = 2; + repeated string exclude_tables = 3; +} + +message GetSchemaResponse { + SchemaDefinition schema_definition = 1; +} + +message GetPermissionsRequest { +} + +message GetPermissionsResponse { + Permissions permissions = 1; +} + +message SetReadOnlyRequest { +} + +message SetReadOnlyResponse { +} + +message SetReadWriteRequest { +} + +message SetReadWriteResponse { +} + +message ChangeTypeRequest { + topodata.TabletType tablet_type = 1; +} + +message ChangeTypeResponse { +} + +message RefreshStateRequest { +} + +message RefreshStateResponse { +} + +message RunHealthCheckRequest { +} + +message RunHealthCheckResponse { +} + +message IgnoreHealthErrorRequest { + string pattern = 1; +} + +message IgnoreHealthErrorResponse { +} + +message ReloadSchemaRequest { + // wait_position allows scheduling a schema reload to occur after a + // given DDL has replicated to this server, by specifying a replication + // position to wait for. Leave empty to trigger the reload immediately. + string wait_position = 1; +} + +message ReloadSchemaResponse { +} + +message PreflightSchemaRequest { + repeated string changes = 1; +} + +message PreflightSchemaResponse { + // change_results has for each change the schema before and after it. + // The number of elements is identical to the length of "changes" in the request. + repeated SchemaChangeResult change_results = 1; +} + +message ApplySchemaRequest { + string sql = 1; + bool force = 2; + bool allow_replication = 3; + SchemaDefinition before_schema = 4; + SchemaDefinition after_schema = 5; +} + +message ApplySchemaResponse { + SchemaDefinition before_schema = 1; + SchemaDefinition after_schema = 2; +} + +message LockTablesRequest { +} + +message LockTablesResponse { +} + +message UnlockTablesRequest { +} + +message UnlockTablesResponse { +} + +message ExecuteFetchAsDbaRequest { + bytes query = 1; + string db_name = 2; + uint64 max_rows = 3; + bool disable_binlogs = 4; + bool reload_schema = 5; +} + +message ExecuteFetchAsDbaResponse { + query.QueryResult result = 1; +} + +message ExecuteFetchAsAllPrivsRequest { + bytes query = 1; + string db_name = 2; + uint64 max_rows = 3; + bool reload_schema = 4; +} + +message ExecuteFetchAsAllPrivsResponse { + query.QueryResult result = 1; +} + +message ExecuteFetchAsAppRequest { + bytes query = 1; + uint64 max_rows = 2; +} + +message ExecuteFetchAsAppResponse { + query.QueryResult result = 1; +} + +message ReplicationStatusRequest { +} + +message ReplicationStatusResponse { + replicationdata.Status status = 1; +} + +message MasterPositionRequest { +} + +message MasterPositionResponse { + string position = 1; +} + +message WaitForPositionRequest { + string position = 1; +} + +message WaitForPositionResponse { +} + +message StopReplicationRequest { +} + +message StopReplicationResponse { +} + +message StopReplicationMinimumRequest { + string position = 1; + int64 wait_timeout = 2; +} + +message StopReplicationMinimumResponse { + string position = 1; +} + +message StartReplicationRequest { +} + +message StartReplicationResponse { +} + +message StartReplicationUntilAfterRequest { + string position = 1; + int64 wait_timeout = 2; +} + +message StartReplicationUntilAfterResponse { +} + +message GetReplicasRequest { +} + +message GetReplicasResponse { + repeated string addrs = 1; +} + +message ResetReplicationRequest { +} + +message ResetReplicationResponse { +} + +message VReplicationExecRequest { + string query = 1; +} + +message VReplicationExecResponse { + query.QueryResult result = 1; +} + +message VReplicationWaitForPosRequest { + int64 id = 1; + string position = 2; +} + +message VReplicationWaitForPosResponse { +} + +message InitMasterRequest { +} + +message InitMasterResponse { + string position = 1; +} + +message PopulateReparentJournalRequest { + int64 time_created_ns = 1; + string action_name = 2; + topodata.TabletAlias master_alias = 3; + string replication_position = 4; +} + +message PopulateReparentJournalResponse { +} + +message InitReplicaRequest { + topodata.TabletAlias parent = 1; + string replication_position = 2; + int64 time_created_ns = 3; +} + +message InitReplicaResponse { +} + +message DemoteMasterRequest { +} + +message DemoteMasterResponse { + string position = 1; +} + +message UndoDemoteMasterRequest { +} + +message UndoDemoteMasterResponse { +} + +message ReplicaWasPromotedRequest { +} + +message ReplicaWasPromotedResponse { +} + +message SetMasterRequest { + topodata.TabletAlias parent = 1; + int64 time_created_ns = 2; + bool force_start_replication = 3; + string wait_position = 4; +} + +message SetMasterResponse { +} + +message ReplicaWasRestartedRequest { + // the parent alias the tablet should have + topodata.TabletAlias parent = 1; +} + +message ReplicaWasRestartedResponse { +} + +message StopReplicationAndGetStatusRequest { +} + +message StopReplicationAndGetStatusResponse { + replicationdata.Status status = 1; +} + +message PromoteReplicaRequest { +} + +message PromoteReplicaResponse { + string position = 1; +} + +// Backup / Restore related messages + +message BackupRequest { + int64 concurrency = 1; + bool allowMaster = 2; +} + +message BackupResponse { + logutil.Event event = 1; +} + +message RestoreFromBackupRequest { +} + +message RestoreFromBackupResponse { + logutil.Event event = 1; +} + +// Deprecated +message SlaveStatusRequest { +} + +// Deprecated +message SlaveStatusResponse { + replicationdata.Status status = 1; +} + +// Deprecated +message StopSlaveRequest { +} + +// Deprecated +message StopSlaveResponse { +} + +// Deprecated +message StopSlaveMinimumRequest { + string position = 1; + int64 wait_timeout = 2; +} + +// Deprecated +message StopSlaveMinimumResponse { + string position = 1; +} + +// Deprecated +message StartSlaveRequest { +} + +// Deprecated +message StartSlaveResponse { +} + +// Deprecated +message StartSlaveUntilAfterRequest { + string position = 1; + int64 wait_timeout = 2; +} + +// Deprecated +message StartSlaveUntilAfterResponse { +} + +// Deprecated +message GetSlavesRequest { +} + +// Deprecated +message GetSlavesResponse { + repeated string addrs = 1; +} + +// Deprecated +message InitSlaveRequest { + topodata.TabletAlias parent = 1; + string replication_position = 2; + int64 time_created_ns = 3; +} + +// Deprecated +message InitSlaveResponse { +} + +// Deprecated +message SlaveWasPromotedRequest { +} + +// Deprecated +message SlaveWasPromotedResponse { +} + +// Deprecated +message SlaveWasRestartedRequest { + // the parent alias the tablet should have + topodata.TabletAlias parent = 1; +} + +// Deprecated +message SlaveWasRestartedResponse { +} diff --git a/internal/stackql-parser-fork/proto/tabletmanagerservice.proto b/internal/stackql-parser-fork/proto/tabletmanagerservice.proto new file mode 100644 index 00000000..e150ac3f --- /dev/null +++ b/internal/stackql-parser-fork/proto/tabletmanagerservice.proto @@ -0,0 +1,189 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains the service definition for making management API +// calls to VtTablet. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/tabletmanagerservice"; + +package tabletmanagerservice; + +import "tabletmanagerdata.proto"; + +// TabletManager is a service definition for tabletmanagerdata.TabletManager. +service TabletManager { + // + // Various read-only methods + // + + // Ping returns the input payload + rpc Ping(tabletmanagerdata.PingRequest) returns (tabletmanagerdata.PingResponse) {}; + + // Sleep sleeps for the provided duration + rpc Sleep(tabletmanagerdata.SleepRequest) returns (tabletmanagerdata.SleepResponse) {}; + + // ExecuteHook executes the hook remotely + rpc ExecuteHook(tabletmanagerdata.ExecuteHookRequest) returns (tabletmanagerdata.ExecuteHookResponse) {}; + + // GetSchema asks the tablet for its schema + rpc GetSchema(tabletmanagerdata.GetSchemaRequest) returns (tabletmanagerdata.GetSchemaResponse) {}; + + // GetPermissions asks the tablet for its permissions + rpc GetPermissions(tabletmanagerdata.GetPermissionsRequest) returns (tabletmanagerdata.GetPermissionsResponse) {}; + + // + // Various read-write methods + // + + rpc SetReadOnly(tabletmanagerdata.SetReadOnlyRequest) returns (tabletmanagerdata.SetReadOnlyResponse) {}; + + rpc SetReadWrite(tabletmanagerdata.SetReadWriteRequest) returns (tabletmanagerdata.SetReadWriteResponse) {}; + + // ChangeType asks the remote tablet to change its type + rpc ChangeType(tabletmanagerdata.ChangeTypeRequest) returns (tabletmanagerdata.ChangeTypeResponse) {}; + + rpc RefreshState(tabletmanagerdata.RefreshStateRequest) returns (tabletmanagerdata.RefreshStateResponse) {}; + + rpc RunHealthCheck(tabletmanagerdata.RunHealthCheckRequest) returns (tabletmanagerdata.RunHealthCheckResponse) {}; + + rpc IgnoreHealthError(tabletmanagerdata.IgnoreHealthErrorRequest) returns (tabletmanagerdata.IgnoreHealthErrorResponse) {}; + + rpc ReloadSchema(tabletmanagerdata.ReloadSchemaRequest) returns (tabletmanagerdata.ReloadSchemaResponse) {}; + + rpc PreflightSchema(tabletmanagerdata.PreflightSchemaRequest) returns (tabletmanagerdata.PreflightSchemaResponse) {}; + + rpc ApplySchema(tabletmanagerdata.ApplySchemaRequest) returns (tabletmanagerdata.ApplySchemaResponse) {}; + + rpc LockTables(tabletmanagerdata.LockTablesRequest) returns (tabletmanagerdata.LockTablesResponse) {}; + + rpc UnlockTables(tabletmanagerdata.UnlockTablesRequest) returns (tabletmanagerdata.UnlockTablesResponse) {}; + + rpc ExecuteFetchAsDba(tabletmanagerdata.ExecuteFetchAsDbaRequest) returns (tabletmanagerdata.ExecuteFetchAsDbaResponse) {}; + + rpc ExecuteFetchAsAllPrivs(tabletmanagerdata.ExecuteFetchAsAllPrivsRequest) returns (tabletmanagerdata.ExecuteFetchAsAllPrivsResponse) {}; + + rpc ExecuteFetchAsApp(tabletmanagerdata.ExecuteFetchAsAppRequest) returns (tabletmanagerdata.ExecuteFetchAsAppResponse) {}; + + // + // Replication related methods + // + + // ReplicationStatus returns the current replication status. + rpc ReplicationStatus(tabletmanagerdata.ReplicationStatusRequest) returns (tabletmanagerdata.ReplicationStatusResponse) {}; + + // MasterPosition returns the current master position + rpc MasterPosition(tabletmanagerdata.MasterPositionRequest) returns (tabletmanagerdata.MasterPositionResponse) {}; + + // WaitForPosition waits for the position to be reached + rpc WaitForPosition(tabletmanagerdata.WaitForPositionRequest) returns (tabletmanagerdata.WaitForPositionResponse) {}; + + // StopReplication makes mysql stop its replication + rpc StopReplication(tabletmanagerdata.StopReplicationRequest) returns (tabletmanagerdata.StopReplicationResponse) {}; + + // StopReplicationMinimum stops the mysql replication after it reaches + // the provided minimum point + rpc StopReplicationMinimum(tabletmanagerdata.StopReplicationMinimumRequest) returns (tabletmanagerdata.StopReplicationMinimumResponse) {}; + + // StartReplication starts the mysql replication + rpc StartReplication(tabletmanagerdata.StartReplicationRequest) returns (tabletmanagerdata.StartReplicationResponse) {}; + + // StartReplicationUnitAfter starts the mysql replication until and including + // the provided position + rpc StartReplicationUntilAfter(tabletmanagerdata.StartReplicationUntilAfterRequest) returns (tabletmanagerdata.StartReplicationUntilAfterResponse) {}; + + // GetReplicas asks for the list of mysql replicas + rpc GetReplicas(tabletmanagerdata.GetReplicasRequest) returns (tabletmanagerdata.GetReplicasResponse) {}; + + // VReplication API + rpc VReplicationExec(tabletmanagerdata.VReplicationExecRequest) returns(tabletmanagerdata.VReplicationExecResponse) {}; + rpc VReplicationWaitForPos(tabletmanagerdata.VReplicationWaitForPosRequest) returns(tabletmanagerdata.VReplicationWaitForPosResponse) {}; + + // + // Reparenting related functions + // + + // ResetReplication makes the target not replicating + rpc ResetReplication(tabletmanagerdata.ResetReplicationRequest) returns (tabletmanagerdata.ResetReplicationResponse) {}; + + // InitMaster initializes the tablet as a master + rpc InitMaster(tabletmanagerdata.InitMasterRequest) returns (tabletmanagerdata.InitMasterResponse) {}; + + // PopulateReparentJournal tells the tablet to add an entry to its + // reparent journal + rpc PopulateReparentJournal(tabletmanagerdata.PopulateReparentJournalRequest) returns (tabletmanagerdata.PopulateReparentJournalResponse) {}; + + // InitReplica tells the tablet to reparent to the master unconditionally + rpc InitReplica(tabletmanagerdata.InitReplicaRequest) returns (tabletmanagerdata.InitReplicaResponse) {}; + + // DemoteMaster tells the soon-to-be-former master it's gonna change + rpc DemoteMaster(tabletmanagerdata.DemoteMasterRequest) returns (tabletmanagerdata.DemoteMasterResponse) {}; + + // UndoDemoteMaster reverts all changes made by DemoteMaster + rpc UndoDemoteMaster(tabletmanagerdata.UndoDemoteMasterRequest) returns (tabletmanagerdata.UndoDemoteMasterResponse) {}; + + // ReplicaWasPromoted tells the remote tablet it is now the master + rpc ReplicaWasPromoted(tabletmanagerdata.ReplicaWasPromotedRequest) returns (tabletmanagerdata.ReplicaWasPromotedResponse) {}; + + // SetMaster tells the replica to reparent + rpc SetMaster(tabletmanagerdata.SetMasterRequest) returns (tabletmanagerdata.SetMasterResponse) {}; + + // ReplicaWasRestarted tells the remote tablet its master has changed + rpc ReplicaWasRestarted(tabletmanagerdata.ReplicaWasRestartedRequest) returns (tabletmanagerdata.ReplicaWasRestartedResponse) {}; + + // StopReplicationAndGetStatus stops MySQL replication, and returns the + // replication status + rpc StopReplicationAndGetStatus(tabletmanagerdata.StopReplicationAndGetStatusRequest) returns (tabletmanagerdata.StopReplicationAndGetStatusResponse) {}; + + // PromoteReplica makes the replica the new master + rpc PromoteReplica(tabletmanagerdata.PromoteReplicaRequest) returns (tabletmanagerdata.PromoteReplicaResponse) {}; + + // + // Backup related methods + // + + rpc Backup(tabletmanagerdata.BackupRequest) returns (stream tabletmanagerdata.BackupResponse) {}; + + // RestoreFromBackup deletes all local data and restores it from the latest backup. + rpc RestoreFromBackup(tabletmanagerdata.RestoreFromBackupRequest) returns (stream tabletmanagerdata.RestoreFromBackupResponse) {}; + + // Deprecated - remove after 7.0 + rpc SlaveStatus(tabletmanagerdata.SlaveStatusRequest) returns (tabletmanagerdata.SlaveStatusResponse) {}; + + // Deprecated + rpc StopSlave(tabletmanagerdata.StopSlaveRequest) returns (tabletmanagerdata.StopSlaveResponse) {}; + + // Deprecated + rpc StopSlaveMinimum(tabletmanagerdata.StopSlaveMinimumRequest) returns (tabletmanagerdata.StopSlaveMinimumResponse) {}; + + // Deprecated + rpc StartSlave(tabletmanagerdata.StartSlaveRequest) returns (tabletmanagerdata.StartSlaveResponse) {}; + + // Deprecated + rpc StartSlaveUntilAfter(tabletmanagerdata.StartSlaveUntilAfterRequest) returns (tabletmanagerdata.StartSlaveUntilAfterResponse) {}; + + // Deprecated + rpc GetSlaves(tabletmanagerdata.GetSlavesRequest) returns (tabletmanagerdata.GetSlavesResponse) {}; + + // Deprecated + rpc InitSlave(tabletmanagerdata.InitSlaveRequest) returns (tabletmanagerdata.InitSlaveResponse) {}; + + // Deprecated + rpc SlaveWasPromoted(tabletmanagerdata.SlaveWasPromotedRequest) returns (tabletmanagerdata.SlaveWasPromotedResponse) {}; + + // Deprecated + rpc SlaveWasRestarted(tabletmanagerdata.SlaveWasRestartedRequest) returns (tabletmanagerdata.SlaveWasRestartedResponse) {}; +} diff --git a/internal/stackql-parser-fork/proto/throttlerdata.proto b/internal/stackql-parser-fork/proto/throttlerdata.proto new file mode 100644 index 00000000..199656a1 --- /dev/null +++ b/internal/stackql-parser-fork/proto/throttlerdata.proto @@ -0,0 +1,185 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Data structures for the throttler RPC interface. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/throttlerdata"; + +package throttlerdata; + +// MaxRatesRequest is the payload for the MaxRates RPC. +message MaxRatesRequest { +} + +// MaxRatesResponse is returned by the MaxRates RPC. +message MaxRatesResponse { + // max_rates returns the max rate for each throttler. It's keyed by the + // throttler name. + map rates = 1; +} + +// SetMaxRateRequest is the payload for the SetMaxRate RPC. +message SetMaxRateRequest { + int64 rate = 1; +} + +// SetMaxRateResponse is returned by the SetMaxRate RPC. +message SetMaxRateResponse { + // names is the list of throttler names which were updated. + repeated string names = 1; +} + +// Configuration holds the configuration parameters for the +// MaxReplicationLagModule which adaptively adjusts the throttling rate based on +// the observed replication lag across all replicas. +message Configuration { + // target_replication_lag_sec is the replication lag (in seconds) the + // MaxReplicationLagModule tries to aim for. + // If it is within the target, it tries to increase the throttler + // rate, otherwise it will lower it based on an educated guess of the + // replica's throughput. + int64 target_replication_lag_sec = 1; + + // max_replication_lag_sec is meant as a last resort. + // By default, the module tries to find out the system maximum capacity while + // trying to keep the replication lag around "target_replication_lag_sec". + // Usually, we'll wait min_duration_between_(increases|decreases)_sec to see + // the effect of a throttler rate change on the replication lag. + // But if the lag goes above this field's value we will go into an "emergency" + // state and throttle more aggressively (see "emergency_decrease" below). + // This is the only way to ensure that the system will recover. + int64 max_replication_lag_sec = 2; + + // initial_rate is the rate at which the module will start. + int64 initial_rate = 3; + + // max_increase defines by how much we will increase the rate + // e.g. 0.05 increases the rate by 5% while 1.0 by 100%. + // Note that any increase will let the system wait for at least + // (1 / MaxIncrease) seconds. If we wait for shorter periods of time, we + // won't notice if the rate increase also increases the replication lag. + // (If the system was already at its maximum capacity (e.g. 1k QPS) and we + // increase the rate by e.g. 5% to 1050 QPS, it will take 20 seconds until + // 1000 extra queries are buffered and the lag increases by 1 second.) + double max_increase = 4; + + // emergency_decrease defines by how much we will decrease the current rate + // if the observed replication lag is above "max_replication_lag_sec". + // E.g. 0.50 decreases the current rate by 50%. + double emergency_decrease = 5; + + // min_duration_between_increases_sec specifies how long we'll wait at least + // for the last rate increase to have an effect on the system. + int64 min_duration_between_increases_sec = 6; + + // max_duration_between_increases_sec specifies how long we'll wait at most + // for the last rate increase to have an effect on the system. + int64 max_duration_between_increases_sec = 7; + + // min_duration_between_decreases_sec specifies how long we'll wait at least + // for the last rate decrease to have an effect on the system. + int64 min_duration_between_decreases_sec = 8; + + // spread_backlog_across_sec is used when we set the throttler rate after + // we guessed the rate of a replica and determined its backlog. + // For example, at a guessed rate of 100 QPS and a lag of 10s, the replica has + // a backlog of 1000 queries. + // When we set the new, decreased throttler rate, we factor in how long it + // will take the replica to go through the backlog (in addition to new + // requests). This field specifies over which timespan we plan to spread this. + // For example, for a backlog of 1000 queries spread over 5s means that we + // have to further reduce the rate by 200 QPS or the backlog will not be + // processed within the 5 seconds. + int64 spread_backlog_across_sec = 9; + + // ignore_n_slowest_replicas will ignore replication lag updates from the + // N slowest REPLICA tablets. Under certain circumstances, replicas are still + // considered e.g. a) if the lag is at most max_replication_lag_sec, b) there + // are less than N+1 replicas or c) the lag increased on each replica such + // that all replicas were ignored in a row. + int32 ignore_n_slowest_replicas = 10; + + // ignore_n_slowest_rdonlys does the same thing as ignore_n_slowest_replicas + // but for RDONLY tablets. Note that these two settings are independent. + int32 ignore_n_slowest_rdonlys = 11; + + // age_bad_rate_after_sec is the duration after which an unchanged bad rate + // will "age out" and increase by "bad_rate_increase". + // Bad rates are tracked by the code in memory.go and serve as an upper bound + // for future rate changes. This ensures that the adaptive throttler does not + // try known too high (bad) rates over and over again. + // To avoid that temporary degradations permanently reduce the maximum rate, + // a stable bad rate "ages out" after "age_bad_rate_after_sec". + int64 age_bad_rate_after_sec = 12; + + // bad_rate_increase defines the percentage by which a bad rate will be + // increased when it's aging out. + double bad_rate_increase = 13; + + // max_rate_approach_threshold is the fraction of the current rate limit that the actual + // rate must exceed for the throttler to increase the limit when the replication lag + // is below target_replication_lag_sec. For example, assuming the actual replication lag + // is below target_replication_lag_sec, if the current rate limit is 100, then the actual + // rate must exceed 100*max_rate_approach_threshold for the throttler to increase the current + // limit. + double max_rate_approach_threshold = 14; +} + +// GetConfigurationRequest is the payload for the GetConfiguration RPC. +message GetConfigurationRequest { + // throttler_name specifies which throttler to select. If empty, all active + // throttlers will be selected. + string throttler_name = 1; +} + +// GetConfigurationResponse is returned by the GetConfiguration RPC. +message GetConfigurationResponse { + // max_rates returns the configurations for each throttler. + // It's keyed by the throttler name. + map configurations = 1; +} + +// UpdateConfigurationRequest is the payload for the UpdateConfiguration RPC. +message UpdateConfigurationRequest { + // throttler_name specifies which throttler to update. If empty, all active + // throttlers will be updated. + string throttler_name = 1; + // configuration is the new (partial) configuration. + Configuration configuration = 2; + // copy_zero_values specifies whether fields with zero values should be copied + // as well. + bool copy_zero_values = 3; +} + +// UpdateConfigurationResponse is returned by the UpdateConfiguration RPC. +message UpdateConfigurationResponse { + // names is the list of throttler names which were updated. + repeated string names = 1; +} + +// ResetConfigurationRequest is the payload for the ResetConfiguration RPC. +message ResetConfigurationRequest { + // throttler_name specifies which throttler to reset. If empty, all active + // throttlers will be reset. + string throttler_name = 1; +} + +// ResetConfigurationResponse is returned by the ResetConfiguration RPC. +message ResetConfigurationResponse { + // names is the list of throttler names which were updated. + repeated string names = 1; +} diff --git a/internal/stackql-parser-fork/proto/throttlerservice.proto b/internal/stackql-parser-fork/proto/throttlerservice.proto new file mode 100644 index 00000000..7c717990 --- /dev/null +++ b/internal/stackql-parser-fork/proto/throttlerservice.proto @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// gRPC RPC interface for the internal resharding throttler (go/vt/throttler) +// which is used by the resharding clone process (vtworker) and filtered +// replication (vttablet). + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/throttlerservice"; + +package throttlerservice; + +import "throttlerdata.proto"; + +// Throttler defines the throttler RPC calls. +service Throttler { + // MaxRates returns the current max rate for each throttler of the process. + rpc MaxRates (throttlerdata.MaxRatesRequest) returns (throttlerdata.MaxRatesResponse) {}; + + // SetMaxRate allows to change the current max rate for all throttlers + // of the process. + rpc SetMaxRate (throttlerdata.SetMaxRateRequest) returns (throttlerdata.SetMaxRateResponse) {}; + + // GetConfiguration returns the configuration of the MaxReplicationlag module + // for the given throttler or all throttlers if "throttler_name" is empty. + rpc GetConfiguration (throttlerdata.GetConfigurationRequest) returns (throttlerdata.GetConfigurationResponse) {}; + + // UpdateConfiguration (partially) updates the configuration of the + // MaxReplicationlag module for the given throttler or all throttlers if + // "throttler_name" is empty. + // If "copy_zero_values" is true, fields with zero values will be copied + // as well. + rpc UpdateConfiguration (throttlerdata.UpdateConfigurationRequest) returns (throttlerdata.UpdateConfigurationResponse) {}; + + // ResetConfiguration resets the configuration of the MaxReplicationlag module + // to the initial configuration for the given throttler or all throttlers if + // "throttler_name" is empty. + rpc ResetConfiguration (throttlerdata.ResetConfigurationRequest) returns (throttlerdata.ResetConfigurationResponse) {}; +} diff --git a/internal/stackql-parser-fork/proto/topodata.proto b/internal/stackql-parser-fork/proto/topodata.proto new file mode 100644 index 00000000..f56d8997 --- /dev/null +++ b/internal/stackql-parser-fork/proto/topodata.proto @@ -0,0 +1,408 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains the Vitess topology related data structures. +// Very few of these structures are exchanged over the wire (only +// TabletType and KeyRange), but they are all used by the topology +// service. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/topodata"; + +option java_package="io.vitess.proto"; + +package topodata; + +import "vttime.proto"; + +// KeyRange describes a range of sharding keys, when range-based +// sharding is used. +message KeyRange { + bytes start = 1; + bytes end = 2; +} + +// KeyspaceType describes the type of the keyspace +enum KeyspaceType { + // NORMAL is the default value + NORMAL = 0; + + // SNAPSHOT is when we are creating a snapshot keyspace + SNAPSHOT = 1; +} + +// KeyspaceIdType describes the type of the sharding key for a +// range-based sharded keyspace. +enum KeyspaceIdType { + // UNSET is the default value, when range-based sharding is not used. + UNSET = 0; + + // UINT64 is when uint64 value is used. + // This is represented as 'unsigned bigint' in mysql + UINT64 = 1; + + // BYTES is when an array of bytes is used. + // This is represented as 'varbinary' in mysql + BYTES = 2; +} + +// TabletAlias is a globally unique tablet identifier. +message TabletAlias { + // cell is the cell (or datacenter) the tablet is in + string cell = 1; + + // uid is a unique id for this tablet within the shard + // (this is the MySQL server id as well). + uint32 uid = 2; +} + +// TabletType represents the type of a given tablet. +enum TabletType { + option allow_alias = true; // so we can have RDONLY and BATCH co-exist + + // UNKNOWN is not a valid value. + UNKNOWN = 0; + + // MASTER is the master server for the shard. Only MASTER allows DMLs. + MASTER = 1; + + // REPLICA replicates from master. It is used to serve live traffic. + // A REPLICA can be promoted to MASTER. A demoted MASTER will go to REPLICA. + REPLICA = 2; + + // RDONLY (old name) / BATCH (new name) is used to serve traffic for + // long-running jobs. It is a separate type from REPLICA so + // long-running queries don't affect web-like traffic. + RDONLY = 3; + BATCH = 3; + + // SPARE is a type of servers that cannot serve queries, but is available + // in case an extra server is needed. + SPARE = 4; + + // EXPERIMENTAL is like SPARE, except it can serve queries. This + // type can be used for usages not planned by Vitess, like online + // export to another storage engine. + EXPERIMENTAL = 5; + + // BACKUP is the type a server goes to when taking a backup. No queries + // can be served in BACKUP mode. + BACKUP = 6; + + // RESTORE is the type a server uses when restoring a backup, at + // startup time. No queries can be served in RESTORE mode. + RESTORE = 7; + + // DRAINED is the type a server goes into when used by Vitess tools + // to perform an offline action. It is a serving type (as + // the tools processes may need to run queries), but it's not used + // to route queries from Vitess users. In this state, + // this tablet is dedicated to the process that uses it. + DRAINED = 8; +} + +// Tablet represents information about a running instance of vttablet. +message Tablet { + // alias is the unique name of the tablet. + TabletAlias alias = 1; + + // Fully qualified domain name of the host. + string hostname = 2; + + // Map of named ports. Normally this should include vt and grpc. + // Going forward, the mysql port will be stored in mysql_port + // instead of here. + // For accessing mysql port, use topoproto.MysqlPort to fetch, and + // topoproto.SetMysqlPort to set. These wrappers will ensure + // legacy behavior is supported. + map port_map = 4; + + // Keyspace name. + string keyspace = 5; + + // Shard name. If range based sharding is used, it should match + // key_range. + string shard = 6; + + // If range based sharding is used, range for the tablet's shard. + KeyRange key_range = 7; + + // type is the current type of the tablet. + TabletType type = 8; + + // It this is set, it is used as the database name instead of the + // normal "vt_" + keyspace. + string db_name_override = 9; + + // tablet tags + map tags = 10; + + // MySQL hostname. + string mysql_hostname = 12; + + // MySQL port. Use topoproto.MysqlPort and topoproto.SetMysqlPort + // to access this variable. The functions provide support + // for legacy behavior. + int32 mysql_port = 13; + + // master_term_start_time is the time (in UTC) at which the current term of + // the current tablet began as master. If this tablet is not currently the + // master, this value is ignored. + // + // A new master term begins any time an authoritative decision is communicated + // about which tablet should be the master, such as via Vitess + // replication-management commands like PlannedReparentShard, + // EmergencyReparentShard, and TabletExternallyReparented. + // + vttime.Time master_term_start_time = 14; + + // OBSOLETE: ip and tablet health information + // string ip = 3; + // map health_map = 11; + reserved 3, 11; +} + +// A Shard contains data about a subset of the data whithin a keyspace. +message Shard { + // master_alias is the tablet alias of the master for the shard. + // If it is unset, then there is no master in this shard yet. + // No lock is necessary to update this field, when for instance + // TabletExternallyReparented updates this. However, we lock the + // shard for reparenting operations (InitShardMaster, + // PlannedReparentShard,EmergencyReparentShard), to guarantee + // exclusive operation. + TabletAlias master_alias = 1; + + // master_term_start_time is the time (in UTC) at which the current term of + // the master specified in master_alias began. + // + // A new master term begins any time an authoritative decision is communicated + // about which tablet should be the master, such as via Vitess + // replication-management commands like PlannedReparentShard, + // EmergencyReparentShard, and TabletExternallyReparented. + // + // The master_alias should only ever be changed if the new master's term began + // at a later time than this. Note that a new term can start for the tablet + // that is already the master. In that case, the master_term_start_time would + // be increased without changing the master_alias. + vttime.Time master_term_start_time = 8; + + // key_range is the KeyRange for this shard. It can be unset if: + // - we are not using range-based sharding in this shard. + // - the shard covers the entire keyrange. + // This must match the shard name based on our other conventions, but + // helpful to have it decomposed here. + // Once set at creation time, it is never changed. + KeyRange key_range = 2; + + // ServedType is an entry in the served_types + message ServedType { + TabletType tablet_type = 1; + repeated string cells = 2; + } + + // served_types has at most one entry per TabletType + // This field is in the process of being deprecated in favor of + // is_master_serving. Keeping for backwards compatibility purposes. + repeated ServedType served_types = 3; + + // SourceShard represents a data source for filtered replication + // across shards. When this is used in a destination shard, the master + // of that shard will run filtered replication. + message SourceShard { + // Uid is the unique ID for this SourceShard object. + uint32 uid = 1; + + // the source keyspace + string keyspace = 2; + + // the source shard + string shard = 3; + + // the source shard keyrange + KeyRange key_range = 4; + + // the source table list to replicate + repeated string tables = 5; + } + + // SourceShards is the list of shards we're replicating from, + // using filtered replication. + // The keyspace lock is always taken when changing this. + repeated SourceShard source_shards = 4; + + // TabletControl controls tablet's behavior + message TabletControl { + // which tablet type is affected + TabletType tablet_type = 1; + repeated string cells = 2; + + // OBSOLETE: disable_query_service 3 + reserved 3; + + repeated string blacklisted_tables = 4; + + // frozen is set if we've started failing over traffic for + // the master. If set, this record should not be removed. + bool frozen = 5; + } + + // tablet_controls has at most one entry per TabletType. + // The keyspace lock is always taken when changing this. + repeated TabletControl tablet_controls = 6; + + // is_master_serving sets whether this shard master is serving traffic or not. + // The keyspace lock is always taken when changing this. + bool is_master_serving = 7; + + // OBSOLETE cells (5) + reserved 5; +} + +// A Keyspace contains data about a keyspace. +message Keyspace { + // name of the column used for sharding + // empty if the keyspace is not sharded + string sharding_column_name = 1; + + // type of the column used for sharding + // UNSET if the keyspace is not sharded + KeyspaceIdType sharding_column_type = 2; + + // OBSOLETE int32 split_shard_count = 3; + reserved 3; + + // ServedFrom indicates a relationship between a TabletType and the + // keyspace name that's serving it. + message ServedFrom { + // the tablet type (key for the map) + TabletType tablet_type = 1; + + // the cells to limit this to + repeated string cells = 2; + + // the keyspace name that's serving it + string keyspace = 3; + } + + // ServedFrom will redirect the appropriate traffic to + // another keyspace. + repeated ServedFrom served_froms = 4; + + // keyspace_type will determine how this keyspace is treated by + // vtgate / vschema. Normal keyspaces are routable by + // any query. Snapshot keyspaces are only accessible + // by explicit addresssing or by calling "use keyspace" first + KeyspaceType keyspace_type = 5; + + // base_keyspace is the base keyspace from which a snapshot + // keyspace is created. empty for normal keyspaces + string base_keyspace = 6; + + // snapshot_time (in UTC) is a property of snapshot + // keyspaces which tells us what point in time + // the snapshot is of + vttime.Time snapshot_time = 7; +} + +// ShardReplication describes the MySQL replication relationships +// whithin a cell. +message ShardReplication { + + // Node describes a tablet instance within the cell + message Node { + TabletAlias tablet_alias = 1; + } + + // Note there can be only one Node in this array + // for a given tablet. + repeated Node nodes = 1; +} + +// ShardReference is used as a pointer from a SrvKeyspace to a Shard +message ShardReference { + // Copied from Shard. + string name = 1; + KeyRange key_range = 2; + // Disable query serving in this shard +} + +// ShardTabletControl is used as a pointer from a SrvKeyspace to a Shard +message ShardTabletControl { + // Copied from Shard. + string name = 1; + KeyRange key_range = 2; + // Disable query serving in this shard + bool query_service_disabled = 3; +} + +// SrvKeyspace is a rollup node for the keyspace itself. +message SrvKeyspace { + message KeyspacePartition { + // The type this partition applies to. + TabletType served_type = 1; + + // List of non-overlapping continuous shards sorted by range. + repeated ShardReference shard_references = 2; + + // List of shard tablet controls + repeated ShardTabletControl shard_tablet_controls = 3; + } + + // The partitions this keyspace is serving, per tablet type. + repeated KeyspacePartition partitions = 1; + + // ServedFrom indicates a relationship between a TabletType and the + // keyspace name that's serving it. + message ServedFrom { + // the tablet type + TabletType tablet_type = 1; + + // the keyspace name that's serving it + string keyspace = 2; + } + + // copied from Keyspace + string sharding_column_name = 2; + KeyspaceIdType sharding_column_type = 3; + repeated ServedFrom served_from = 4; + // OBSOLETE int32 split_shard_count = 5; + reserved 5; +} + +// CellInfo contains information about a cell. CellInfo objects are +// stored in the global topology server, and describe how to reach +// local topology servers. +message CellInfo { + // ServerAddress contains the address of the server for the cell. + // The syntax of this field is topology implementation specific. + // For instance, for Zookeeper, it is a comma-separated list of + // server addresses. + string server_address = 1; + + // Root is the path to store data in. It is only used when talking + // to server_address. + string root = 2; + + // OBSOLETE: region 3 + reserved 3; +} + +// CellsAlias +message CellsAlias { + // Cells that map to this alias + repeated string cells = 2; +} diff --git a/internal/stackql-parser-fork/proto/vschema.proto b/internal/stackql-parser-fork/proto/vschema.proto new file mode 100644 index 00000000..3fc3c2fe --- /dev/null +++ b/internal/stackql-parser-fork/proto/vschema.proto @@ -0,0 +1,118 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains the types needed to define a vschema. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/vschema"; + +package vschema; + +import "query.proto"; + +// RoutingRules specify the high level routing rules for the VSchema. +message RoutingRules { + // rules should ideally be a map. However protos dont't allow + // repeated fields as elements of a map. So, we use a list + // instead. + repeated RoutingRule rules = 1; +} + +// RoutingRule specifies a routing rule. +message RoutingRule { + string from_table = 1; + repeated string to_tables = 2; +} + +// Keyspace is the vschema for a keyspace. +message Keyspace { + // If sharded is false, vindexes and tables are ignored. + bool sharded = 1; + map vindexes = 2; + map tables = 3; + // If require_explicit_routing is true, vindexes and tables are not added to global routing + bool require_explicit_routing = 4; +} + +// Vindex is the vindex info for a Keyspace. +message Vindex { + // The type must match one of the predefined + // (or plugged in) vindex names. + string type = 1; + // params is a map of attribute value pairs + // that must be defined as required by the + // vindex constructors. The values can only + // be strings. + map params = 2; + // A lookup vindex can have an owner table defined. + // If so, rows in the lookup table are created or + // deleted in sync with corresponding rows in the + // owner table. + string owner = 3; +} + +// Table is the table info for a Keyspace. +message Table { + // If the table is a sequence, type must be + // "sequence". Otherwise, it should be empty. + string type = 1; + // column_vindexes associates columns to vindexes. + repeated ColumnVindex column_vindexes = 2; + // auto_increment is specified if a column needs + // to be associated with a sequence. + AutoIncrement auto_increment = 3; + // columns lists the columns for the table. + repeated Column columns = 4; + // pinned pins an unsharded table to a specific + // shard, as dictated by the keyspace id. + // The keyspace id is represented in hex form + // like in keyranges. + string pinned = 5; + // column_list_authoritative is set to true if columns is + // an authoritative list for the table. This allows + // us to expand 'select *' expressions. + bool column_list_authoritative = 6; +} + +// ColumnVindex is used to associate a column to a vindex. +message ColumnVindex { + // Legacy implementation, moving forward all vindexes should define a list of columns. + string column = 1; + // The name must match a vindex defined in Keyspace. + string name = 2; + // List of columns that define this Vindex + repeated string columns = 3; +} + +// Autoincrement is used to designate a column as auto-inc. +message AutoIncrement { + string column = 1; + // The sequence must match a table of type SEQUENCE. + string sequence = 2; +} + +// Column describes a column. +message Column { + string name = 1; + query.Type type = 2; +} + +// SrvVSchema is the roll-up of all the Keyspace schema for a cell. +message SrvVSchema { + // keyspaces is a map of keyspace name -> Keyspace object. + map keyspaces = 1; + RoutingRules routing_rules = 2; +} diff --git a/internal/stackql-parser-fork/proto/vtctldata.proto b/internal/stackql-parser-fork/proto/vtctldata.proto new file mode 100644 index 00000000..0e8fa1a2 --- /dev/null +++ b/internal/stackql-parser-fork/proto/vtctldata.proto @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package contains the data structures for a service allowing +// you to use vtctld as a server for vt commands. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/vtctldata"; + +package vtctldata; + +import "logutil.proto"; + +// ExecuteVtctlCommandRequest is the payload for ExecuteVtctlCommand. +// timeouts are in nanoseconds. +message ExecuteVtctlCommandRequest { + repeated string args = 1; + int64 action_timeout = 2; +} + +// ExecuteVtctlCommandResponse is streamed back by ExecuteVtctlCommand. +message ExecuteVtctlCommandResponse { + logutil.Event event = 1; +} + +// TableMaterializeSttings contains the settings for one table. +message TableMaterializeSettings { + string target_table = 1; + // source_expression is a select statement. + string source_expression = 2; + // create_ddl contains the DDL to create the target table. + // If empty, the target table must already exist. + // if "copy", the target table DDL is the same as the source table. + string create_ddl = 3; +} + +// MaterializeSettings contains the settings for the Materialize command. +message MaterializeSettings { + // workflow is the name of the workflow. + string workflow = 1; + string source_keyspace = 2; + string target_keyspace = 3; + // stop_after_copy specifies if vreplication should be stopped after copying. + bool stop_after_copy = 4; + repeated TableMaterializeSettings table_settings = 5; + // optional parameters. + string cell = 6; + string tablet_types = 7; +} diff --git a/internal/stackql-parser-fork/proto/vtctlservice.proto b/internal/stackql-parser-fork/proto/vtctlservice.proto new file mode 100644 index 00000000..07cd70b3 --- /dev/null +++ b/internal/stackql-parser-fork/proto/vtctlservice.proto @@ -0,0 +1,30 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package contains a service allowing you to use vtctld as a +// proxy for vt commands. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/vtctlservice"; + +package vtctlservice; + +import "vtctldata.proto"; + +// Service Vtctl allows you to call vt commands through gRPC. +service Vtctl { + rpc ExecuteVtctlCommand (vtctldata.ExecuteVtctlCommandRequest) returns (stream vtctldata.ExecuteVtctlCommandResponse) {}; +} diff --git a/internal/stackql-parser-fork/proto/vtgate.proto b/internal/stackql-parser-fork/proto/vtgate.proto new file mode 100644 index 00000000..21ba0651 --- /dev/null +++ b/internal/stackql-parser-fork/proto/vtgate.proto @@ -0,0 +1,256 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Data definitions for service vtgateservice. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/vtgate"; + +package vtgate; + +option java_package="io.vitess.proto"; + +import "binlogdata.proto"; +import "query.proto"; +import "topodata.proto"; +import "vtrpc.proto"; + +// TransactionMode controls the execution of distributed transaction +// across multiple shards. +enum TransactionMode { + // UNSPECIFIED uses the transaction mode set by the VTGate flag 'transaction_mode'. + UNSPECIFIED = 0; + // SINGLE disallows distributed transactions. + SINGLE = 1; + // MULTI allows distributed transactions with best effort commit. + MULTI = 2; + // TWOPC is for distributed transactions with atomic commits. + TWOPC = 3; +} + + +// CommitOrder is used to designate which of the ShardSessions +// get used for transactions. +enum CommitOrder { + // NORMAL is the default commit order. + NORMAL = 0; + // PRE is used to designate pre_sessions. + PRE = 1; + // POST is used to designate post_sessions. + POST = 2; + // AUTOCOMMIT is used to run the statement as autocommitted transaction. + AUTOCOMMIT = 3; +} + +// Session objects are exchanged like cookies through various +// calls to VTGate. The behavior differs between V2 & V3 APIs. +// V3 APIs are Execute, ExecuteBatch and StreamExecute. All +// other APIs are V2. For the V3 APIs, the session +// must be sent with every call to Execute or ExecuteBatch. +// For the V2 APIs, Begin does not accept a session. It instead +// returns a brand new one with in_transaction set to true. +// After a call to Commit or Rollback, the session can be +// discarded. If you're not in a transaction, Session is +// an optional parameter for the V2 APIs. +message Session { + // in_transaction is set to true if the session is in a transaction. + bool in_transaction = 1; + + message ShardSession { + query.Target target = 1; + int64 transaction_id = 2; + topodata.TabletAlias tablet_alias = 3; + // reserved connection if a dedicated connection is needed + int64 reserved_id = 4; + } + // shard_sessions keep track of per-shard transaction info. + repeated ShardSession shard_sessions = 2; + + // single_db is deprecated. Use transaction_mode instead. + reserved 3; + + // autocommit specifies if the session is in autocommit mode. + // This is used only for V3. + bool autocommit = 4; + + // target_string is the target expressed as a string. Valid + // names are: keyspace:shard@target, keyspace@target or @target. + // This is used only for V3. + string target_string = 5; + + // options is used only for V3. + query.ExecuteOptions options = 6; + + // transaction_mode specifies the current transaction mode. + TransactionMode transaction_mode = 7; + + // warnings contains non-fatal warnings from the previous query + repeated query.QueryWarning warnings = 8; + + // pre_sessions contains sessions that have to be committed first. + repeated ShardSession pre_sessions = 9; + + // post_sessions contains sessions that have to be committed last. + repeated ShardSession post_sessions = 10; + + // last_insert_id keeps track of the last seen insert_id for this session + uint64 last_insert_id = 11; + + // found_rows keeps track of how many rows the last query returned + uint64 found_rows = 12; + + // user_defined_variables contains all the @variables defined for this session + map user_defined_variables = 13; + + // system_variables keeps track of all session variables set for this connection + // TODO: systay should we keep this so we can apply it ordered? + map system_variables = 14; + + // row_count keeps track of the last seen rows affected for this session + int64 row_count = 15; +} + +// ExecuteRequest is the payload to Execute. +message ExecuteRequest { + // Deprecated: + // bool not_in_transaction = 5; + reserved 5; + // caller_id identifies the caller. This is the effective caller ID, + // set by the application to further identify the caller. + vtrpc.CallerID caller_id = 1; + + // session carries the session state. + Session session = 2; + + // query is the query and bind variables to execute. + query.BoundQuery query = 3; + + // These values are deprecated. Use session instead. + // TODO(sougou): remove in 3.1 + topodata.TabletType tablet_type = 4; + string keyspace_shard = 6; + query.ExecuteOptions options = 7; +} + +// ExecuteResponse is the returned value from Execute. +message ExecuteResponse { + // error contains an application level error if necessary. Note the + // session may have changed, even when an error is returned (for + // instance if a database integrity error happened). + vtrpc.RPCError error = 1; + + // session is the updated session information. + Session session = 2; + + // result contains the query result, only set if error is unset. + query.QueryResult result = 3; +} + +// ExecuteBatchRequest is the payload to ExecuteBatch. +message ExecuteBatchRequest { + // caller_id identifies the caller. This is the effective caller ID, + // set by the application to further identify the caller. + vtrpc.CallerID caller_id = 1; + + // session carries the session state. + Session session = 2; + + // queries is a list of query and bind variables to execute. + repeated query.BoundQuery queries = 3; + + // These values are deprecated. Use session instead. + // TODO(sougou): remove in 3.1 + topodata.TabletType tablet_type = 4; + bool as_transaction = 5; + string keyspace_shard = 6; + query.ExecuteOptions options = 7; +} + + +// ExecuteBatchResponse is the returned value from ExecuteBatch. +message ExecuteBatchResponse { + // error contains an application level error if necessary. Note the + // session may have changed, even when an error is returned (for + // instance if a database integrity error happened). + vtrpc.RPCError error = 1; + + // session is the updated session information. + Session session = 2; + + // results contains the query results, only set if application level error is unset. + repeated query.ResultWithError results = 3; +} + +// StreamExecuteRequest is the payload to StreamExecute. +message StreamExecuteRequest { + // caller_id identifies the caller. This is the effective caller ID, + // set by the application to further identify the caller. + vtrpc.CallerID caller_id = 1; + + // query is the query and bind variables to execute. + query.BoundQuery query = 2; + + // These values are deprecated. Use session instead. + // TODO(sougou): remove in 3.1 + topodata.TabletType tablet_type = 3; + string keyspace_shard = 4; + query.ExecuteOptions options = 5; + + // session carries the session state. + Session session = 6; +} + +// StreamExecuteResponse is the returned value from StreamExecute. +// The session is currently not returned because StreamExecute is +// not expected to modify it. +message StreamExecuteResponse { + // result contains the result data. + // The first value contains only Fields information. + // The next values contain the actual rows, a few values per result. + query.QueryResult result = 1; +} + +// ResolveTransactionRequest is the payload to ResolveTransaction. +message ResolveTransactionRequest { + // caller_id identifies the caller. This is the effective caller ID, + // set by the application to further identify the caller. + vtrpc.CallerID caller_id = 1; + + // dtid is the dtid of the transaction to be resolved. + string dtid = 2; +} + +// ResolveTransactionResponse is the returned value from Rollback. +message ResolveTransactionResponse { +} + +// VStreamRequest is the payload for VStream. +message VStreamRequest { + vtrpc.CallerID caller_id = 1; + + topodata.TabletType tablet_type = 2; + + // position specifies the starting point of the bin log positions + // as well as the keyspace-shards to pull events from. + // position is of the form 'ks1:0@MySQL56/|ks2:-80@MySQL56/'. + binlogdata.VGtid vgtid = 3; + binlogdata.Filter filter = 4; +} + +// VStreamResponse is streamed by VStream. +message VStreamResponse { + repeated binlogdata.VEvent events = 1; +} diff --git a/internal/stackql-parser-fork/proto/vtgateservice.proto b/internal/stackql-parser-fork/proto/vtgateservice.proto new file mode 100644 index 00000000..894b9697 --- /dev/null +++ b/internal/stackql-parser-fork/proto/vtgateservice.proto @@ -0,0 +1,57 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Service definition for vtgateservice. +// This is the main entry point to Vitess. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/vtgateservice"; + +option java_package="io.vitess.proto.grpc"; + +package vtgateservice; + +import "vtgate.proto"; + +// Vitess is the main service to access a Vitess cluster. It is the API that vtgate +// exposes to serve all queries. +service Vitess { + // Execute tries to route the query to the right shard. + // It depends on the query and bind variables to provide enough + // information in conjunction with the vindexes to route the query. + // API group: v3 + rpc Execute(vtgate.ExecuteRequest) returns (vtgate.ExecuteResponse) {}; + + // ExecuteBatch tries to route the list of queries on the right shards. + // It depends on the query and bind variables to provide enough + // information in conjunction with the vindexes to route the query. + // API group: v3 + rpc ExecuteBatch(vtgate.ExecuteBatchRequest) returns (vtgate.ExecuteBatchResponse) {}; + + // StreamExecute executes a streaming query based on shards. + // It depends on the query and bind variables to provide enough + // information in conjunction with the vindexes to route the query. + // Use this method if the query returns a large number of rows. + // API group: v3 + rpc StreamExecute(vtgate.StreamExecuteRequest) returns (stream vtgate.StreamExecuteResponse) {}; + + // ResolveTransaction resolves a transaction. + // API group: Transactions + rpc ResolveTransaction(vtgate.ResolveTransactionRequest) returns (vtgate.ResolveTransactionResponse) {}; + + // VStream streams binlog events from the requested sources. + rpc VStream(vtgate.VStreamRequest) returns (stream vtgate.VStreamResponse) {}; +} diff --git a/internal/stackql-parser-fork/proto/vtrpc.proto b/internal/stackql-parser-fork/proto/vtrpc.proto new file mode 100644 index 00000000..9cd882f5 --- /dev/null +++ b/internal/stackql-parser-fork/proto/vtrpc.proto @@ -0,0 +1,262 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains useful data structures for RPCs in Vitess. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/vtrpc"; + +option java_package="io.vitess.proto"; + +package vtrpc; + +// CallerID is passed along RPCs to identify the originating client +// for a request. It is not meant to be secure, but only +// informational. The client can put whatever info they want in these +// fields, and they will be trusted by the servers. The fields will +// just be used for logging purposes, and to easily find a client. +// VtGate propagates it to VtTablet, and VtTablet may use this +// information for monitoring purposes, to display on dashboards, or +// for blacklisting purposes. +message CallerID { + // principal is the effective user identifier. It is usually filled in + // with whoever made the request to the appserver, if the request + // came from an automated job or another system component. + // If the request comes directly from the Internet, or if the Vitess client + // takes action on its own accord, it is okay for this field to be absent. + string principal = 1; + + // component describes the running process of the effective caller. + // It can for instance be the hostname:port of the servlet initiating the + // database call, or the container engine ID used by the servlet. + string component = 2; + + // subcomponent describes a component inisde the immediate caller which + // is responsible for generating is request. Suggested values are a + // servlet name or an API endpoint name. + string subcomponent = 3; +} + +// Code represents canonical error codes. The names, numbers and comments +// must match the ones defined by grpc: +// https://godoc.org/google.golang.org/grpc/codes. +enum Code { + // OK is returned on success. + OK = 0; + + // CANCELED indicates the operation was cancelled (typically by the caller). + CANCELED = 1; + + // UNKNOWN error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + UNKNOWN = 2; + + // INVALID_ARGUMENT indicates client specified an invalid argument. + // Note that this differs from FAILED_PRECONDITION. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + INVALID_ARGUMENT = 3; + + // DEADLINE_EXCEEDED means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DEADLINE_EXCEEDED = 4; + + // NOT_FOUND means some requested entity (e.g., file or directory) was + // not found. + NOT_FOUND = 5; + + // ALREADY_EXISTS means an attempt to create an entity failed because one + // already exists. + ALREADY_EXISTS = 6; + + // PERMISSION_DENIED indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use RESOURCE_EXHAUSTED + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PERMISSION_DENIED = 7; + + // UNAUTHENTICATED indicates the request does not have valid + // authentication credentials for the operation. + UNAUTHENTICATED = 16; + + // RESOURCE_EXHAUSTED indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + RESOURCE_EXHAUSTED = 8; + + // FAILED_PRECONDITION indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE: + // (a) Use UNAVAILABLE if the client can retry just the failing call. + // (b) Use ABORTED if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FAILED_PRECONDITION if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FAILED_PRECONDITION + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FAILED_PRECONDITION if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FAILED_PRECONDITION = 9; + + // ABORTED indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + ABORTED = 10; + + // OUT_OF_RANGE means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike INVALID_ARGUMENT, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate INVALID_ARGUMENT if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OUT_OF_RANGE if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FAILED_PRECONDITION and + // OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OUT_OF_RANGE error to detect when + // they are done. + OUT_OF_RANGE = 11; + + // UNIMPLEMENTED indicates operation is not implemented or not + // supported/enabled in this service. + UNIMPLEMENTED = 12; + + // INTERNAL errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + INTERNAL = 13; + + // UNAVAILABLE indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + UNAVAILABLE = 14; + + // DATA_LOSS indicates unrecoverable data loss or corruption. + DATA_LOSS = 15; +} + +// LegacyErrorCode is the enum values for Errors. This type is deprecated. +// Use Code instead. Background: In the initial design, we thought +// that we may end up with a different list of canonical error codes +// than the ones defined by grpc. In hindsight, we realize that +// the grpc error codes are fairly generic and mostly sufficient. +// In order to avoid confusion, this type will be deprecated in +// favor of the new Code that matches exactly what grpc defines. +// Some names below have a _LEGACY suffix. This is to prevent +// name collisions with Code. +enum LegacyErrorCode { + // SUCCESS_LEGACY is returned from a successful call. + SUCCESS_LEGACY = 0; + + // CANCELLED_LEGACY means that the context was cancelled (and noticed in the app layer, + // as opposed to the RPC layer). + CANCELLED_LEGACY = 1; + + // UNKNOWN_ERROR_LEGACY includes: + // 1. MySQL error codes that we don't explicitly handle. + // 2. MySQL response that wasn't as expected. For example, we might expect a MySQL + // timestamp to be returned in a particular way, but it wasn't. + // 3. Anything else that doesn't fall into a different bucket. + UNKNOWN_ERROR_LEGACY = 2; + + // BAD_INPUT_LEGACY is returned when an end-user either sends SQL that couldn't be parsed correctly, + // or tries a query that isn't supported by Vitess. + BAD_INPUT_LEGACY = 3; + + // DEADLINE_EXCEEDED_LEGACY is returned when an action is taking longer than a given timeout. + DEADLINE_EXCEEDED_LEGACY = 4; + + // INTEGRITY_ERROR_LEGACY is returned on integrity error from MySQL, usually due to + // duplicate primary keys. + INTEGRITY_ERROR_LEGACY = 5; + + // PERMISSION_DENIED_LEGACY errors are returned when a user requests access to something + // that they don't have permissions for. + PERMISSION_DENIED_LEGACY = 6; + + // RESOURCE_EXHAUSTED_LEGACY is returned when a query exceeds its quota in some dimension + // and can't be completed due to that. Queries that return RESOURCE_EXHAUSTED + // should not be retried, as it could be detrimental to the server's health. + // Examples of errors that will cause the RESOURCE_EXHAUSTED code: + // 1. TxPoolFull: this is retried server-side, and is only returned as an error + // if the server-side retries failed. + // 2. Query is killed due to it taking too long. + RESOURCE_EXHAUSTED_LEGACY = 7; + + // QUERY_NOT_SERVED_LEGACY means that a query could not be served right now. + // Client can interpret it as: "the tablet that you sent this query to cannot + // serve the query right now, try a different tablet or try again later." + // This could be due to various reasons: QueryService is not serving, should + // not be serving, wrong shard, wrong tablet type, blacklisted table, etc. + // Clients that receive this error should usually retry the query, but after taking + // the appropriate steps to make sure that the query will get sent to the correct + // tablet. + QUERY_NOT_SERVED_LEGACY = 8; + + // NOT_IN_TX_LEGACY means that we're not currently in a transaction, but we should be. + NOT_IN_TX_LEGACY = 9; + + // INTERNAL_ERROR_LEGACY means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + INTERNAL_ERROR_LEGACY = 10; + + // TRANSIENT_ERROR_LEGACY is used for when there is some error that we expect we can + // recover from automatically - often due to a resource limit temporarily being + // reached. Retrying this error, with an exponential backoff, should succeed. + // Clients should be able to successfully retry the query on the same backends. + // Examples of things that can trigger this error: + // 1. Query has been throttled + // 2. VtGate could have request backlog + TRANSIENT_ERROR_LEGACY = 11; + + // UNAUTHENTICATED_LEGACY errors are returned when a user requests access to something, + // and we're unable to verify the user's authentication. + UNAUTHENTICATED_LEGACY = 12; +} + +// RPCError is an application-level error structure returned by +// VtTablet (and passed along by VtGate if appropriate). +// We use this so the clients don't have to parse the error messages, +// but instead can depend on the value of the code. +message RPCError { + LegacyErrorCode legacy_code = 1; + string message = 2; + Code code = 3; +} diff --git a/internal/stackql-parser-fork/proto/vttest.proto b/internal/stackql-parser-fork/proto/vttest.proto new file mode 100644 index 00000000..2e76c7f7 --- /dev/null +++ b/internal/stackql-parser-fork/proto/vttest.proto @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains the vttest topology configuration structures. +// +// The protobuf message "VTTestTopology" specifies the Vitess and +// database configuration of the "vttest" test component which can be +// used in end-to-end tests to test an application against an actual +// Vitess and MySQL server. +// +// To start a "vttest" instance, pass the "VTTestTopology" message, +// encoded in the protobuf compact text format, to +// py/vttest/run_local_database.py which in turn will send it to the +// Vitess test binary called "vtcombo". +// +// To encode a "VTTestTopology" message in the protobuf compact text +// format, create the protobuf in your test's native language first +// and then use the protobuf library to encode it as text. +// For an example in Python, see: test/vttest_sample_test.py +// In go, see: go/vt/vttest/local_cluster_test.go +// +// Sample encoded proto configurations would be as follow. Note there are +// multiple encoding options, see the proto documentation for more info +// (first and last quote not included in the encoding): +// - single keyspace named test_keyspace with one shard '0': +// 'keyspaces: > ' +// - two keyspaces, one with two shards, the other one with a redirect: +// 'keyspaces { name: "test_keyspace" shards { name: "-80" } shards { name: "80-" } } keyspaces { name: "redirect" served_from: "test_keyspace" }' + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/vttest"; + +package vttest; + +// Shard describes a single shard in a keyspace. +message Shard { + // name has to be unique in a keyspace. For unsharded keyspaces, it + // should be '0'. For sharded keyspace, it should be derived from + // the keyrange, like '-80' or '40-80'. + string name = 1; + + // db_name_override is the mysql db name for this shard. Has to be + // globally unique. If not specified, we will by default use + // 'vt__'. + string db_name_override = 2; +} + +// Keyspace describes a single keyspace. +message Keyspace { + // name has to be unique in a VTTestTopology. + string name = 1; + + // shards inside this keyspace. Ignored if redirect is set. + repeated Shard shards = 2; + + // sharding_column_name for this keyspace. Used for v2 calls, but not for v3. + string sharding_column_name = 3; + + // sharding_column_type for this keyspace. Used for v2 calls, but not for v3. + string sharding_column_type = 4; + + // redirects all traffic to another keyspace. If set, shards is ignored. + string served_from = 5; + + // number of replica tablets to instantiate. This includes the master tablet. + int32 replica_count = 6; + + // number of rdonly tablets to instantiate. + int32 rdonly_count = 7; +} + +// VTTestTopology describes the keyspaces in the topology. +message VTTestTopology { + // all keyspaces in the topology. + repeated Keyspace keyspaces = 1; + + // list of cells the keyspaces reside in. Vtgate is started in only the first cell. + repeated string cells = 2; +} diff --git a/internal/stackql-parser-fork/proto/vttime.proto b/internal/stackql-parser-fork/proto/vttime.proto new file mode 100644 index 00000000..5224fcb9 --- /dev/null +++ b/internal/stackql-parser-fork/proto/vttime.proto @@ -0,0 +1,30 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package contains a shared time data structure + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/vttime"; + +package vttime; + +// Time represents a time stamp in nanoseconds. In go, use logutil library +// to convert times. +message Time { + int64 seconds = 1; + int32 nanoseconds = 2; +} + diff --git a/internal/stackql-parser-fork/proto/vtworkerdata.proto b/internal/stackql-parser-fork/proto/vtworkerdata.proto new file mode 100644 index 00000000..d94721d2 --- /dev/null +++ b/internal/stackql-parser-fork/proto/vtworkerdata.proto @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Data structures for the vtworker RPC interface. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/vtworkerdata"; + +package vtworkerdata; + +import "logutil.proto"; + +// ExecuteVtworkerCommandRequest is the payload for ExecuteVtworkerCommand. +message ExecuteVtworkerCommandRequest { + repeated string args = 1; +} + +// ExecuteVtworkerCommandResponse is streamed back by ExecuteVtworkerCommand. +message ExecuteVtworkerCommandResponse { + logutil.Event event = 1; +} diff --git a/internal/stackql-parser-fork/proto/vtworkerservice.proto b/internal/stackql-parser-fork/proto/vtworkerservice.proto new file mode 100644 index 00000000..7b0e8784 --- /dev/null +++ b/internal/stackql-parser-fork/proto/vtworkerservice.proto @@ -0,0 +1,32 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// RPC interface for vtworker. +// The interface is very similar to the vtctld interface (see vtctlservice.proto). + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/vtworkerservice"; + +package vtworkerservice; + +import "vtworkerdata.proto"; + +// Vtworker contains the vtworker RPC calls. +service Vtworker { + // ExecuteVtworkerCommand allows to run a vtworker command by specifying the + // same arguments as on the command line. + rpc ExecuteVtworkerCommand (vtworkerdata.ExecuteVtworkerCommandRequest) returns (stream vtworkerdata.ExecuteVtworkerCommandResponse) {}; +} diff --git a/internal/stackql-parser-fork/proto/workflow.proto b/internal/stackql-parser-fork/proto/workflow.proto new file mode 100644 index 00000000..772b64da --- /dev/null +++ b/internal/stackql-parser-fork/proto/workflow.proto @@ -0,0 +1,112 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains the Vitess workflow management related data +// structures. They are used to store / retrieve state from topology +// server. + +syntax = "proto3"; +option go_package = "vitess.io/vitess/go/vt/proto/workflow"; + +package workflow; + +// WorkflowState describes the state of a workflow. +// This constant should match the Node object described in +// web/vtctld2/src/app/workflows/node.ts as it is exposed as JSON to +// the Angular 2 web app. +enum WorkflowState { + NotStarted = 0; + Running = 1; + Done = 2; +} + +// Workflow is the persisted state of a long-running workflow. +message Workflow { + // uuid is set when the workflow is created, and immutable after + // that. + string uuid = 1; + + // factory_name is set with the name of the factory that created the + // job (and can also restart it). It is set at creation time, and + // immutable after that. + string factory_name = 2; + + // name is the display name of the workflow. + string name = 3; + + // state describes the state of the job. A job is created as + // NotStarted, then the Workflow Manager picks it up and starts it, + // switching it to Running (and populating 'start_time'). The + // workflow can then fail over to a new Workflow Manager is + // necessary, and still be in Running state. When done, it goes to + // Done, 'end_time' is populated, and 'error' is set if there was an + // error. + WorkflowState state = 4; + + // data is workflow-specific stored data. It is usually a binary + // proto-encoded data structure. It can vary throughout the + // execution of the workflow. It will not change after the workflow + // is Done. + bytes data = 5; + + // error is set if the job finished with an error. This field only + // makes sense if 'state' is Done. + string error = 6; + + // start_time is set when the workflow manager starts a workflow for + // the first time. This field only makes sense if 'state' is Running + // or Done. + int64 start_time = 7; + + // end_time is set when the workflow is finished. + // This field only makes sense if 'state' is Done. + int64 end_time = 8; + + // create_time is set when the workflow is created. + int64 create_time = 9; +} + +message WorkflowCheckpoint { + // code_version is used to detect incompabilities between the version of the + // running workflow and the one which wrote the checkpoint. If they don't + // match, the workflow must not continue. The author of workflow must update + // this variable in their implementation when incompabilities are introduced. + int32 code_version = 1; + // tasks stores all tasks of the workflow in a map. The key is a unique name + // to identify the task, e.g. clone/-80. + + // Task is the data structure that stores the execution status and the + // attributes of a task. + map tasks = 2; + // settings includes workflow specific data, e.g. the resharding workflow + // would store the source shards and destination shards. + map settings = 3; +} + +enum TaskState { + TaskNotStarted = 0; + TaskRunning = 1; + TaskDone = 2; +} + +message Task { + string id = 1; + TaskState state = 2; + // attributes includes the parameters the task needs. + map attributes = 3; + string error = 4; +} + diff --git a/internal/stackql-parser-fork/tools/all_test_for_coverage.sh b/internal/stackql-parser-fork/tools/all_test_for_coverage.sh new file mode 100755 index 00000000..07fff9f4 --- /dev/null +++ b/internal/stackql-parser-fork/tools/all_test_for_coverage.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The purpose of this script is to run testcase and get coverage report +# These script runs all unit testcases including go endtoend testcase +# Here we ignore any error from testcase as the purpose is to collect coverage. +# So if there is a flaky test, it will get only chance to run, if it fails we ignore coverage from that. + + +### Execute unit testcase ### +source build.env +make tools +make build +echo "--------- executing unit testcases ---------" +packages_with_all_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/... | sort) +all_except_endtoend_tests=$(echo "$packages_with_all_tests" | grep -v "endtoend" | cut -d" " -f1 ) + +counter=0 +for pkg in $all_except_endtoend_tests +do + go test -coverpkg=vitess.io/vitess/go/... -coverprofile "/tmp/unit_$counter.out" -json > "/tmp/unit_$counter.json" $pkg -v -p=1 || : + counter=$((counter+1)) +done + +## Copy the test files to get instrumented binaries ### +cp ./tools/coverage-go/vtctl_test.go ./go/cmd/vtctl/vtctl_test.go +cp ./tools/coverage-go/vtctld_test.go ./go/cmd/vtctld/vtctld_test.go +cp ./tools/coverage-go/mysqlctl_test.go ./go/cmd/mysqlctl/mysqlctl_test.go +cp ./tools/coverage-go/vtctlclient_test.go ./go/cmd/vtctlclient/vtctlclient_test.go +cp ./tools/coverage-go/vttablet_test.go ./go/cmd/vttablet/vttablet_test.go +cp ./tools/coverage-go/vtgate_test.go ./go/cmd/vtgate/vtgate_test.go +cp ./tools/coverage-go/vtworker_test.go ./go/cmd/vtworker/vtworker_test.go +cp ./tools/coverage-go/vtworkerclient_test.go ./go/cmd/vtworkerclient/vtworkerclient_test.go + +go test -coverpkg=vitess.io/vitess/go/... -c vitess.io/vitess/go/cmd/vtctl -o ./bin/vtctl +go test -coverpkg=vitess.io/vitess/go/... -c vitess.io/vitess/go/cmd/vtctld -o ./bin/vtctld +go test -coverpkg=vitess.io/vitess/go/... -c vitess.io/vitess/go/cmd/mysqlctl -o ./bin/mysqlctl +go test -coverpkg=vitess.io/vitess/go/... -c vitess.io/vitess/go/cmd/vtctlclient -o ./bin/vtctlclient +go test -coverpkg=vitess.io/vitess/go/... -c vitess.io/vitess/go/cmd/vttablet -o ./bin/vttablet +go test -coverpkg=vitess.io/vitess/go/... -c vitess.io/vitess/go/cmd/vtgate -o ./bin/vtgate +go test -coverpkg=vitess.io/vitess/go/... -c vitess.io/vitess/go/cmd/vtworker -o ./bin/vtworker +go test -coverpkg=vitess.io/vitess/go/... -c vitess.io/vitess/go/cmd/vtworkerclient -o ./bin/vtworkerclient + +### Execute go/test/endtoend testcase ### +echo "--------- executing endtoend testcases ---------" +cluster_tests=$(echo "$packages_with_all_tests" | grep -E "go/test/endtoend" | cut -d" " -f1) + + +# Run cluster test sequentially +for i in $cluster_tests +do + echo "starting test for $i" + go test $i -v -p=1 -is-coverage=true || : +done + diff --git a/internal/stackql-parser-fork/tools/bootstrap_web.sh b/internal/stackql-parser-fork/tools/bootstrap_web.sh new file mode 100755 index 00000000..cc0e4096 --- /dev/null +++ b/internal/stackql-parser-fork/tools/bootstrap_web.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script is used to install dependencies for compiling +# the code of our upcoming Angular 2 based vtctld UI. +# +# Regular users should not have to run it. Run bootstrap.sh (located in the +# repository root) instead. + +# TODO(mberlin): Merge this back into bootstrap.sh once we support caching the +# dependencies on Travis and local disk. + +# Download node +node_ver=v6.3.1 +node_dist=$VTROOT/dist/node +if [[ -x $node_dist/bin/node && `$node_dist/bin/node -v` == "$node_ver" ]]; then + echo "skipping nodejs download. remove $node_dist to force redownload." +else + echo "Downloading nodejs" + rm -rf $node_dist + node_tar="node_linux64.tar.xz" + curl -sL https://nodejs.org/dist/$node_ver/node-$node_ver-linux-x64.tar.xz -o $node_tar + tar xf $node_tar -C $VTROOT/dist + mv $VTROOT/dist/node-$node_ver-linux-x64 $node_dist + rm $node_tar + # Add the node directory to PATH to make sure that the Angular + # installation below can find the "node" binary. + # (dev.env does actually append it to PATH.) + source $VTROOT/dev.env +fi + +echo "Installing dependencies for building web UI" +angular_cli_dir=$VTROOT/dist/angular-cli +web_dir2=$VTROOT/web/vtctld2 +angular_cli_commit=cacaa4eff10e135016ef81076fab1086a3bce92f +if [[ -d $angular_cli_dir && `cd $angular_cli_dir && git rev-parse HEAD` == "$angular_cli_commit" ]]; then + echo "skipping angular cli download. remove $angular_cli_dir to force download." +else + cd $VTROOT/dist && git clone https://github.com/angular/angular-cli.git --quiet + cd $angular_cli_dir && git checkout $angular_cli_commit --quiet +fi +cd $angular_cli_dir && $node_dist/bin/npm link --silent +cd $web_dir2 && $node_dist/bin/npm install --silent +cd $web_dir2 && $node_dist/bin/npm link angular-cli --silent diff --git a/internal/stackql-parser-fork/tools/build_version_flags.sh b/internal/stackql-parser-fork/tools/build_version_flags.sh new file mode 100755 index 00000000..b2effa7f --- /dev/null +++ b/internal/stackql-parser-fork/tools/build_version_flags.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +source $DIR/shell_functions.inc + +# Normal builds run directly against the git repo, but when packaging (for example with rpms) +# a tar ball might be used, which will prevent the git metadata from being available. +# Should this be the case then allow environment variables to be used to source +# this information instead. +_build_git_rev=$(git rev-parse --short HEAD) +if [ -z "$_build_git_rev" ]; then + _build_git_rev="$BUILD_GIT_REV" +fi +_build_git_branch=$(git rev-parse --abbrev-ref HEAD) +if [ -z "$_build_git_branch" ]; then + _build_git_branch="$BUILD_GIT_BRANCH" +fi + +echo "\ + -X 'vitess.io/vitess/go/vt/servenv.buildHost=$(hostname)' \ + -X 'vitess.io/vitess/go/vt/servenv.buildUser=$(whoami)' \ + -X 'vitess.io/vitess/go/vt/servenv.buildGitRev=${_build_git_rev}' \ + -X 'vitess.io/vitess/go/vt/servenv.buildGitBranch=${_build_git_branch}' \ + -X 'vitess.io/vitess/go/vt/servenv.buildTime=$(LC_ALL=C date)' \ + -X 'vitess.io/vitess/go/vt/servenv.jenkinsBuildNumberStr=${BUILD_NUMBER}' \ +" diff --git a/internal/stackql-parser-fork/tools/check_make_parser.sh b/internal/stackql-parser-fork/tools/check_make_parser.sh new file mode 100755 index 00000000..89207621 --- /dev/null +++ b/internal/stackql-parser-fork/tools/check_make_parser.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# +# Validate that the current version of the generated parser matches the output +# generated by the version of goyacc installed on the local system. +# +# This is used in Travis to verify that the currently committed version was +# generated with the proper version of goyacc. + +source build.env + +CUR="sql.go" +TMP="/tmp/sql.$$.go" + +set -e + +if ! cd go/vt/sqlparser/ ; then + echo "ERROR: $0 must be run in the root project directory" + exit 1 +fi + +mv $CUR $TMP +output=$(go run golang.org/x/tools/cmd/goyacc -o $CUR sql.y) + +if [ -n "$output" ]; then + echo "Expected empty output from goyacc, got:" + echo $output + mv $TMP $CUR + exit 1 +fi + +gofmt -w $CUR + +if ! diff -q $CUR $TMP > /dev/null ; then + echo "ERROR: Regenerated parser $TMP does not match current version $(pwd)/sql.go:" + diff -u $CUR $TMP + mv $TMP $CUR + + echo + echo "Please ensure go and goyacc are up to date and re-run 'make parser' to generate." + exit 1 +fi + +mv $TMP $CUR diff --git a/internal/stackql-parser-fork/tools/coverage-go/Readme.md b/internal/stackql-parser-fork/tools/coverage-go/Readme.md new file mode 100644 index 00000000..58d27615 --- /dev/null +++ b/internal/stackql-parser-fork/tools/coverage-go/Readme.md @@ -0,0 +1,6 @@ +#### Purpose + +- To get the coverage when we run the end to end testcase,we need instrumented binaries. +- To get such binaries, we have to put a test file under go/cmd/, so that if we execute `go test ... -c .. -o ..` command, it will produce such files. +- This directory contains the test files which will copied to go/cmd/ via some script and then it instrumented binaries will be produced. +- The end to end test can be configured to run in coverage mode, which will utilize the binaries to produce coverage report. \ No newline at end of file diff --git a/internal/stackql-parser-fork/tools/coverage-go/mysqlctl_test.go b/internal/stackql-parser-fork/tools/coverage-go/mysqlctl_test.go new file mode 100644 index 00000000..8015ad4c --- /dev/null +++ b/internal/stackql-parser-fork/tools/coverage-go/mysqlctl_test.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import "testing" + +func TestMysqlCtl(t *testing.T) { + main() +} diff --git a/internal/stackql-parser-fork/tools/coverage-go/vtctl_test.go b/internal/stackql-parser-fork/tools/coverage-go/vtctl_test.go new file mode 100644 index 00000000..497d0b21 --- /dev/null +++ b/internal/stackql-parser-fork/tools/coverage-go/vtctl_test.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import ( + "testing" +) + +func TestVtctl(t *testing.T) { + main() +} diff --git a/internal/stackql-parser-fork/tools/coverage-go/vtctlclient_test.go b/internal/stackql-parser-fork/tools/coverage-go/vtctlclient_test.go new file mode 100644 index 00000000..4e78872b --- /dev/null +++ b/internal/stackql-parser-fork/tools/coverage-go/vtctlclient_test.go @@ -0,0 +1,7 @@ +package main + +import "testing" + +func TestVtclient(t *testing.T) { + main() +} diff --git a/internal/stackql-parser-fork/tools/coverage-go/vtctld_test.go b/internal/stackql-parser-fork/tools/coverage-go/vtctld_test.go new file mode 100644 index 00000000..bf370469 --- /dev/null +++ b/internal/stackql-parser-fork/tools/coverage-go/vtctld_test.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import "testing" + +func TestVtctld(t *testing.T) { + main() +} diff --git a/internal/stackql-parser-fork/tools/coverage-go/vtgate_test.go b/internal/stackql-parser-fork/tools/coverage-go/vtgate_test.go new file mode 100644 index 00000000..57d03089 --- /dev/null +++ b/internal/stackql-parser-fork/tools/coverage-go/vtgate_test.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import "testing" + +func TestVtgate(t *testing.T) { + main() +} diff --git a/internal/stackql-parser-fork/tools/coverage-go/vttablet_test.go b/internal/stackql-parser-fork/tools/coverage-go/vttablet_test.go new file mode 100644 index 00000000..2fc3525a --- /dev/null +++ b/internal/stackql-parser-fork/tools/coverage-go/vttablet_test.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "testing" +) + +func TestVttablet(t *testing.T) { + main() +} diff --git a/internal/stackql-parser-fork/tools/coverage-go/vtworker_test.go b/internal/stackql-parser-fork/tools/coverage-go/vtworker_test.go new file mode 100644 index 00000000..1a077e21 --- /dev/null +++ b/internal/stackql-parser-fork/tools/coverage-go/vtworker_test.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import "testing" + +func TestVtworker(t *testing.T) { + main() +} diff --git a/internal/stackql-parser-fork/tools/coverage-go/vtworkerclient_test.go b/internal/stackql-parser-fork/tools/coverage-go/vtworkerclient_test.go new file mode 100644 index 00000000..e1e6fa1c --- /dev/null +++ b/internal/stackql-parser-fork/tools/coverage-go/vtworkerclient_test.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import "testing" + +func TestVtworkerclient(t *testing.T) { + main() +} diff --git a/internal/stackql-parser-fork/tools/dependency_check.sh b/internal/stackql-parser-fork/tools/dependency_check.sh new file mode 100755 index 00000000..cfaa912f --- /dev/null +++ b/internal/stackql-parser-fork/tools/dependency_check.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source build.env + +function fail() { + echo "ERROR: $1" + exit 1 +} + +PLATFORM_BINARIES="" +case "$(uname -s)" in + Linux*) PLATFORM_BINARIES="k3s";; +esac + +# These binaries are required to 'make test' +# mysqld might be in /usr/sbin which will not be in the default PATH +PATH="/usr/sbin:$PATH" +for binary in mysqld consul etcd etcdctl zksrv.sh javadoc mvn ant curl wget zip unzip $PLATFORM_BINARIES; do + command -v "$binary" > /dev/null || fail "${binary} is not installed in PATH. See https://vitess.io/contributing/build-from-source for install instructions." +done; diff --git a/internal/stackql-parser-fork/tools/e2e_go_test.sh b/internal/stackql-parser-fork/tools/e2e_go_test.sh new file mode 100755 index 00000000..bc4964ff --- /dev/null +++ b/internal/stackql-parser-fork/tools/e2e_go_test.sh @@ -0,0 +1,4 @@ +#!/bin/bash +source build.env +echo "running tests for " "$@" +go test -v "$@" -alsologtostderr -count=1 \ No newline at end of file diff --git a/internal/stackql-parser-fork/tools/e2e_test_cluster.sh b/internal/stackql-parser-fork/tools/e2e_test_cluster.sh new file mode 100755 index 00000000..991cd29c --- /dev/null +++ b/internal/stackql-parser-fork/tools/e2e_test_cluster.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# These test uses excutables and launch them as process +# After that all tests run, here we are testing those + +# All Go packages with test files. +# Output per line: * + +source build.env + +packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/.../endtoend/... | sort) + +cluster_tests=$(echo "$packages_with_tests" | grep -E "go/test/endtoend" | cut -d" " -f1) + +# Run cluster test sequentially +echo "running cluster tests $cluster_tests" +echo "$cluster_tests" | xargs go test -v -p=1 +if [ $? -ne 0 ]; then + echo "ERROR: Go cluster tests failed. See above for errors." + echo + echo "This should NOT happen. Did you introduce a flaky unit test?" + echo "If so, please rename it to the suffix _flaky_test.go." + exit 1 +fi diff --git a/internal/stackql-parser-fork/tools/e2e_test_race.sh b/internal/stackql-parser-fork/tools/e2e_test_race.sh new file mode 100755 index 00000000..32374a25 --- /dev/null +++ b/internal/stackql-parser-fork/tools/e2e_test_race.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source build.env + +temp_log_file="$(mktemp --suffix .unit_test_race.log)" +trap '[ -f "$temp_log_file" ] && rm $temp_log_file' EXIT + +# Wrapper around go test -race. + +# This script exists because the -race test doesn't allow to distinguish +# between a failed (e.g. flaky) unit test and a found data race. +# Although Go 1.5 says 'exit status 66' in case of a race, it exits with 1. +# Therefore, we manually check the output of 'go test' for data races and +# exit with an error if one was found. +# TODO(mberlin): Test all packages (go/... instead of go/vt/...) once +# go/cgzip is moved into a separate repository. We currently +# skip the cgzip package because -race takes >30 sec for it. + +# All endtoend Go packages with test files. +# Output per line: * +packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/.../endtoend/... | sort) +packages_with_tests=$(echo "$packages_with_tests" | grep -vE "go/test/endtoend" | cut -d" " -f1) + +# endtoend tests should be in a directory called endtoend +all_e2e_tests=$(echo "$packages_with_tests" | cut -d" " -f1) + +# Run all endtoend tests. +echo "$all_e2e_tests" | xargs go test $VT_GO_PARALLEL -race 2>&1 | tee $temp_log_file +if [ ${PIPESTATUS[0]} -ne 0 ]; then + if grep "WARNING: DATA RACE" -q $temp_log_file; then + echo + echo "ERROR: go test -race found a data race. See log above." + exit 2 + fi + + echo "ERROR: go test -race found NO data race, but failed. See log above." + exit 1 +fi + +echo +echo "SUCCESS: No data race was found." diff --git a/internal/stackql-parser-fork/tools/e2e_test_runner.sh b/internal/stackql-parser-fork/tools/e2e_test_runner.sh new file mode 100755 index 00000000..c581957a --- /dev/null +++ b/internal/stackql-parser-fork/tools/e2e_test_runner.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Custom Go endtoend test runner which runs all endtoend tests in parallel +# except for known flaky tests. +# Flaky unit tests are run sequentially in the second phase and retried up to +# three times. + +# Why are there flaky unit tests? +# +# Some of the Go unit tests are inherently flaky e.g. because they use the +# real timer implementation and might fail when things take longer as usual. +# In particular, this happens when the system is under load and threads do not +# get scheduled as fast as usual. Then, the expected timings do not match. + +# Set VT_GO_PARALLEL variable in the same way as the Makefile does. +# We repeat this here because this script is called directly by test.go +# and not via the Makefile. + +source build.env + +if [[ -z $VT_GO_PARALLEL && -n $VT_GO_PARALLEL_VALUE ]]; then + VT_GO_PARALLEL="-p $VT_GO_PARALLEL_VALUE" +fi + +# All Go packages with test files. +# Output per line: * +packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/.../endtoend/... | sort) + +# Flaky tests have the suffix "_flaky_test.go". +all_except_flaky_and_cluster_tests=$(echo "$packages_with_tests" | grep -vE ".+ .+_flaky_test\.go" | grep -vE "go/test/endtoend" | cut -d" " -f1) +flaky_tests=$(echo "$packages_with_tests" | grep -E ".+ .+_flaky_test\.go" | grep -vE "go/test/endtoend" | cut -d" " -f1) + +# Run non-flaky tests. +echo "$all_except_flaky_and_cluster_tests" | xargs go test $VT_GO_PARALLEL +if [ $? -ne 0 ]; then + echo "ERROR: Go unit tests failed. See above for errors." + echo + echo "This should NOT happen. Did you introduce a flaky unit test?" + echo "If so, please rename it to the suffix _flaky_test.go." + exit 1 +fi + +# Run flaky tests sequentially. Retry when necessary. +for pkg in $flaky_tests; do + max_attempts=3 + attempt=1 + # Set a timeout because some tests may deadlock when they flake. + until go test -timeout 30s $VT_GO_PARALLEL $pkg; do + echo "FAILED (try $attempt/$max_attempts) in $pkg (return code $?). See above for errors." + if [ $((++attempt)) -gt $max_attempts ]; then + echo "ERROR: Flaky Go unit tests in package $pkg failed too often (after $max_attempts retries). Please reduce the flakiness." + exit 1 + fi + done +done diff --git a/internal/stackql-parser-fork/tools/generate_web_artifacts.sh b/internal/stackql-parser-fork/tools/generate_web_artifacts.sh new file mode 100755 index 00000000..a7080db7 --- /dev/null +++ b/internal/stackql-parser-fork/tools/generate_web_artifacts.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script is used to build and copy the Angular 2 based vtctld UI +# into the release folder (app) for checkin. Prior to running this script, +# bootstrap.sh and bootstrap_web.sh should already have been run. + +set -e + +vtctld2_dir=$VTROOT/web/vtctld2 +if [[ -d $vtctld2_dir/app ]]; then + rm -rf $vtctld2_dir/app +fi +cd $vtctld2_dir && ng build -prod --output-path app/ +rm -rf $vtctld2_dir/app/assets +cp -f $vtctld2_dir/src/{favicon.ico,plotly-latest.min.js,primeui-ng-all.min.css} $vtctld2_dir/app/ diff --git a/internal/stackql-parser-fork/tools/make-release-packages.sh b/internal/stackql-parser-fork/tools/make-release-packages.sh new file mode 100755 index 00000000..ba31cb4d --- /dev/null +++ b/internal/stackql-parser-fork/tools/make-release-packages.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +# This script builds and packages a Vitess release suitable for creating a new +# release on https://github.com/vitessio/vitess/releases. + +# http://redsymbol.net/articles/unofficial-bash-strict-mode/ +set -euo pipefail + +# sudo gem install --no-ri --no-rdoc fpm +# shellcheck disable=SC1091 +source build.env + +SHORT_REV="$(git rev-parse --short HEAD)" +if [ -n "$*" ]; then + VERSION="$1" +else + VERSION="$(git describe --tags --dirty --always | sed s/^v// | sed s/-dirty//)" +fi + +RELEASE_ID="vitess-${VERSION}-${SHORT_REV}" +RELEASE_DIR="${VTROOT}/releases/${RELEASE_ID}" +DESCRIPTION="A database clustering system for horizontal scaling of MySQL + +Vitess is a database solution for deploying, scaling and managing large +clusters of MySQL instances. It's architected to run as effectively in a public +or private cloud architecture as it does on dedicated hardware. It combines and +extends many important MySQL features with the scalability of a NoSQL database." + +TAR_FILE="${RELEASE_ID}.tar.gz" + +make tools +make build + +mkdir -p releases + +# Copy a subset of binaries from issue #5421 +mkdir -p "${RELEASE_DIR}/bin" +for binary in vttestserver mysqlctl mysqlctld query_analyzer topo2topo vtaclcheck vtbackup vtbench vtclient vtcombo vtctl vtctlclient vtctld vtexplain vtgate vttablet vtworker vtworkerclient zk zkctl zkctld; do + cp "bin/$binary" "${RELEASE_DIR}/bin/" +done; + +# Copy remaining files, preserving date/permissions +# But resolving symlinks +cp -rpfL examples "${RELEASE_DIR}" + +echo "Follow the installation instructions at: https://vitess.io/docs/get-started/local/" > "${RELEASE_DIR}"/examples/README.md + +cd "${RELEASE_DIR}/.." +tar -czf "${TAR_FILE}" "${RELEASE_ID}" + +cd "${RELEASE_DIR}" +PREFIX=${PREFIX:-/usr} + +# For RPMs and DEBs, binaries will be in /usr/bin +# Examples will be in /usr/share/vitess/examples + +mkdir -p share/vitess/ +mv examples share/vitess/ + +fpm \ + --force \ + --input-type dir \ + --name vitess \ + --version "${VERSION}" \ + --url "https://vitess.io/" \ + --description "${DESCRIPTION}" \ + --license "Apache License - Version 2.0, January 2004" \ + --prefix "$PREFIX" \ + -C "${RELEASE_DIR}" \ + --before-install "$VTROOT/tools/preinstall.sh" \ + --package "$(dirname "${RELEASE_DIR}")" \ + --iteration "${SHORT_REV}" \ + -t deb --deb-no-default-config-files + +fpm \ + --force \ + --input-type dir \ + --name vitess \ + --version "${VERSION}" \ + --url "https://vitess.io/" \ + --description "${DESCRIPTION}" \ + --license "Apache License - Version 2.0, January 2004" \ + --prefix "$PREFIX" \ + -C "${RELEASE_DIR}" \ + --before-install "$VTROOT/tools/preinstall.sh" \ + --package "$(dirname "${RELEASE_DIR}")" \ + --iteration "${SHORT_REV}" \ + -t rpm + +cd "${VTROOT}"/releases +echo "" +echo "Packages created as of $(date +"%m-%d-%y") at $(date +"%r %Z")" +echo "" +echo "Package | SHA256" +echo "------------ | -------------" +for file in $(find . -type f -printf '%T@ %p\n' | sort -n | tail -3 | awk '{print $2}' | sed s?^./??); do + echo "$file | $(sha256sum "$file" | awk '{print $1}')"; +done diff --git a/internal/stackql-parser-fork/tools/preinstall.sh b/internal/stackql-parser-fork/tools/preinstall.sh new file mode 100755 index 00000000..1d9c80ca --- /dev/null +++ b/internal/stackql-parser-fork/tools/preinstall.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +if ! /usr/bin/getent group vitess >/dev/null ; then + groupadd -r vitess +fi + +if ! /usr/bin/getent passwd vitess >/dev/null ; then + useradd -r -g vitess vitess +fi diff --git a/internal/stackql-parser-fork/tools/pylint.sh b/internal/stackql-parser-fork/tools/pylint.sh new file mode 100755 index 00000000..92f97f4e --- /dev/null +++ b/internal/stackql-parser-fork/tools/pylint.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs pylint with our desired flags. +# It's used by the pre-commit hook, but is a separate script +# so you can run it manually too. + +PYLINT=${PYLINT:-/usr/bin/gpylint} + +file=$1 + +if [[ "$file" =~ \btest/ ]] ; then + mode=style,test +else + mode=style +fi + +$PYLINT --mode $mode \ + --disable g-bad-file-header,g-bad-import-order,g-unknown-interpreter \ + --module-header-template '' \ + --msg-template '{path}:{line}:{msg_id}{obj_prefix}{obj}: {msg}{sym_separator}[{symbol}]' $file diff --git a/internal/stackql-parser-fork/tools/shell_functions.inc b/internal/stackql-parser-fork/tools/shell_functions.inc new file mode 100644 index 00000000..bd344db9 --- /dev/null +++ b/internal/stackql-parser-fork/tools/shell_functions.inc @@ -0,0 +1,68 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Library of functions which are used by bootstrap.sh or the Makefile. + +# goversion_min returns true if major.minor go version is at least some value. +function goversion_min() { + [[ "$(go version)" =~ go([0-9]+)\.([0-9]+) ]] + gotmajor=${BASH_REMATCH[1]} + gotminor=${BASH_REMATCH[2]} + [[ "$1" =~ ([0-9]+)\.([0-9]+) ]] + wantmajor=${BASH_REMATCH[1]} + wantminor=${BASH_REMATCH[2]} + [ "$gotmajor" -lt "$wantmajor" ] && return 1 + [ "$gotmajor" -gt "$wantmajor" ] && return 0 + [ "$gotminor" -lt "$wantminor" ] && return 1 + return 0 +} + +# prepend_path returns $2 prepended the colon separated path $1. +# If it's already part of the path, it won't be added again. +# +# Note the first time it's called, the original value is empty, +# and the second value has the path to add. We just end up adding it regardless +# of its existence. +function prepend_path() { + # $1 path variable + # $2 path to add + if [[ ! -d "$2" ]]; then + # To be added path does not exist. Ignore it and return the path variable unchanged. + echo "$1" + return + fi + + if [[ -z "$1" ]]; then + # path variable is empty. Set its initial value to the path to add. + echo "$2" + return + fi + + if [[ ":$1:" != *":$2:"* ]]; then + # path variable does not contain path to add yet. Prepend it. + echo "$2:$1" + return + fi + + # Return path variable unchanged. + echo "$1" +} + +function fail() { + echo "ERROR: $1" + exit 1 +} + diff --git a/internal/stackql-parser-fork/tools/statsd.go b/internal/stackql-parser-fork/tools/statsd.go new file mode 100644 index 00000000..3699d7ba --- /dev/null +++ b/internal/stackql-parser-fork/tools/statsd.go @@ -0,0 +1,138 @@ +/* + * Copyright 2019 The Vitess Authors. + + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + + * http://www.apache.org/licenses/LICENSE-2.0 + + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// statsd is a simple server for hosting test.go remote stats. +package main + +import ( + "encoding/json" + "io/ioutil" + "log" + "net/http" + "strconv" + "sync" + "time" +) + +var mu sync.Mutex + +const statsFileName = "stats.json" + +func main() { + http.HandleFunc("/travis/stats", func(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + test := r.FormValue("test") + result := r.FormValue("result") + + if test == "" || result == "" { + return + } + + switch result { + case "pass": + duration := r.FormValue("duration") + if duration == "" { + return + } + dur, err := time.ParseDuration(duration) + if err != nil { + return + } + testPassed(test, dur) + case "fail": + testFailed(test) + case "flake": + try := r.FormValue("try") + if try == "" { + return + } + i, err := strconv.ParseInt(try, 10, 64) + if err != nil { + return + } + testFlaked(test, int(i)) + } + + return + } + + http.ServeFile(w, r, statsFileName) + }) + + http.ListenAndServe(":15123", nil) +} + +type Stats struct { + TestStats map[string]TestStats +} + +type TestStats struct { + Pass, Fail, Flake int + PassTime time.Duration +} + +func testPassed(name string, passTime time.Duration) { + updateTestStats(name, func(ts *TestStats) { + totalTime := int64(ts.PassTime)*int64(ts.Pass) + int64(passTime) + ts.Pass++ + ts.PassTime = time.Duration(totalTime / int64(ts.Pass)) + }) +} + +func testFailed(name string) { + updateTestStats(name, func(ts *TestStats) { + ts.Fail++ + }) +} + +func testFlaked(name string, try int) { + updateTestStats(name, func(ts *TestStats) { + ts.Flake += try - 1 + }) +} + +func updateTestStats(name string, update func(*TestStats)) { + var stats Stats + + mu.Lock() + defer mu.Unlock() + + data, err := ioutil.ReadFile(statsFileName) + if err != nil { + log.Print("Can't read stats file, starting new one.") + } else { + if err := json.Unmarshal(data, &stats); err != nil { + log.Printf("Can't parse stats file: %v", err) + return + } + } + + if stats.TestStats == nil { + stats.TestStats = make(map[string]TestStats) + } + ts := stats.TestStats[name] + update(&ts) + stats.TestStats[name] = ts + + data, err = json.MarshalIndent(stats, "", "\t") + if err != nil { + log.Printf("Can't encode stats file: %v", err) + return + } + if err := ioutil.WriteFile(statsFileName, data, 0644); err != nil { + log.Printf("Can't write stats file: %v", err) + } +} diff --git a/internal/stackql-parser-fork/tools/tools.go b/internal/stackql-parser-fork/tools/tools.go new file mode 100644 index 00000000..b9fb5d92 --- /dev/null +++ b/internal/stackql-parser-fork/tools/tools.go @@ -0,0 +1,30 @@ +// +build tools + +/* + * Copyright 2019 The Vitess Authors. + + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + + * http://www.apache.org/licenses/LICENSE-2.0 + + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tools + +// These imports ensure that "go mod tidy" won't remove deps +// for build-time dependencies like linters and code generators +import ( + _ "github.com/golang/mock/mockgen" + _ "golang.org/x/lint" + _ "golang.org/x/tools/cmd/cover" + _ "golang.org/x/tools/cmd/goimports" + _ "golang.org/x/tools/cmd/goyacc" + _ "honnef.co/go/tools/cmd/staticcheck" +) diff --git a/internal/stackql-parser-fork/tools/unit_test_race.sh b/internal/stackql-parser-fork/tools/unit_test_race.sh new file mode 100755 index 00000000..320f220a --- /dev/null +++ b/internal/stackql-parser-fork/tools/unit_test_race.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source build.env + +if [[ -z $VT_GO_PARALLEL && -n $VT_GO_PARALLEL_VALUE ]]; then + VT_GO_PARALLEL="-p $VT_GO_PARALLEL_VALUE" +fi + +# All Go packages with test files. +# Output per line: * + +packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/... | sort) + +# exclude end to end tests +packages_to_test=$(echo "$packages_with_tests" | cut -d" " -f1 | grep -v "endtoend") +all_except_flaky_tests=$(echo "$packages_to_test" | grep -vE ".+ .+_flaky_test\.go" | cut -d" " -f1 | grep -v "endtoend") +flaky_tests=$(echo "$packages_to_test" | grep -E ".+ .+_flaky_test\.go" | cut -d" " -f1) + +# Flaky tests have the suffix "_flaky_test.go". +# Exclude endtoend tests +all_except_flaky_tests=$(echo "$packages_with_tests" | grep -vE ".+ .+_flaky_test\.go" | cut -d" " -f1 | grep -v "endtoend") +flaky_tests=$(echo "$packages_with_tests" | grep -E ".+ .+_flaky_test\.go" | cut -d" " -f1) + +# Run non-flaky tests. +echo "$all_except_flaky_tests" | xargs go test $VT_GO_PARALLEL -race -count=1 +if [ $? -ne 0 ]; then + echo "ERROR: Go unit tests failed. See above for errors." + echo + echo "This should NOT happen. Did you introduce a flaky unit test?" + echo "If so, please rename it to the suffix _flaky_test.go." + exit 1 +fi + +echo '# Flaky tests (3 attempts permitted)' + +# Run flaky tests sequentially. Retry when necessary. +for pkg in $flaky_tests; do + max_attempts=3 + attempt=1 + # Set a timeout because some tests may deadlock when they flake. + until go test -timeout 2m $VT_GO_PARALLEL $pkg -race -count=1; do + echo "FAILED (try $attempt/$max_attempts) in $pkg (return code $?). See above for errors." + if [ $((++attempt)) -gt $max_attempts ]; then + echo "ERROR: Flaky Go unit tests in package $pkg failed too often (after $max_attempts retries). Please reduce the flakiness." + exit 1 + fi + done +done + diff --git a/internal/stackql-parser-fork/tools/unit_test_runner.sh b/internal/stackql-parser-fork/tools/unit_test_runner.sh new file mode 100755 index 00000000..2929958e --- /dev/null +++ b/internal/stackql-parser-fork/tools/unit_test_runner.sh @@ -0,0 +1,72 @@ +#!/bin/bash + +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Custom Go unit test runner which runs all unit tests in parallel except for +# known flaky unit tests. +# Flaky unit tests are run sequentially in the second phase and retried up to +# three times. + +# Why are there flaky unit tests? +# +# Some of the Go unit tests are inherently flaky e.g. because they use the +# real timer implementation and might fail when things take longer as usual. +# In particular, this happens when the system is under load and threads do not +# get scheduled as fast as usual. Then, the expected timings do not match. + +# Set VT_GO_PARALLEL variable in the same way as the Makefile does. +# We repeat this here because this script is called directly by test.go +# and not via the Makefile. + +source build.env + +if [[ -z $VT_GO_PARALLEL && -n $VT_GO_PARALLEL_VALUE ]]; then + VT_GO_PARALLEL="-p $VT_GO_PARALLEL_VALUE" +fi + +# All Go packages with test files. +# Output per line: * +packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/... | sort) + +# Flaky tests have the suffix "_flaky_test.go". +# Exclude endtoend tests +all_except_flaky_tests=$(echo "$packages_with_tests" | grep -vE ".+ .+_flaky_test\.go" | cut -d" " -f1 | grep -v "endtoend") +flaky_tests=$(echo "$packages_with_tests" | grep -E ".+ .+_flaky_test\.go" | cut -d" " -f1) + +# Run non-flaky tests. +echo "$all_except_flaky_tests" | xargs go test $VT_GO_PARALLEL -count=1 +if [ $? -ne 0 ]; then + echo "ERROR: Go unit tests failed. See above for errors." + echo + echo "This should NOT happen. Did you introduce a flaky unit test?" + echo "If so, please rename it to the suffix _flaky_test.go." + exit 1 +fi + +echo '# Flaky tests (3 attempts permitted)' + +# Run flaky tests sequentially. Retry when necessary. +for pkg in $flaky_tests; do + max_attempts=3 + attempt=1 + # Set a timeout because some tests may deadlock when they flake. + until go test -timeout 2m $VT_GO_PARALLEL $pkg -count=1; do + echo "FAILED (try $attempt/$max_attempts) in $pkg (return code $?). See above for errors." + if [ $((++attempt)) -gt $max_attempts ]; then + echo "ERROR: Flaky Go unit tests in package $pkg failed too often (after $max_attempts retries). Please reduce the flakiness." + exit 1 + fi + done +done