From d629d1adad195dd89ee8a712f50826337097a457 Mon Sep 17 00:00:00 2001 From: Tyk Date: Thu, 1 Aug 2019 09:32:49 +0000 Subject: [PATCH] Commit message --- .gitignore | 1 - .travis.yml | 18 +- CONTRIBUTING.md | 8 +- README.md | 12 +- TESTING.md | 129 +- gateway/analytics.go => analytics.go | 6 +- .../analytics_test.go => analytics_test.go | 10 +- gateway/api.go => api.go | 257 +- .../api_definition.go => api_definition.go | 78 +- ...finition_test.go => api_definition_test.go | 168 +- .../api_healthcheck.go => api_healthcheck.go | 13 +- gateway/api_loader.go => api_loader.go | 251 +- gateway/api_test.go => api_test.go | 156 +- apidef/api_definitions.go | 81 +- apidef/importer/blueprint.go | 4 +- apidef/importer/importer.go | 2 - apidef/importer/swagger.go | 2 +- apidef/importer/wsdl.go | 445 --- apidef/importer/wsdl_test.go | 1055 ------- apidef/schema.go | 5 +- gateway/auth_manager.go => auth_manager.go | 10 +- .../batch_requests.go => batch_requests.go | 2 +- ...requests_test.go => batch_requests_test.go | 14 +- bin/ci-swagger.sh | 52 - bin/ci-test.sh | 70 - gateway/cert.go => cert.go | 4 +- ...cert_go1.10_test.go => cert_go1.10_test.go | 40 +- gateway/cert_test.go => cert_test.go | 231 +- certs/manager.go | 2 +- checkup/checkup.go | 39 +- cli/bundler/bundler_test.go | 2 +- cli/cli.go | 11 +- cli/importer/importer.go | 114 +- cli/{linter/linter.go => lint/lint.go} | 6 +- .../linter_test.go => lint/lint_test.go} | 26 +- cli/lint/schema.go | 781 +++++ cli/linter/schema.json | 24 - config/config.go | 339 +-- config/config_test.go | 77 +- gateway/coprocess.go => coprocess.go | 67 +- coprocess/coprocess_common.pb.go | 80 +- coprocess/coprocess_mini_request_object.pb.go | 89 +- coprocess/coprocess_object.pb.go | 197 +- coprocess/coprocess_return_overrides.pb.go | 57 +- coprocess/coprocess_session_state.pb.go | 292 +- gateway/coprocess_api.go => coprocess_api.go | 8 +- ...coprocess_bundle.go => coprocess_bundle.go | 2 +- ...bundle_test.go => coprocess_bundle_test.go | 8 +- 
.../coprocess_dummy.go => coprocess_dummy.go | 7 +- ...coprocess_events.go => coprocess_events.go | 2 +- .../coprocess_grpc.go => coprocess_grpc.go | 7 +- ...ess_grpc_test.go => coprocess_grpc_test.go | 30 +- ...process_helpers.go => coprocess_helpers.go | 2 +- ..._extractor.go => coprocess_id_extractor.go | 4 +- ...o => coprocess_id_extractor_python_test.go | 23 +- ..._test.go => coprocess_id_extractor_test.go | 12 +- gateway/coprocess_lua.go => coprocess_lua.go | 18 +- ...coprocess_native.go => coprocess_native.go | 12 +- ...coprocess_python.go => coprocess_python.go | 24 +- coprocess_python_api.c | 1 + ...python_test.go => coprocess_python_test.go | 28 +- .../coprocess_test.go => coprocess_test.go | 96 +- ...s_testutil.go => coprocess_test_helpers.go | 9 +- ctx/ctx.go | 79 - ...board_register.go => dashboard_register.go | 46 +- ..._limiter.go => distributed_rate_limiter.go | 2 +- dnscache/manager.go | 162 - dnscache/manager_test.go | 141 - dnscache/mock_storage.go | 29 - dnscache/storage.go | 93 - dnscache/storage_test.go | 288 -- ...r_webhooks.go => event_handler_webhooks.go | 4 +- ..._test.go => event_handler_webhooks_test.go | 16 +- gateway/event_system.go => event_system.go | 4 +- ...ent_system_test.go => event_system_test.go | 2 +- gateway/auth_manager_test.go | 104 - gateway/coprocess_python_api.c | 78 - gateway/mw_go_plugin.go | 164 - gateway/redis_analytics_purger.go | 38 - gateway/sds.c | 1277 -------- gateway/server.go | 1411 --------- gateway/swagger.go | 25 - gateway/version.go | 3 - gateway/gateway_test.go => gateway_test.go | 458 +-- goplugin/goplugin.go | 31 - goplugin/mw_go_plugin_test.go | 95 - goplugin/no_goplugin.go | 12 - gateway/handler_error.go => handler_error.go | 84 +- .../handler_success.go => handler_success.go | 33 +- ...ndler_websocket.go => handler_websocket.go | 2 +- gateway/testutil.go => helpers_test.go | 533 ++-- gateway/host_checker.go => host_checker.go | 22 +- ...cker_manager.go => host_checker_manager.go | 6 +- 
...st_checker_test.go => host_checker_test.go | 10 +- install/data/tyk.self_contained.conf | 11 +- install/data/tyk.with_dash.conf | 9 +- ...handlers.go => instrumentation_handlers.go | 2 +- ..._sink.go => instrumentation_statsd_sink.go | 2 +- gateway/jq.go => jq.go | 2 +- ..._event_handler.go => jsvm_event_handler.go | 2 +- ...ap_auth_handler.go => ldap_auth_handler.go | 2 +- gateway/le_helpers.go => le_helpers.go | 2 +- log/log.go | 31 +- gateway/log_helpers.go => log_helpers.go | 2 +- ...log_helpers_test.go => log_helpers_test.go | 4 +- gateway/looping_test.go => looping_test.go | 22 +- main.go | 1381 ++++++++- gateway/middleware.go => middleware.go | 45 +- gateway/monitor.go => monitor.go | 2 +- ...andler.go => multi_target_proxy_handler.go | 2 +- .../multiauth_test.go => multiauth_test.go | 18 +- ...mw_access_rights.go => mw_access_rights.go | 2 +- ..._api_rate_limit.go => mw_api_rate_limit.go | 2 +- ...limit_test.go => mw_api_rate_limit_test.go | 27 +- gateway/mw_auth_key.go => mw_auth_key.go | 2 +- ...mw_auth_key_test.go => mw_auth_key_test.go | 58 +- gateway/mw_basic_auth.go => mw_basic_auth.go | 2 +- ...asic_auth_test.go => mw_basic_auth_test.go | 30 +- ...ficate_check.go => mw_certificate_check.go | 2 +- .../mw_context_vars.go => mw_context_vars.go | 4 +- ...xt_vars_test.go => mw_context_vars_test.go | 8 +- .../mw_example_test.go => mw_example_test.go | 2 +- ...ranular_access.go => mw_granular_access.go | 2 +- gateway/mw_hmac.go => mw_hmac.go | 2 +- gateway/mw_hmac_test.go => mw_hmac_test.go | 26 +- .../mw_ip_blacklist.go => mw_ip_blacklist.go | 2 +- ...acklist_test.go => mw_ip_blacklist_test.go | 8 +- .../mw_ip_whitelist.go => mw_ip_whitelist.go | 2 +- ...itelist_test.go => mw_ip_whitelist_test.go | 8 +- gateway/mw_js_plugin.go => mw_js_plugin.go | 23 +- ..._js_plugin_test.go => mw_js_plugin_test.go | 74 +- gateway/mw_jwt.go => mw_jwt.go | 63 +- gateway/mw_jwt_test.go => mw_jwt_test.go | 514 ++-- ...xpired_check.go => mw_key_expired_check.go | 2 +- 
...hod_transform.go => mw_method_transform.go | 2 +- ..._modify_headers.go => mw_modify_headers.go | 2 +- ...2_key_exists.go => mw_oauth2_key_exists.go | 2 +- gateway/mw_openid.go => mw_openid.go | 48 +- ...activity.go => mw_organisation_activity.go | 11 +- ...est.go => mw_organization_activity_test.go | 36 +- gateway/mw_rate_check.go => mw_rate_check.go | 2 +- ...mw_rate_limiting.go => mw_rate_limiting.go | 2 +- .../mw_redis_cache.go => mw_redis_cache.go | 84 +- ...is_cache_test.go => mw_redis_cache_test.go | 8 +- ..._size_limit.go => mw_request_size_limit.go | 2 +- gateway/mw_strip_auth.go => mw_strip_auth.go | 2 +- ...trip_auth_test.go => mw_strip_auth_test.go | 2 +- ...rack_endpoints.go => mw_track_endpoints.go | 2 +- gateway/mw_transform.go => mw_transform.go | 2 +- .../mw_transform_jq.go => mw_transform_jq.go | 2 +- ...rm_jq_dummy.go => mw_transform_jq_dummy.go | 2 +- ...form_jq_test.go => mw_transform_jq_test.go | 10 +- ..._transform_test.go => mw_transform_test.go | 71 +- .../mw_url_rewrite.go => mw_url_rewrite.go | 36 +- ..._rewrite_test.go => mw_url_rewrite_test.go | 371 +-- ...mw_validate_json.go => mw_validate_json.go | 2 +- ...e_json_test.go => mw_validate_json_test.go | 10 +- ...mw_version_check.go => mw_version_check.go | 2 +- ..._check_test.go => mw_version_check_test.go | 26 +- ...tual_endpoint.go => mw_virtual_endpoint.go | 10 +- ...int_test.go => mw_virtual_endpoint_test.go | 10 +- gateway/newrelic.go => newrelic.go | 4 +- gateway/oauth_manager.go => oauth_manager.go | 77 +- ...h_manager_test.go => oauth_manager_test.go | 183 +- gateway/policy.go => policy.go | 9 +- gateway/policy_test.go => policy_test.go | 46 +- ...dis_logrus_hook.go => redis_logrus_hook.go | 4 +- ...config.go => redis_signal_handle_config.go | 2 +- gateway/redis_signals.go => redis_signals.go | 8 +- regexp/cache_regexp_byte_ret_bool.go | 12 +- regexp/cache_regexp_str_func_ret_str.go | 13 +- ...ache_regexp_str_int_ret_slice_slice_str.go | 13 +- 
regexp/cache_regexp_str_int_ret_slice_str.go | 13 +- regexp/cache_regexp_str_ret_bool.go | 12 +- regexp/cache_regexp_str_ret_slice_str.go | 12 +- regexp/cache_regexp_str_str_ret_str.go | 12 +- regexp/keybuilder.go | 66 - regexp/keybuilder_test.go | 110 - ...ector.go => res_handler_header_injector.go | 5 +- ....go => res_handler_header_injector_test.go | 20 +- ...form.go => res_handler_header_transform.go | 6 +- ...ransform.go => res_handler_jq_transform.go | 2 +- ...my.go => res_handler_jq_transform_dummy.go | 6 +- ...r_transform.go => res_handler_transform.go | 6 +- ...m_test.go => res_handler_transform_test.go | 36 +- gateway/reverse_proxy.go => reverse_proxy.go | 142 +- ...rse_proxy_test.go => reverse_proxy_test.go | 215 +- rpc/rpc_analytics_purger.go | 135 - rpc/rpc_client.go | 393 --- rpc_analytics_purger.go | 101 + ...ckup_handlers.go => rpc_backup_handlers.go | 2 +- ...orage_handler.go => rpc_storage_handler.go | 640 ++-- gateway/rpc_test.go => rpc_test.go | 43 +- sds.c | 1 + ...rvice_discovery.go => service_discovery.go | 2 +- ...overy_test.go => service_discovery_test.go | 2 +- .../session_manager.go => session_manager.go | 2 +- storage/redis_cluster.go | 91 +- storage/storage.go | 2 +- swagger.yml | 2627 ----------------- test/dns.go | 225 -- test/goplugins/test_goplugin.go | 75 - test/http.go | 205 +- trace/handler.go | 13 - trace/jaeger/config.go | 24 - trace/jaeger/config_test.go | 72 - trace/jaeger/jaeger.go | 48 - trace/log.go | 46 - trace/manager.go | 263 -- trace/openzipkin/config.go | 31 - trace/openzipkin/zipkin.go | 293 -- trace/trace.go | 41 - trace/trace_test.go | 13 - gateway/tracing.go => tracing.go | 16 +- tyk.conf.example | 5 - user/session.go | 8 - {bin => utils}/ci-benchmark.sh | 0 utils/ci-test.sh | 60 + {bin => utils}/dist_build.sh | 0 {bin => utils}/dist_push.sh | 0 {bin => utils}/set-version.sh | 0 .../Masterminds/semver/CHANGELOG.md | 86 - .../github.com/Masterminds/semver/LICENSE.txt | 19 - 
vendor/github.com/Masterminds/semver/Makefile | 36 - .../github.com/Masterminds/semver/README.md | 186 -- .../Masterminds/semver/appveyor.yml | 44 - .../Masterminds/semver/collection.go | 24 - .../Masterminds/semver/constraints.go | 406 --- vendor/github.com/Masterminds/semver/doc.go | 115 - .../github.com/Masterminds/semver/version.go | 421 --- .../rediscluster/rediscluster.go | 55 +- vendor/github.com/aokoli/goutils/CHANGELOG.md | 8 - vendor/github.com/aokoli/goutils/LICENSE.txt | 202 -- vendor/github.com/aokoli/goutils/README.md | 70 - vendor/github.com/aokoli/goutils/appveyor.yml | 21 - .../aokoli/goutils/cryptorandomstringutils.go | 251 -- .../aokoli/goutils/randomstringutils.go | 268 -- .../github.com/aokoli/goutils/stringutils.go | 224 -- vendor/github.com/aokoli/goutils/wordutils.go | 357 --- .../golang/protobuf/proto/decode.go | 1 + .../golang/protobuf/proto/deprecated.go | 63 - .../golang/protobuf/proto/encode.go | 18 + .../github.com/golang/protobuf/proto/equal.go | 3 +- .../golang/protobuf/proto/extensions.go | 78 +- .../github.com/golang/protobuf/proto/lib.go | 100 +- .../golang/protobuf/proto/message_set.go | 137 +- .../golang/protobuf/proto/pointer_reflect.go | 5 +- .../golang/protobuf/proto/pointer_unsafe.go | 15 +- .../golang/protobuf/proto/properties.go | 47 +- .../golang/protobuf/proto/table_marshal.go | 229 +- .../golang/protobuf/proto/table_unmarshal.go | 196 +- .../github.com/golang/protobuf/proto/text.go | 4 +- .../golang/protobuf/proto/text_parser.go | 6 +- vendor/github.com/google/uuid/CONTRIBUTING.md | 10 - vendor/github.com/google/uuid/CONTRIBUTORS | 9 - vendor/github.com/google/uuid/LICENSE | 27 - vendor/github.com/google/uuid/README.md | 19 - vendor/github.com/google/uuid/dce.go | 80 - vendor/github.com/google/uuid/doc.go | 12 - vendor/github.com/google/uuid/go.mod | 1 - vendor/github.com/google/uuid/hash.go | 53 - vendor/github.com/google/uuid/marshal.go | 37 - vendor/github.com/google/uuid/node.go | 90 - 
vendor/github.com/google/uuid/node_js.go | 12 - vendor/github.com/google/uuid/node_net.go | 33 - vendor/github.com/google/uuid/sql.go | 59 - vendor/github.com/google/uuid/time.go | 123 - vendor/github.com/google/uuid/util.go | 43 - vendor/github.com/google/uuid/uuid.go | 245 -- vendor/github.com/google/uuid/version1.go | 44 - vendor/github.com/google/uuid/version4.go | 43 - vendor/github.com/hashicorp/hil/LICENSE | 353 --- vendor/github.com/hashicorp/hil/README.md | 102 - vendor/github.com/hashicorp/hil/appveyor.yml | 18 - .../hashicorp/hil/ast/arithmetic.go | 43 - .../hashicorp/hil/ast/arithmetic_op.go | 24 - vendor/github.com/hashicorp/hil/ast/ast.go | 99 - vendor/github.com/hashicorp/hil/ast/call.go | 47 - .../hashicorp/hil/ast/conditional.go | 36 - vendor/github.com/hashicorp/hil/ast/index.go | 76 - .../github.com/hashicorp/hil/ast/literal.go | 88 - vendor/github.com/hashicorp/hil/ast/output.go | 78 - vendor/github.com/hashicorp/hil/ast/scope.go | 90 - vendor/github.com/hashicorp/hil/ast/stack.go | 25 - .../hashicorp/hil/ast/type_string.go | 54 - .../github.com/hashicorp/hil/ast/unknown.go | 30 - .../hashicorp/hil/ast/variable_access.go | 36 - .../hashicorp/hil/ast/variables_helper.go | 63 - vendor/github.com/hashicorp/hil/builtins.go | 331 --- .../hashicorp/hil/check_identifier.go | 88 - .../github.com/hashicorp/hil/check_types.go | 668 ----- vendor/github.com/hashicorp/hil/convert.go | 159 - vendor/github.com/hashicorp/hil/eval.go | 472 --- vendor/github.com/hashicorp/hil/eval_type.go | 16 - .../hashicorp/hil/evaltype_string.go | 42 - vendor/github.com/hashicorp/hil/go.mod | 6 - vendor/github.com/hashicorp/hil/go.sum | 4 - vendor/github.com/hashicorp/hil/parse.go | 29 - .../hashicorp/hil/parser/binary_op.go | 45 - .../github.com/hashicorp/hil/parser/error.go | 38 - .../github.com/hashicorp/hil/parser/fuzz.go | 28 - .../github.com/hashicorp/hil/parser/parser.go | 522 ---- .../hashicorp/hil/scanner/peeker.go | 55 - .../hashicorp/hil/scanner/scanner.go | 556 
---- .../github.com/hashicorp/hil/scanner/token.go | 105 - .../hashicorp/hil/scanner/tokentype_string.go | 51 - .../hashicorp/hil/transform_fixed.go | 29 - vendor/github.com/hashicorp/hil/walk.go | 266 -- vendor/github.com/hashicorp/terraform/LICENSE | 354 --- .../hashicorp/terraform/flatmap/expand.go | 152 - .../hashicorp/terraform/flatmap/flatten.go | 71 - .../hashicorp/terraform/flatmap/map.go | 82 - .../huandu/xstrings/CONTRIBUTING.md | 23 - vendor/github.com/huandu/xstrings/LICENSE | 22 - vendor/github.com/huandu/xstrings/README.md | 117 - vendor/github.com/huandu/xstrings/common.go | 25 - vendor/github.com/huandu/xstrings/convert.go | 400 --- vendor/github.com/huandu/xstrings/count.go | 120 - vendor/github.com/huandu/xstrings/doc.go | 8 - vendor/github.com/huandu/xstrings/format.go | 170 -- vendor/github.com/huandu/xstrings/go.mod | 1 - .../github.com/huandu/xstrings/manipulate.go | 217 -- .../github.com/huandu/xstrings/translate.go | 547 ---- .../imdario/mergo/CODE_OF_CONDUCT.md | 46 - vendor/github.com/imdario/mergo/LICENSE | 28 - vendor/github.com/imdario/mergo/README.md | 238 -- vendor/github.com/imdario/mergo/doc.go | 44 - vendor/github.com/imdario/mergo/map.go | 175 -- vendor/github.com/imdario/mergo/merge.go | 275 -- vendor/github.com/imdario/mergo/mergo.go | 97 - .../gorpc/LICENSE | 0 .../gorpc/Makefile | 0 .../gorpc/README.md | 0 .../gorpc/TODO | 0 .../gorpc/client.go | 7 +- .../gorpc/common.go | 4 +- .../gorpc/conn_stats.go | 0 .../gorpc/conn_stats_386.go | 0 .../gorpc/conn_stats_generic.go | 0 .../gorpc/dispatcher.go | 0 .../gorpc/doc.go | 0 .../gorpc/encoding.go | 0 .../gorpc/server.go | 17 +- .../gorpc/transport.go | 25 +- .../github.com/mitchellh/reflectwalk/LICENSE | 21 - .../mitchellh/reflectwalk/README.md | 6 - .../github.com/mitchellh/reflectwalk/go.mod | 1 - .../mitchellh/reflectwalk/location.go | 19 - .../mitchellh/reflectwalk/location_string.go | 16 - .../mitchellh/reflectwalk/reflectwalk.go | 401 --- 
.../opentracing/opentracing-go/CHANGELOG.md | 46 - .../opentracing/opentracing-go/LICENSE | 201 -- .../opentracing/opentracing-go/Makefile | 20 - .../opentracing/opentracing-go/README.md | 171 -- .../opentracing/opentracing-go/ext/tags.go | 210 -- .../opentracing-go/globaltracer.go | 42 - .../opentracing/opentracing-go/gocontext.go | 60 - .../opentracing/opentracing-go/log/field.go | 269 -- .../opentracing/opentracing-go/log/util.go | 54 - .../opentracing/opentracing-go/noop.go | 64 - .../opentracing/opentracing-go/propagation.go | 176 -- .../opentracing/opentracing-go/span.go | 189 -- .../opentracing/opentracing-go/tracer.go | 304 -- .../github.com/openzipkin/zipkin-go/LICENSE | 201 -- .../github.com/openzipkin/zipkin-go/Makefile | 29 - .../github.com/openzipkin/zipkin-go/README.md | 102 - .../openzipkin/zipkin-go/appveyor.yml | 23 - .../openzipkin/zipkin-go/circle.yml | 11 - .../openzipkin/zipkin-go/context.go | 52 - vendor/github.com/openzipkin/zipkin-go/doc.go | 20 - .../openzipkin/zipkin-go/endpoint.go | 80 - vendor/github.com/openzipkin/zipkin-go/go.mod | 25 - vendor/github.com/openzipkin/zipkin-go/go.sum | 76 - .../zipkin-go/idgenerator/idgenerator.go | 130 - .../openzipkin/zipkin-go/model/annotation.go | 60 - .../openzipkin/zipkin-go/model/doc.go | 23 - .../openzipkin/zipkin-go/model/endpoint.go | 31 - .../openzipkin/zipkin-go/model/kind.go | 27 - .../openzipkin/zipkin-go/model/span.go | 138 - .../openzipkin/zipkin-go/model/span_id.go | 44 - .../openzipkin/zipkin-go/model/traceid.go | 75 - .../github.com/openzipkin/zipkin-go/noop.go | 39 - .../zipkin-go/propagation/b3/doc.go | 19 - .../zipkin-go/propagation/b3/grpc.go | 87 - .../zipkin-go/propagation/b3/http.go | 127 - .../zipkin-go/propagation/b3/shared.go | 44 - .../zipkin-go/propagation/b3/spancontext.go | 202 -- .../zipkin-go/propagation/propagation.go | 30 - .../zipkin-go/reporter/http/http.go | 249 -- .../openzipkin/zipkin-go/reporter/reporter.go | 41 - .../zipkin-go/reporter/serializer.go | 42 - 
.../github.com/openzipkin/zipkin-go/sample.go | 127 - .../github.com/openzipkin/zipkin-go/span.go | 52 - .../zipkin-go/span_implementation.go | 101 - .../openzipkin/zipkin-go/span_options.go | 88 - .../github.com/openzipkin/zipkin-go/tags.go | 37 - .../github.com/openzipkin/zipkin-go/tracer.go | 187 -- .../openzipkin/zipkin-go/tracer_options.go | 138 - .../uber/jaeger-client-go/CHANGELOG.md | 216 -- .../uber/jaeger-client-go/CONTRIBUTING.md | 170 -- vendor/github.com/uber/jaeger-client-go/DCO | 37 - .../uber/jaeger-client-go/Gopkg.lock | 223 -- .../uber/jaeger-client-go/Gopkg.toml | 31 - .../github.com/uber/jaeger-client-go/LICENSE | 201 -- .../github.com/uber/jaeger-client-go/Makefile | 123 - .../uber/jaeger-client-go/README.md | 270 -- .../uber/jaeger-client-go/RELEASE.md | 11 - .../uber/jaeger-client-go/baggage_setter.go | 77 - .../uber/jaeger-client-go/config/config.go | 396 --- .../jaeger-client-go/config/config_env.go | 221 -- .../uber/jaeger-client-go/config/options.go | 156 - .../uber/jaeger-client-go/constants.go | 88 - .../uber/jaeger-client-go/context.go | 258 -- .../uber/jaeger-client-go/contrib_observer.go | 56 - .../github.com/uber/jaeger-client-go/doc.go | 24 - .../uber/jaeger-client-go/glide.lock | 90 - .../uber/jaeger-client-go/glide.yaml | 22 - .../uber/jaeger-client-go/header.go | 65 - .../internal/baggage/remote/options.go | 101 - .../baggage/remote/restriction_manager.go | 157 - .../internal/baggage/restriction_manager.go | 71 - .../jaeger-client-go/internal/spanlog/json.go | 81 - .../internal/throttler/remote/options.go | 99 - .../internal/throttler/remote/throttler.go | 216 -- .../internal/throttler/throttler.go | 32 - .../uber/jaeger-client-go/interop.go | 55 - .../uber/jaeger-client-go/jaeger_tag.go | 84 - .../jaeger-client-go/jaeger_thrift_span.go | 179 -- .../uber/jaeger-client-go/log/logger.go | 90 - .../uber/jaeger-client-go/logger.go | 53 - .../uber/jaeger-client-go/metrics.go | 107 - .../uber/jaeger-client-go/observer.go | 88 - 
.../uber/jaeger-client-go/process.go | 29 - .../uber/jaeger-client-go/propagation.go | 309 -- .../uber/jaeger-client-go/reference.go | 23 - .../uber/jaeger-client-go/reporter.go | 297 -- .../uber/jaeger-client-go/reporter_options.go | 69 - .../jaeger-client-go/rpcmetrics/README.md | 5 - .../uber/jaeger-client-go/rpcmetrics/doc.go | 16 - .../jaeger-client-go/rpcmetrics/endpoints.go | 63 - .../jaeger-client-go/rpcmetrics/metrics.go | 124 - .../jaeger-client-go/rpcmetrics/normalizer.go | 101 - .../jaeger-client-go/rpcmetrics/observer.go | 171 -- .../uber/jaeger-client-go/sampler.go | 557 ---- .../uber/jaeger-client-go/sampler_options.go | 81 - .../github.com/uber/jaeger-client-go/span.go | 288 -- .../uber/jaeger-client-go/span_allocator.go | 56 - .../thrift-gen/agent/agent.go | 411 --- .../thrift-gen/agent/constants.go | 23 - .../thrift-gen/agent/ttypes.go | 21 - .../baggage/baggagerestrictionmanager.go | 435 --- .../thrift-gen/baggage/constants.go | 18 - .../thrift-gen/baggage/ttypes.go | 154 - .../thrift-gen/jaeger/agent.go | 242 -- .../thrift-gen/jaeger/constants.go | 18 - .../thrift-gen/jaeger/ttypes.go | 1838 ------------ .../thrift-gen/sampling/constants.go | 18 - .../thrift-gen/sampling/samplingmanager.go | 410 --- .../thrift-gen/sampling/ttypes.go | 873 ------ .../thrift-gen/zipkincore/constants.go | 35 - .../thrift-gen/zipkincore/ttypes.go | 1337 --------- .../thrift-gen/zipkincore/zipkincollector.go | 446 --- .../uber/jaeger-client-go/thrift/README.md | 7 - .../thrift/application_exception.go | 142 - .../thrift/binary_protocol.go | 514 ---- .../thrift/compact_protocol.go | 815 ----- .../uber/jaeger-client-go/thrift/exception.go | 44 - .../jaeger-client-go/thrift/memory_buffer.go | 79 - .../jaeger-client-go/thrift/messagetype.go | 31 - .../uber/jaeger-client-go/thrift/numeric.go | 164 - .../uber/jaeger-client-go/thrift/processor.go | 30 - .../uber/jaeger-client-go/thrift/protocol.go | 175 -- .../thrift/protocol_exception.go | 78 - 
.../thrift/protocol_factory.go | 25 - .../jaeger-client-go/thrift/rich_transport.go | 69 - .../jaeger-client-go/thrift/serializer.go | 75 - .../thrift/simple_json_protocol.go | 1337 --------- .../uber/jaeger-client-go/thrift/transport.go | 68 - .../thrift/transport_exception.go | 90 - .../thrift/transport_factory.go | 39 - .../uber/jaeger-client-go/thrift/type.go | 69 - .../uber/jaeger-client-go/tracer.go | 437 --- .../uber/jaeger-client-go/tracer_options.go | 163 - .../uber/jaeger-client-go/transport.go | 38 - .../uber/jaeger-client-go/transport/doc.go | 23 - .../uber/jaeger-client-go/transport/http.go | 163 - .../uber/jaeger-client-go/transport_udp.go | 131 - .../uber/jaeger-client-go/utils/http_json.go | 54 - .../uber/jaeger-client-go/utils/localip.go | 84 - .../uber/jaeger-client-go/utils/rand.go | 46 - .../jaeger-client-go/utils/rate_limiter.go | 77 - .../uber/jaeger-client-go/utils/udp_client.go | 98 - .../uber/jaeger-client-go/utils/utils.go | 87 - .../uber/jaeger-client-go/zipkin.go | 76 - .../jaeger-client-go/zipkin_thrift_span.go | 328 -- vendor/github.com/uber/jaeger-lib/LICENSE | 201 -- .../uber/jaeger-lib/metrics/counter.go | 28 - .../uber/jaeger-lib/metrics/factory.go | 78 - .../uber/jaeger-lib/metrics/gauge.go | 28 - .../uber/jaeger-lib/metrics/histogram.go | 28 - .../uber/jaeger-lib/metrics/keys.go | 35 - .../uber/jaeger-lib/metrics/metrics.go | 137 - .../uber/jaeger-lib/metrics/stopwatch.go | 43 - .../uber/jaeger-lib/metrics/timer.go | 33 - vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 - vendor/golang.org/x/crypto/scrypt/scrypt.go | 213 -- .../helloworld/helloworld/helloworld.pb.go | 198 -- .../helloworld/helloworld/helloworld.proto | 37 - .../Masterminds/sprig.v2/CHANGELOG.md | 153 - .../gopkg.in/Masterminds/sprig.v2/LICENSE.txt | 20 - vendor/gopkg.in/Masterminds/sprig.v2/Makefile | 13 - .../gopkg.in/Masterminds/sprig.v2/README.md | 81 - .../Masterminds/sprig.v2/appveyor.yml | 26 - .../gopkg.in/Masterminds/sprig.v2/crypto.go | 441 --- 
vendor/gopkg.in/Masterminds/sprig.v2/date.go | 76 - .../gopkg.in/Masterminds/sprig.v2/defaults.go | 83 - vendor/gopkg.in/Masterminds/sprig.v2/dict.go | 97 - vendor/gopkg.in/Masterminds/sprig.v2/doc.go | 19 - .../Masterminds/sprig.v2/functions.go | 287 -- .../gopkg.in/Masterminds/sprig.v2/glide.lock | 33 - .../gopkg.in/Masterminds/sprig.v2/glide.yaml | 16 - vendor/gopkg.in/Masterminds/sprig.v2/list.go | 291 -- .../gopkg.in/Masterminds/sprig.v2/numeric.go | 159 - .../gopkg.in/Masterminds/sprig.v2/reflect.go | 28 - vendor/gopkg.in/Masterminds/sprig.v2/regex.go | 35 - .../gopkg.in/Masterminds/sprig.v2/semver.go | 23 - .../gopkg.in/Masterminds/sprig.v2/strings.go | 210 -- vendor/vendor.json | 274 +- version.go | 3 + 529 files changed, 5573 insertions(+), 54450 deletions(-) rename gateway/analytics.go => analytics.go (98%) rename gateway/analytics_test.go => analytics_test.go (96%) rename gateway/api.go => api.go (91%) rename gateway/api_definition.go => api_definition.go (95%) rename gateway/api_definition_test.go => api_definition_test.go (82%) rename gateway/api_healthcheck.go => api_healthcheck.go (94%) rename gateway/api_loader.go => api_loader.go (72%) rename gateway/api_test.go => api_test.go (89%) delete mode 100644 apidef/importer/wsdl.go delete mode 100644 apidef/importer/wsdl_test.go rename gateway/auth_manager.go => auth_manager.go (97%) rename gateway/batch_requests.go => batch_requests.go (99%) rename gateway/batch_requests_test.go => batch_requests_test.go (94%) delete mode 100755 bin/ci-swagger.sh delete mode 100755 bin/ci-test.sh rename gateway/cert.go => cert.go (99%) rename gateway/cert_go1.10_test.go => cert_go1.10_test.go (89%) rename gateway/cert_test.go => cert_test.go (77%) rename cli/{linter/linter.go => lint/lint.go} (95%) rename cli/{linter/linter_test.go => lint/lint_test.go} (76%) create mode 100644 cli/lint/schema.go rename gateway/coprocess.go => coprocess.go (84%) rename gateway/coprocess_api.go => coprocess_api.go (93%) rename 
gateway/coprocess_bundle.go => coprocess_bundle.go (99%) rename gateway/coprocess_bundle_test.go => coprocess_bundle_test.go (90%) rename gateway/coprocess_dummy.go => coprocess_dummy.go (92%) rename gateway/coprocess_events.go => coprocess_events.go (98%) rename gateway/coprocess_grpc.go => coprocess_grpc.go (96%) rename coprocess/grpc/coprocess_grpc_test.go => coprocess_grpc_test.go (93%) rename gateway/coprocess_helpers.go => coprocess_helpers.go (99%) rename gateway/coprocess_id_extractor.go => coprocess_id_extractor.go (99%) rename coprocess/python/coprocess_id_extractor_python_test.go => coprocess_id_extractor_python_test.go (89%) rename gateway/coprocess_id_extractor_test.go => coprocess_id_extractor_test.go (98%) rename gateway/coprocess_lua.go => coprocess_lua.go (95%) rename gateway/coprocess_native.go => coprocess_native.go (90%) rename gateway/coprocess_python.go => coprocess_python.go (95%) create mode 120000 coprocess_python_api.c rename coprocess/python/coprocess_python_test.go => coprocess_python_test.go (89%) rename coprocess/coprocess_test.go => coprocess_test.go (72%) rename gateway/coprocess_testutil.go => coprocess_test_helpers.go (97%) delete mode 100644 ctx/ctx.go rename gateway/dashboard_register.go => dashboard_register.go (85%) rename gateway/distributed_rate_limiter.go => distributed_rate_limiter.go (99%) delete mode 100644 dnscache/manager.go delete mode 100644 dnscache/manager_test.go delete mode 100644 dnscache/mock_storage.go delete mode 100644 dnscache/storage.go delete mode 100644 dnscache/storage_test.go rename gateway/event_handler_webhooks.go => event_handler_webhooks.go (98%) rename gateway/event_handler_webhooks_test.go => event_handler_webhooks_test.go (94%) rename gateway/event_system.go => event_system.go (99%) rename gateway/event_system_test.go => event_system_test.go (99%) delete mode 100644 gateway/auth_manager_test.go delete mode 100644 gateway/coprocess_python_api.c delete mode 100644 gateway/mw_go_plugin.go delete 
mode 100644 gateway/redis_analytics_purger.go delete mode 100644 gateway/sds.c delete mode 100644 gateway/server.go delete mode 100644 gateway/swagger.go delete mode 100644 gateway/version.go rename gateway/gateway_test.go => gateway_test.go (81%) delete mode 100644 goplugin/goplugin.go delete mode 100644 goplugin/mw_go_plugin_test.go delete mode 100644 goplugin/no_goplugin.go rename gateway/handler_error.go => handler_error.go (69%) rename gateway/handler_success.go => handler_success.go (94%) rename gateway/handler_websocket.go => handler_websocket.go (99%) rename gateway/testutil.go => helpers_test.go (56%) rename gateway/host_checker.go => host_checker.go (94%) rename gateway/host_checker_manager.go => host_checker_manager.go (99%) rename gateway/host_checker_test.go => host_checker_test.go (95%) rename gateway/instrumentation_handlers.go => instrumentation_handlers.go (99%) rename gateway/instrumentation_statsd_sink.go => instrumentation_statsd_sink.go (99%) rename gateway/jq.go => jq.go (99%) rename gateway/jsvm_event_handler.go => jsvm_event_handler.go (98%) rename gateway/ldap_auth_handler.go => ldap_auth_handler.go (99%) rename gateway/le_helpers.go => le_helpers.go (99%) rename gateway/log_helpers.go => log_helpers.go (98%) rename gateway/log_helpers_test.go => log_helpers_test.go (98%) rename gateway/looping_test.go => looping_test.go (96%) rename gateway/middleware.go => middleware.go (92%) rename gateway/monitor.go => monitor.go (99%) rename gateway/multi_target_proxy_handler.go => multi_target_proxy_handler.go (99%) rename gateway/multiauth_test.go => multiauth_test.go (95%) rename gateway/mw_access_rights.go => mw_access_rights.go (99%) rename gateway/mw_api_rate_limit.go => mw_api_rate_limit.go (99%) rename gateway/mw_api_rate_limit_test.go => mw_api_rate_limit_test.go (94%) rename gateway/mw_auth_key.go => mw_auth_key.go (99%) rename gateway/mw_auth_key_test.go => mw_auth_key_test.go (90%) rename gateway/mw_basic_auth.go => mw_basic_auth.go (99%) 
rename gateway/mw_basic_auth_test.go => mw_basic_auth_test.go (96%) rename gateway/mw_certificate_check.go => mw_certificate_check.go (98%) rename gateway/mw_context_vars.go => mw_context_vars.go (96%) rename gateway/mw_context_vars_test.go => mw_context_vars_test.go (98%) rename gateway/mw_example_test.go => mw_example_test.go (98%) rename gateway/mw_granular_access.go => mw_granular_access.go (98%) rename gateway/mw_hmac.go => mw_hmac.go (99%) rename gateway/mw_hmac_test.go => mw_hmac_test.go (97%) rename gateway/mw_ip_blacklist.go => mw_ip_blacklist.go (99%) rename gateway/mw_ip_blacklist_test.go => mw_ip_blacklist_test.go (93%) rename gateway/mw_ip_whitelist.go => mw_ip_whitelist.go (98%) rename gateway/mw_ip_whitelist_test.go => mw_ip_whitelist_test.go (92%) rename gateway/mw_js_plugin.go => mw_js_plugin.go (98%) rename gateway/mw_js_plugin_test.go => mw_js_plugin_test.go (86%) rename gateway/mw_jwt.go => mw_jwt.go (90%) rename gateway/mw_jwt_test.go => mw_jwt_test.go (82%) rename gateway/mw_key_expired_check.go => mw_key_expired_check.go (99%) rename gateway/mw_method_transform.go => mw_method_transform.go (98%) rename gateway/mw_modify_headers.go => mw_modify_headers.go (98%) rename gateway/mw_oauth2_key_exists.go => mw_oauth2_key_exists.go (99%) rename gateway/mw_openid.go => mw_openid.go (82%) rename gateway/mw_organisation_activity.go => mw_organisation_activity.go (98%) rename gateway/mw_organization_activity_test.go => mw_organization_activity_test.go (93%) rename gateway/mw_rate_check.go => mw_rate_check.go (95%) rename gateway/mw_rate_limiting.go => mw_rate_limiting.go (99%) rename gateway/mw_redis_cache.go => mw_redis_cache.go (78%) rename gateway/mw_redis_cache_test.go => mw_redis_cache_test.go (88%) rename gateway/mw_request_size_limit.go => mw_request_size_limit.go (99%) rename gateway/mw_strip_auth.go => mw_strip_auth.go (99%) rename gateway/mw_strip_auth_test.go => mw_strip_auth_test.go (99%) rename gateway/mw_track_endpoints.go => 
mw_track_endpoints.go (98%) rename gateway/mw_transform.go => mw_transform.go (99%) rename gateway/mw_transform_jq.go => mw_transform_jq.go (99%) rename gateway/mw_transform_jq_dummy.go => mw_transform_jq_dummy.go (98%) rename gateway/mw_transform_jq_test.go => mw_transform_jq_test.go (90%) rename gateway/mw_transform_test.go => mw_transform_test.go (76%) rename gateway/mw_url_rewrite.go => mw_url_rewrite.go (95%) rename gateway/mw_url_rewrite_test.go => mw_url_rewrite_test.go (69%) rename gateway/mw_validate_json.go => mw_validate_json.go (99%) rename gateway/mw_validate_json_test.go => mw_validate_json_test.go (93%) rename gateway/mw_version_check.go => mw_version_check.go (99%) rename gateway/mw_version_check_test.go => mw_version_check_test.go (88%) rename gateway/mw_virtual_endpoint.go => mw_virtual_endpoint.go (98%) rename gateway/mw_virtual_endpoint_test.go => mw_virtual_endpoint_test.go (94%) rename gateway/newrelic.go => newrelic.go (98%) rename gateway/oauth_manager.go => oauth_manager.go (93%) rename gateway/oauth_manager_test.go => oauth_manager_test.go (84%) rename gateway/policy.go => policy.go (95%) rename gateway/policy_test.go => policy_test.go (96%) rename gateway/redis_logrus_hook.go => redis_logrus_hook.go (91%) rename gateway/redis_signal_handle_config.go => redis_signal_handle_config.go (99%) rename gateway/redis_signals.go => redis_signals.go (97%) delete mode 100644 regexp/keybuilder.go delete mode 100644 regexp/keybuilder_test.go rename gateway/res_handler_header_injector.go => res_handler_header_injector.go (94%) rename gateway/res_handler_header_injector_test.go => res_handler_header_injector_test.go (88%) rename gateway/res_handler_header_transform.go => res_handler_header_transform.go (95%) rename gateway/res_handler_jq_transform.go => res_handler_jq_transform.go (98%) rename gateway/res_handler_jq_transform_dummy.go => res_handler_jq_transform_dummy.go (80%) rename gateway/res_handler_transform.go => res_handler_transform.go (97%) 
rename gateway/res_handler_transform_test.go => res_handler_transform_test.go (85%) rename gateway/reverse_proxy.go => reverse_proxy.go (89%) rename gateway/reverse_proxy_test.go => reverse_proxy_test.go (68%) delete mode 100644 rpc/rpc_analytics_purger.go delete mode 100644 rpc/rpc_client.go create mode 100644 rpc_analytics_purger.go rename gateway/rpc_backup_handlers.go => rpc_backup_handlers.go (99%) rename gateway/rpc_storage_handler.go => rpc_storage_handler.go (55%) rename gateway/rpc_test.go => rpc_test.go (92%) create mode 120000 sds.c rename gateway/service_discovery.go => service_discovery.go (99%) rename gateway/service_discovery_test.go => service_discovery_test.go (99%) rename gateway/session_manager.go => session_manager.go (99%) delete mode 100644 swagger.yml delete mode 100644 test/dns.go delete mode 100644 test/goplugins/test_goplugin.go delete mode 100644 trace/handler.go delete mode 100644 trace/jaeger/config.go delete mode 100644 trace/jaeger/config_test.go delete mode 100644 trace/jaeger/jaeger.go delete mode 100644 trace/log.go delete mode 100644 trace/manager.go delete mode 100644 trace/openzipkin/config.go delete mode 100644 trace/openzipkin/zipkin.go delete mode 100644 trace/trace.go delete mode 100644 trace/trace_test.go rename gateway/tracing.go => tracing.go (90%) rename {bin => utils}/ci-benchmark.sh (100%) create mode 100755 utils/ci-test.sh rename {bin => utils}/dist_build.sh (100%) rename {bin => utils}/dist_push.sh (100%) rename {bin => utils}/set-version.sh (100%) delete mode 100644 vendor/github.com/Masterminds/semver/CHANGELOG.md delete mode 100644 vendor/github.com/Masterminds/semver/LICENSE.txt delete mode 100644 vendor/github.com/Masterminds/semver/Makefile delete mode 100644 vendor/github.com/Masterminds/semver/README.md delete mode 100644 vendor/github.com/Masterminds/semver/appveyor.yml delete mode 100644 vendor/github.com/Masterminds/semver/collection.go delete mode 100644 
vendor/github.com/Masterminds/semver/constraints.go delete mode 100644 vendor/github.com/Masterminds/semver/doc.go delete mode 100644 vendor/github.com/Masterminds/semver/version.go delete mode 100644 vendor/github.com/aokoli/goutils/CHANGELOG.md delete mode 100644 vendor/github.com/aokoli/goutils/LICENSE.txt delete mode 100644 vendor/github.com/aokoli/goutils/README.md delete mode 100644 vendor/github.com/aokoli/goutils/appveyor.yml delete mode 100644 vendor/github.com/aokoli/goutils/cryptorandomstringutils.go delete mode 100644 vendor/github.com/aokoli/goutils/randomstringutils.go delete mode 100644 vendor/github.com/aokoli/goutils/stringutils.go delete mode 100644 vendor/github.com/aokoli/goutils/wordutils.go delete mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS delete mode 100644 vendor/github.com/google/uuid/LICENSE delete mode 100644 vendor/github.com/google/uuid/README.md delete mode 100644 vendor/github.com/google/uuid/dce.go delete mode 100644 vendor/github.com/google/uuid/doc.go delete mode 100644 vendor/github.com/google/uuid/go.mod delete mode 100644 vendor/github.com/google/uuid/hash.go delete mode 100644 vendor/github.com/google/uuid/marshal.go delete mode 100644 vendor/github.com/google/uuid/node.go delete mode 100644 vendor/github.com/google/uuid/node_js.go delete mode 100644 vendor/github.com/google/uuid/node_net.go delete mode 100644 vendor/github.com/google/uuid/sql.go delete mode 100644 vendor/github.com/google/uuid/time.go delete mode 100644 vendor/github.com/google/uuid/util.go delete mode 100644 vendor/github.com/google/uuid/uuid.go delete mode 100644 vendor/github.com/google/uuid/version1.go delete mode 100644 vendor/github.com/google/uuid/version4.go delete mode 100644 vendor/github.com/hashicorp/hil/LICENSE delete mode 100644 vendor/github.com/hashicorp/hil/README.md delete mode 100644 
vendor/github.com/hashicorp/hil/appveyor.yml delete mode 100644 vendor/github.com/hashicorp/hil/ast/arithmetic.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/arithmetic_op.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/ast.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/call.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/conditional.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/index.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/literal.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/output.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/scope.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/stack.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/type_string.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/unknown.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/variable_access.go delete mode 100644 vendor/github.com/hashicorp/hil/ast/variables_helper.go delete mode 100644 vendor/github.com/hashicorp/hil/builtins.go delete mode 100644 vendor/github.com/hashicorp/hil/check_identifier.go delete mode 100644 vendor/github.com/hashicorp/hil/check_types.go delete mode 100644 vendor/github.com/hashicorp/hil/convert.go delete mode 100644 vendor/github.com/hashicorp/hil/eval.go delete mode 100644 vendor/github.com/hashicorp/hil/eval_type.go delete mode 100644 vendor/github.com/hashicorp/hil/evaltype_string.go delete mode 100644 vendor/github.com/hashicorp/hil/go.mod delete mode 100644 vendor/github.com/hashicorp/hil/go.sum delete mode 100644 vendor/github.com/hashicorp/hil/parse.go delete mode 100644 vendor/github.com/hashicorp/hil/parser/binary_op.go delete mode 100644 vendor/github.com/hashicorp/hil/parser/error.go delete mode 100644 vendor/github.com/hashicorp/hil/parser/fuzz.go delete mode 100644 vendor/github.com/hashicorp/hil/parser/parser.go delete mode 100644 vendor/github.com/hashicorp/hil/scanner/peeker.go delete mode 100644 
vendor/github.com/hashicorp/hil/scanner/scanner.go delete mode 100644 vendor/github.com/hashicorp/hil/scanner/token.go delete mode 100644 vendor/github.com/hashicorp/hil/scanner/tokentype_string.go delete mode 100644 vendor/github.com/hashicorp/hil/transform_fixed.go delete mode 100644 vendor/github.com/hashicorp/hil/walk.go delete mode 100644 vendor/github.com/hashicorp/terraform/LICENSE delete mode 100644 vendor/github.com/hashicorp/terraform/flatmap/expand.go delete mode 100644 vendor/github.com/hashicorp/terraform/flatmap/flatten.go delete mode 100644 vendor/github.com/hashicorp/terraform/flatmap/map.go delete mode 100644 vendor/github.com/huandu/xstrings/CONTRIBUTING.md delete mode 100644 vendor/github.com/huandu/xstrings/LICENSE delete mode 100644 vendor/github.com/huandu/xstrings/README.md delete mode 100644 vendor/github.com/huandu/xstrings/common.go delete mode 100644 vendor/github.com/huandu/xstrings/convert.go delete mode 100644 vendor/github.com/huandu/xstrings/count.go delete mode 100644 vendor/github.com/huandu/xstrings/doc.go delete mode 100644 vendor/github.com/huandu/xstrings/format.go delete mode 100644 vendor/github.com/huandu/xstrings/go.mod delete mode 100644 vendor/github.com/huandu/xstrings/manipulate.go delete mode 100644 vendor/github.com/huandu/xstrings/translate.go delete mode 100644 vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/imdario/mergo/LICENSE delete mode 100644 vendor/github.com/imdario/mergo/README.md delete mode 100644 vendor/github.com/imdario/mergo/doc.go delete mode 100644 vendor/github.com/imdario/mergo/map.go delete mode 100644 vendor/github.com/imdario/mergo/merge.go delete mode 100644 vendor/github.com/imdario/mergo/mergo.go rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/LICENSE (100%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/Makefile (100%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/README.md (100%) rename 
vendor/github.com/{TykTechnologies => lonelycode}/gorpc/TODO (100%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/client.go (99%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/common.go (96%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/conn_stats.go (100%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/conn_stats_386.go (100%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/conn_stats_generic.go (100%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/dispatcher.go (100%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/doc.go (100%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/encoding.go (100%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/server.go (97%) rename vendor/github.com/{TykTechnologies => lonelycode}/gorpc/transport.go (89%) delete mode 100644 vendor/github.com/mitchellh/reflectwalk/LICENSE delete mode 100644 vendor/github.com/mitchellh/reflectwalk/README.md delete mode 100644 vendor/github.com/mitchellh/reflectwalk/go.mod delete mode 100644 vendor/github.com/mitchellh/reflectwalk/location.go delete mode 100644 vendor/github.com/mitchellh/reflectwalk/location_string.go delete mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/CHANGELOG.md delete mode 100644 vendor/github.com/opentracing/opentracing-go/LICENSE delete mode 100644 vendor/github.com/opentracing/opentracing-go/Makefile delete mode 100644 vendor/github.com/opentracing/opentracing-go/README.md delete mode 100644 vendor/github.com/opentracing/opentracing-go/ext/tags.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/globaltracer.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/gocontext.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/log/field.go delete mode 100644 
vendor/github.com/opentracing/opentracing-go/log/util.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/noop.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/propagation.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/span.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/tracer.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/LICENSE delete mode 100644 vendor/github.com/openzipkin/zipkin-go/Makefile delete mode 100644 vendor/github.com/openzipkin/zipkin-go/README.md delete mode 100644 vendor/github.com/openzipkin/zipkin-go/appveyor.yml delete mode 100644 vendor/github.com/openzipkin/zipkin-go/circle.yml delete mode 100644 vendor/github.com/openzipkin/zipkin-go/context.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/doc.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/endpoint.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/go.mod delete mode 100644 vendor/github.com/openzipkin/zipkin-go/go.sum delete mode 100644 vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/model/annotation.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/model/doc.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/model/endpoint.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/model/kind.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/model/span.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/model/span_id.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/model/traceid.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/noop.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/propagation/b3/doc.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/propagation/b3/grpc.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/propagation/b3/http.go delete mode 100644 
vendor/github.com/openzipkin/zipkin-go/propagation/b3/shared.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/propagation/b3/spancontext.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/propagation/propagation.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/reporter/http/http.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/reporter/reporter.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/reporter/serializer.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/sample.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/span.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/span_implementation.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/span_options.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/tags.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/tracer.go delete mode 100644 vendor/github.com/openzipkin/zipkin-go/tracer_options.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/CHANGELOG.md delete mode 100644 vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md delete mode 100644 vendor/github.com/uber/jaeger-client-go/DCO delete mode 100644 vendor/github.com/uber/jaeger-client-go/Gopkg.lock delete mode 100644 vendor/github.com/uber/jaeger-client-go/Gopkg.toml delete mode 100644 vendor/github.com/uber/jaeger-client-go/LICENSE delete mode 100644 vendor/github.com/uber/jaeger-client-go/Makefile delete mode 100644 vendor/github.com/uber/jaeger-client-go/README.md delete mode 100644 vendor/github.com/uber/jaeger-client-go/RELEASE.md delete mode 100644 vendor/github.com/uber/jaeger-client-go/baggage_setter.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/config/config.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/config/config_env.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/config/options.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/constants.go delete mode 
100644 vendor/github.com/uber/jaeger-client-go/context.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/contrib_observer.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/doc.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/glide.lock delete mode 100644 vendor/github.com/uber/jaeger-client-go/glide.yaml delete mode 100644 vendor/github.com/uber/jaeger-client-go/header.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/interop.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/jaeger_tag.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/log/logger.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/logger.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/metrics.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/observer.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/process.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/propagation.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/reference.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/reporter.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/reporter_options.go delete mode 100644 
vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md delete mode 100644 vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/sampler.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/sampler_options.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/span.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/span_allocator.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go delete mode 100644 
vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/README.md delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/exception.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/numeric.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/processor.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/protocol.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/serializer.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/transport.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/type.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/tracer.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/tracer_options.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/transport.go delete mode 100644 
vendor/github.com/uber/jaeger-client-go/transport/doc.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/transport/http.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/transport_udp.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/utils/http_json.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/utils/localip.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/utils/rand.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/utils/udp_client.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/utils/utils.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/zipkin.go delete mode 100644 vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go delete mode 100644 vendor/github.com/uber/jaeger-lib/LICENSE delete mode 100644 vendor/github.com/uber/jaeger-lib/metrics/counter.go delete mode 100644 vendor/github.com/uber/jaeger-lib/metrics/factory.go delete mode 100644 vendor/github.com/uber/jaeger-lib/metrics/gauge.go delete mode 100644 vendor/github.com/uber/jaeger-lib/metrics/histogram.go delete mode 100644 vendor/github.com/uber/jaeger-lib/metrics/keys.go delete mode 100644 vendor/github.com/uber/jaeger-lib/metrics/metrics.go delete mode 100644 vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go delete mode 100644 vendor/github.com/uber/jaeger-lib/metrics/timer.go delete mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go delete mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go delete mode 100644 vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go delete mode 100644 vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/CHANGELOG.md delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/LICENSE.txt delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/Makefile delete mode 100644 
vendor/gopkg.in/Masterminds/sprig.v2/README.md delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/appveyor.yml delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/crypto.go delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/date.go delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/defaults.go delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/dict.go delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/doc.go delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/functions.go delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/glide.lock delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/glide.yaml delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/list.go delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/numeric.go delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/reflect.go delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/regex.go delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/semver.go delete mode 100644 vendor/gopkg.in/Masterminds/sprig.v2/strings.go create mode 100644 version.go diff --git a/.gitignore b/.gitignore index d1c9a90dc3af..94f7bb3dead0 100644 --- a/.gitignore +++ b/.gitignore @@ -42,7 +42,6 @@ petstore.json *.pdf *.mmdb *.cov -*.so !testdata/*.mmdb *.pid coprocess_gen_test.go diff --git a/.travis.yml b/.travis.yml index fb3f2e5cffb3..09241e039427 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,10 +18,9 @@ addons: matrix: include: - - go: 1.10.x - - go: 1.11.x + - go: 1.9.x env: LATEST_GO=true # run linters and report coverage - + - go: 1.10.x services: - redis-server @@ -31,22 +30,13 @@ install: - go install ./... # As of 1.9, ./... no longer includes ./vendor/... 
- go install ./vendor/{golang.org/x/tools/cmd/goimports,github.com/wadey/gocovmerge,github.com/mattn/goveralls} - - go get github.com/go-swagger/go-swagger/cmd/swagger script: - sudo pip3 install google - sudo pip3 install protobuf - ### Needed to convert the swagger 2.0 file to openapi 3.0 - ### The swagger docs are actually written as per the 2.0 spec as there is no - ### support for openapi 3.0 in Go - at least for now. - ### https://github.com/nodesource/distributions/blob/master/README.md#debinstall - - curl -sL https://deb.nodesource.com/setup_11.x | sudo -E bash - - - sudo -E apt-get -yq --no-install-suggests --no-install-recommends $(travis_apt_get_options) install nodejs - - sudo npm install -g api-spec-converter --unsafe-perm=true --allow-root - go build -tags 'coprocess python' - go build -tags 'coprocess lua' - go build -tags 'coprocess grpc' - - ./bin/ci-swagger.sh - - ./bin/ci-test.sh + - ./utils/ci-test.sh - if [[ $LATEST_GO ]]; then goveralls -coverprofile=<(gocovmerge *.cov); fi - - ./bin/ci-benchmark.sh + - ./utils/ci-benchmark.sh diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9582eba6c7ee..e6f19abbe31e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -18,7 +18,7 @@ We'd love to accept your patches! Before we can take them, we have to jump a cou The Tyk CLA [must be signed](https://github.com/TykTechnologies/tyk/blob/master/CLA.md) by all contributors. You will be automatically asked to sign CLA once PR will be created. -Once you are CLA'ed, we'll be able to accept your pull requests. For any issues that you face during this process, please create a GitHub issue explaining the problem and we will help get it sorted out. +Once you are CLA'ed, we'll be able to accept your pull requests. For any issues that you face during this process, please create an GitHub issue explaining the problem and we will help get it sorted out. 
***NOTE***: Only original source code from you and other people that have signed the CLA can be accepted into the repository. This policy does not @@ -65,11 +65,11 @@ some bugs or smaller features. It is always better to discuss your idea with our You need to clone Tyk from GitHub to your GOPATH folder, or alternatively you can run `go get -d github.com/TykTechnologies/tyk` which automatically downloads project to the right path. ### Building the project -You need to have working Go environment: see [golang.org](https://golang.org/doc/code.html) for more info on how Go works with code. +You need to have working Go enironment: see [golang.org](https://golang.org/doc/code.html) for more info on how Go works with code. -To build and test Tyk use built-in `go` commands: `go build` and `go test -v`. If you want to just test a subset of the project, you can pass the `-run` argument with the name of the test. Note that logs are hidden by default when running the tests, which you can override by setting `TYK_LOGLEVEL=info`. +To build and test Tyk use built-in `go` commands: `go build` and `go test -v`. If you want to just test a subset of the project, you can pass the `-run` argument with name of the test. Note that logs are hidden by default when running the tests, which you can override by setting `TYK_LOGLEVEL=info`. -Currently, in order for tests to pass, a **Redis host is required**. We know, this is terrible and should be handled with an interface, and it is, however in the current version there is a hard requirement for the application to have its default memory setup to use Redis as part of a deployment, this is to make it easier to install the application for the end-user. Future versions will work around this, or we may drop the memory requirement. The simplest way to run Redis is to use official Docker image [https://hub.docker.com/_/redis/](https://hub.docker.com/_/redis/) +Currently in order for tests to pass, a **Redis host is required**. 
We know, this is terrible and should be handled with an interface, and it is, however in the current version there is a hard requirement for the application to have its default memory setup to use redis as part of a deployment, this is to make it easier to install the application for the end-user. Future versions will work around this, or we may drop the memory requirement. Simplest way to run Redis is to use official Docker image [https://hub.docker.com/_/redis/](https://hub.docker.com/_/redis/) ### Adding dependencies diff --git a/README.md b/README.md index 5d4ca58962e1..deebb9355fa0 100644 --- a/README.md +++ b/README.md @@ -2,16 +2,15 @@ [![Build Status](https://travis-ci.org/TykTechnologies/tyk.svg?branch=master)](https://travis-ci.org/TykTechnologies/tyk) [![Go Report Card](https://goreportcard.com/badge/github.com/TykTechnologies/tyk)](https://goreportcard.com/report/github.com/TykTechnologies/tyk) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FTykTechnologies%2Ftyk.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2FTykTechnologies%2Ftyk?ref=badge_shield) Tyk is a lightweight, open source API Gateway and Management Platform enables you to control who accesses your API, when they access it and how they access it. Tyk will also record detailed analytics on how your users are interacting with your API and when things go wrong. -Go version 1.10 is required to build `master`, the current +Go version 1.9 is required to build `master`, the current development version. Tyk is officially supported on `linux/amd64`, `linux/i386` and `linux/arm64`. -Tests are run against both Go versions 1.10 & 1.11, however at present, only Go 1.10 is officially supported. +Tests are run against both Go versions 1.9 & 1.10, however at present, only Go 1.9 is officially supported. ## What is an API Gateway? @@ -56,9 +55,6 @@ All the documentation can be found at http://tyk.io/docs/. 
Tyk is released under the MPL v2.0; please see [LICENSE.md](LICENSE.md) for a full version of the license. - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FTykTechnologies%2Ftyk.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2FTykTechnologies%2Ftyk?ref=badge_large) - ### Contributing For more information about contributing PRs and issues, see [CONTRIBUTING.md](CONTRIBUTING.md). @@ -66,6 +62,6 @@ For more information about contributing PRs and issues, see [CONTRIBUTING.md](CO ### Roadmap To coordinate development and be completely transparent as to where the project is going, the version roadmap for the next version, as well as proposed features -and adopted proposals can be viewed on our public [Tyk Roadmap Repository](https://github.com/TykTechnologies/tyk-roadmap). +and adopted proposals can be viewed on our public [Trello board](https://trello.com/b/59d5kAZ5/tyk-api-gateway-roadmap). -Any proposals can be made in the Github issue tracker. Proposals that are adopted will be placed into roadmap. +Any proposals can be made in the Github issue tracker. Proposals that are adopted will be placed into trello and then moved according to their status. 
diff --git a/TESTING.md b/TESTING.md index 6968a9d350d0..57856722bdd8 100644 --- a/TESTING.md +++ b/TESTING.md @@ -1,34 +1,16 @@ -Table of Contents -================= - -* [Tyk testing guide](#tyk-testing-guide) - * [Initializing test server](#initializing-test-server) - * [Loading and configuring APIs](#loading-and-configuring-apis) - * [Running the tests](#running-the-tests) - * [Changing config variables](#changing-config-variables) - * [Upstream test server](#upstream-test-server) - * [Coprocess plugin testing](#coprocess-plugin-testing) - * [Creating user sessions](#creating-user-sessions) - * [Mocking dashboard](#mocking-dashboard) - * [Mocking RPC (Hybrid)](#mocking-rpc-hybrid) - * [Mocking DNS](#mocking-dns) -* [Test Framework](#test-framework) - - ## Tyk testing guide -When it comes to the tests, one of the main questions is how to keep balance between expressivity, extendability, repeatability and performance. There are countless discussions if you should write integration or unit tests, should your mock or not, should you write tests first or after and etc. Since you will never find the right answer, on a growing code base, multiple people start introducing own methodology and distinct test helpers. Even looking at our quite small code base, you can find like 3-4 ways to write the same test. +When it comes to the tests, one of the main questions is how to keep balance between expressivity, extendability, repeatability and performance. There are countless discussions if you should write integration or unit tests, should your mock or not, should you write tests first or after and etc. Since you will never find the right answer, on a growing code base, multiple people start introducing own methodology and distinct test helpers. Even looking at our quite small code base, I can find like 3-4 ways to write the same test. 
Additionally expressivity of our tests are quite bad: it is quite hard to understand what actually get tested, lot of boilerplate code not related to test logic, and amount of copy-paste growing with each test. -This document describes Tyk test framework and unified guidelines on writing tests. +In order to fix issues described above, I think it is important to have an official guide on writing the tests. -Main points of the test framework are: +This idea behind this framework is not new, and we already had pieces of it around the code. My goal was to unify all the patterns we used previously, and design a small layer on top of it, to streamline process of writing the tests. + +Main points of the new framework are: - All tests run HTTP requests though the full HTTP stack, same as user will do - Test definition logic separated from test runner. - Official mocks for the Dashboard, RPC, and Bundler -Framework located inside "github.com/TykTechnologies/tyk/test" package. -See its API docs https://godoc.org/github.com/TykTechnologies/tyk/test - Let’s learn by example: ```go @@ -219,27 +201,30 @@ Tests are defined using new `test` package `TestCase` structure, which allows yo ```go type TestCase struct { - Method, Path string `json:",omitempty"` - Domain string `json:",omitempty"` - Proto string `json:",omitempty"` - Code int `json:",omitempty"` - Data interface{} `json:",omitempty"` - Headers map[string]string `json:",omitempty"` - PathParams map[string]string `json:",omitempty"` - Cookies []*http.Cookie `json:",omitempty"` - Delay time.Duration `json:",omitempty"` - BodyMatch string `json:",omitempty"` - BodyMatchFunc func([]byte) bool `json:",omitempty"` - BodyNotMatch string `json:",omitempty"` - HeadersMatch map[string]string `json:",omitempty"` - HeadersNotMatch map[string]string `json:",omitempty"` - JSONMatch map[string]string `json:",omitempty"` - ErrorMatch string `json:",omitempty"` - BeforeFn func() `json:"-"` - Client *http.Client `json:"-"` - - AdminAuth 
bool `json:",omitempty"` - ControlRequest bool `json:",omitempty"` + Method string + Path string + Domain string + // Request body, can be any object. If not string, It will be automatically serialized + Data interface{} + Headers map[string]string + PathParams map[string]string + Cookies []*http.Cookie + BeforeFn func() + // If need custom http client settings, like SSL client certificate or custom timeouts + Client *http.Client + // If true, pass valid admin-auth header + AdminAuth bool + // if Control API run on separate port (or domain), tell to run request on this listener + ControlRequest bool + + // Assertions + Code int + BodyMatch string + BodyNotMatch string + HeadersMatch map[string]string + HeadersNotMatch map[string]string + // If http request returns non http error, like TLS or Timeout + ErrorMatch string } ``` @@ -399,58 +384,4 @@ func TestSyncAPISpecsRPCSuccess(t *testing.T) { ### DNS mocks Inside tests we override default network resolver to use custom DNS server mock, creating using awesome `github.com/miekg/dns` library. Domain -\> IP mapping set via map inside `helpers_test.go` file. By default you have access to domains: `localhost`, `host1.local`, `host2.local` and `host3.local`. Access to all unknown domains will cause panic. -Using DNS mock means that you are able to create tests with APIs on multiple domains, without modifying machine `/etc/hosts` file. - -## Test Framework - -Usage of framework described above is not limited by Tyk Gateway, and it is used across variety of Tyk projects. -The main building block is the test runner. -```go -type HTTPTestRunner struct { - Do func(*http.Request, *TestCase) (*http.Response, error) - Assert func(*http.Response, *TestCase) error - RequestBuilder func(*TestCase) (*http.Request, error) -} -func (r HTTPTestRunner) Run(t testing.TB, testCases ...TestCase) { -... -} -``` -By overriding its variables, you can tune runner behavior. 
-For example http runner can be look like: -``` -import "github.com/TykTechnologies/tyk/test" - -... -baseURL := "http://example.com" -runner := test.HTTPTestRunner{ - Do: func(r *http.Request, tc *TestCase) (*http.Response, error) { - return tc.Client.Do(r) - } - RequestBuilder: func(tc *TestCase) (*http.Request, error) { - tc.BaseURL = baseURL - return NewRequest(tc) - }, -} -runner.Run(t, testCases...) -... -``` -And Unit testing of http handlers can be: -``` -import "github.com/TykTechnologies/tyk/test" - -... -handler := func(wr http.RequestWriter, r *http.Request){...} -runner := test.HTTPTestRunner{ - Do: func(r *http.Request, _ *TestCase) (*http.Response, error) { - rec := httptest.NewRecorder() - handler(rec, r) - return rec.Result(), nil - }, -} -runner.Run(t, testCases...) -... -``` - -This package already exports functions for cases mentioned above: - - `func TestHttpServer(t testing.TB, baseURL string, testCases ...TestCase)` - - `func TestHttpHandler(t testing.TB, handle http.HandlerFunc, testCases ...TestCase)` +Using DNS mock means that you are able to create tests with APIs on multiple domains, without modifying machine `/etc/hosts` file. 
\ No newline at end of file diff --git a/gateway/analytics.go b/analytics.go similarity index 98% rename from gateway/analytics.go rename to analytics.go index eb39f0ddc019..2a08391f3717 100644 --- a/gateway/analytics.go +++ b/analytics.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "fmt" @@ -8,8 +8,8 @@ import ( "sync/atomic" "time" - maxminddb "github.com/oschwald/maxminddb-golang" - msgpack "gopkg.in/vmihailenco/msgpack.v2" + "github.com/oschwald/maxminddb-golang" + "gopkg.in/vmihailenco/msgpack.v2" "github.com/TykTechnologies/tyk/config" "github.com/TykTechnologies/tyk/regexp" diff --git a/gateway/analytics_test.go b/analytics_test.go similarity index 96% rename from gateway/analytics_test.go rename to analytics_test.go index c79e2b732775..07e3aa1445dc 100644 --- a/gateway/analytics_test.go +++ b/analytics_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "testing" @@ -27,7 +27,7 @@ func TestGeoIPLookup(t *testing.T) { } func TestURLReplacer(t *testing.T) { - defer ResetTestConfig() + defer resetTestConfig() globalConf := config.Global() globalConf.AnalyticsConfig.NormaliseUrls.Enabled = true globalConf.AnalyticsConfig.NormaliseUrls.NormaliseUUIDs = true @@ -87,7 +87,7 @@ func TestURLReplacer(t *testing.T) { func BenchmarkURLReplacer(b *testing.B) { b.ReportAllocs() - defer ResetTestConfig() + defer resetTestConfig() globalConf := config.Global() globalConf.AnalyticsConfig.NormaliseUrls.Enabled = true @@ -115,7 +115,7 @@ func BenchmarkURLReplacer(b *testing.B) { } func TestTagHeaders(t *testing.T) { - req := TestReq(t, "GET", "/tagmeplease", nil) + req := testReq(t, "GET", "/tagmeplease", nil) req.Header.Set("Content-Type", "application/json") req.Header.Set("X-Tag-Me", "1") req.Header.Set("X-Tag-Me2", "2") @@ -159,7 +159,7 @@ func TestTagHeaders(t *testing.T) { func BenchmarkTagHeaders(b *testing.B) { b.ReportAllocs() - req := TestReq(b, "GET", "/tagmeplease", nil) + req := testReq(b, "GET", "/tagmeplease", nil) 
req.Header.Set("Content-Type", "application/json") req.Header.Set("X-Tag-Me", "1") req.Header.Set("X-Tag-Me2", "2") diff --git a/gateway/api.go b/api.go similarity index 91% rename from gateway/api.go rename to api.go index 11fa0594a784..c674d7351459 100644 --- a/gateway/api.go +++ b/api.go @@ -1,29 +1,4 @@ -// Tyk Gateway API -// -// The code below describes the Tyk Gateway API -// Version: 2.8.0 -// -// Schemes: https, http -// Host: localhost -// BasePath: /tyk/ -// -// Consumes: -// - application/json -// -// Produces: -// - application/json -// -// Security: -// - api_key: -// -// SecurityDefinitions: -// api_key: -// type: apiKey -// name: X-Tyk-Authorization -// in: header -// -// swagger:meta -package gateway +package main import ( "bytes" @@ -43,21 +18,17 @@ import ( "github.com/Sirupsen/logrus" "github.com/gorilla/mux" - uuid "github.com/satori/go.uuid" + "github.com/satori/go.uuid" "golang.org/x/crypto/bcrypt" "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/ctx" "github.com/TykTechnologies/tyk/storage" "github.com/TykTechnologies/tyk/user" ) // apiModifyKeySuccess represents when a Key modification was successful -// -// swagger:model apiModifyKeySuccess type apiModifyKeySuccess struct { - // in:body Key string `json:"key"` Status string `json:"status"` Action string `json:"action"` @@ -65,11 +36,8 @@ type apiModifyKeySuccess struct { } // apiStatusMessage represents an API status message -// -// swagger:model apiStatusMessage type apiStatusMessage struct { - Status string `json:"status"` - // Response details + Status string `json:"status"` Message string `json:"message"` } @@ -81,18 +49,6 @@ func apiError(msg string) apiStatusMessage { return apiStatusMessage{"error", msg} } -// paginationStatus provides more information about a paginated data set -type paginationStatus struct { - PageNum int `json:"page_num"` - PageTotal int `json:"page_total"` - PageSize int `json:"page_size"` -} - 
-type paginatedOAuthClientTokens struct { - Pagination paginationStatus - Tokens []OAuthClientToken -} - func doJSONWrite(w http.ResponseWriter, code int, obj interface{}) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) @@ -441,10 +397,10 @@ func handleGetDetail(sessionKey, apiID string, byHash bool) (interface{}, int) { } } else { log.WithFields(logrus.Fields{ - "prefix": "api", - "key": obfuscateKey(sessionKey), - "message": err, - "status": "ok", + "prefix": "api", + "key": obfuscateKey(sessionKey), + "error": err, + "status": "ok", }).Info("Can't retrieve key quota") } @@ -494,7 +450,6 @@ func handleGetDetail(sessionKey, apiID string, byHash bool) (interface{}, int) { } // apiAllKeys represents a list of keys in the memory store -// swagger:model type apiAllKeys struct { APIKeys []string `json:"keys"` } @@ -542,7 +497,7 @@ func handleAddKey(keyName, hashedName, sessionString, apiID string) { if err != nil { log.WithFields(logrus.Fields{ "prefix": "api", - "key": obfuscateKey(keyName), + "key": keyName, "status": "fail", "err": err, }).Error("Failed to update key.") @@ -554,28 +509,16 @@ func handleAddKey(keyName, hashedName, sessionString, apiID string) { }).Info("Updated hashed key in slave storage.") } -func handleDeleteKey(keyName, apiID string) (interface{}, int) { +func handleDeleteKey(keyName, apiID string, resetQuota bool) (interface{}, int) { if apiID == "-1" { // Go through ALL managed API's and delete the key apisMu.RLock() - removed := false for _, spec := range apisByID { - if spec.SessionManager.RemoveSession(keyName, false) { - removed = true - } + spec.SessionManager.RemoveSession(keyName, false) spec.SessionManager.ResetQuota(keyName, &user.SessionState{}, false) } apisMu.RUnlock() - if !removed { - log.WithFields(logrus.Fields{ - "prefix": "api", - "key": obfuscateKey(keyName), - "status": "fail", - }).Error("Failed to remove the key") - return apiError("Failed to remove the key"), http.StatusBadRequest - } - 
log.WithFields(logrus.Fields{ "prefix": "api", "key": keyName, @@ -592,15 +535,11 @@ func handleDeleteKey(keyName, apiID string) (interface{}, int) { sessionManager = spec.SessionManager } - if !sessionManager.RemoveSession(keyName, false) { - log.WithFields(logrus.Fields{ - "prefix": "api", - "key": obfuscateKey(keyName), - "status": "fail", - }).Error("Failed to remove the key") - return apiError("Failed to remove the key"), http.StatusBadRequest + sessionManager.RemoveSession(keyName, false) + + if resetQuota { + sessionManager.ResetQuota(keyName, &user.SessionState{}, false) } - sessionManager.ResetQuota(keyName, &user.SessionState{}, false) statusObj := apiModifyKeySuccess{ Key: keyName, @@ -623,27 +562,15 @@ func handleDeleteKey(keyName, apiID string) (interface{}, int) { return statusObj, http.StatusOK } -func handleDeleteHashedKey(keyName, apiID string) (interface{}, int) { +func handleDeleteHashedKey(keyName, apiID string, resetQuota bool) (interface{}, int) { if apiID == "-1" { // Go through ALL managed API's and delete the key - removed := false apisMu.RLock() for _, spec := range apisByID { - if spec.SessionManager.RemoveSession(keyName, true) { - removed = true - } + spec.SessionManager.RemoveSession(keyName, true) } apisMu.RUnlock() - if !removed { - log.WithFields(logrus.Fields{ - "prefix": "api", - "key": obfuscateKey(keyName), - "status": "fail", - }).Error("Failed to remove the key") - return apiError("Failed to remove the key"), http.StatusBadRequest - } - log.WithFields(logrus.Fields{ "prefix": "api", "key": keyName, @@ -657,14 +584,10 @@ func handleDeleteHashedKey(keyName, apiID string) (interface{}, int) { if spec := getApiSpec(apiID); spec != nil { sessionManager = spec.SessionManager } + sessionManager.RemoveSession(keyName, true) - if !sessionManager.RemoveSession(keyName, true) { - log.WithFields(logrus.Fields{ - "prefix": "api", - "key": obfuscateKey(keyName), - "status": "fail", - }).Error("Failed to remove the key") - return 
apiError("Failed to remove the key"), http.StatusBadRequest + if resetQuota { + sessionManager.ResetQuota(keyName, &user.SessionState{}, true) } statusObj := apiModifyKeySuccess{ @@ -874,16 +797,16 @@ func keyHandler(w http.ResponseWriter, r *http.Request) { case http.MethodDelete: // Remove a key if !isHashed { - obj, code = handleDeleteKey(keyName, apiID) + obj, code = handleDeleteKey(keyName, apiID, true) } else { - obj, code = handleDeleteHashedKey(keyName, apiID) + obj, code = handleDeleteHashedKey(keyName, apiID, true) } if code != http.StatusOK && hashKeyFunction != "" { // try to use legacy key format if !isHashed { - obj, code = handleDeleteKey(origKeyName, apiID) + obj, code = handleDeleteKey(origKeyName, apiID, true) } else { - obj, code = handleDeleteHashedKey(origKeyName, apiID) + obj, code = handleDeleteHashedKey(origKeyName, apiID, true) } } } @@ -1103,10 +1026,7 @@ func handleDeleteOrgKey(orgID string) (interface{}, int) { return apiError("Org not found"), http.StatusNotFound } - if !spec.OrgSessionManager.RemoveSession(orgID, false) { - return apiError("Failed to remove the key"), http.StatusBadRequest - } - + spec.OrgSessionManager.RemoveSession(orgID, false) log.WithFields(logrus.Fields{ "prefix": "api", "key": orgID, @@ -1148,7 +1068,6 @@ func groupResetHandler(w http.ResponseWriter, r *http.Request) { // was in the URL parameters, it will block until the reload is done. // Otherwise, it won't block and fn will be called once the reload is // finished. 
-// func resetHandler(fn func()) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { var wg sync.WaitGroup @@ -1304,8 +1223,6 @@ func createKeyHandler(w http.ResponseWriter, r *http.Request) { } // NewClientRequest is an outward facing JSON object translated from osin OAuthClients -// -// swagger:model NewClientRequest type NewClientRequest struct { ClientID string `json:"client_id"` ClientRedirectURI string `json:"redirect_uri"` @@ -1668,36 +1585,8 @@ func oAuthClientTokensHandler(w http.ResponseWriter, r *http.Request) { return } - if p := r.URL.Query().Get("page"); p != "" { - page := 1 - - queryPage, err := strconv.Atoi(p) - if err == nil { - page = queryPage - } - - if page <= 0 { - page = 1 - } - - tokens, totalPages, err := apiSpec.OAuthManager.OsinServer.Storage.GetPaginatedClientTokens(keyName, page) - if err != nil { - doJSONWrite(w, http.StatusInternalServerError, apiError("Get client tokens failed")) - return - } - - doJSONWrite(w, http.StatusOK, paginatedOAuthClientTokens{ - Pagination: paginationStatus{ - PageSize: 100, - PageNum: page, - PageTotal: totalPages, - }, - Tokens: tokens, - }) - - return - } - + // get tokens from redis + // TODO: add pagination tokens, err := apiSpec.OAuthManager.OsinServer.Storage.GetClientTokens(keyName) if err != nil { doJSONWrite(w, http.StatusInternalServerError, apiError("Get client tokens failed")) @@ -1928,7 +1817,7 @@ func setCtxValue(r *http.Request, key, val interface{}) { } func ctxGetData(r *http.Request) map[string]interface{} { - if v := r.Context().Value(ctx.ContextData); v != nil { + if v := r.Context().Value(ContextData); v != nil { return v.(map[string]interface{}) } return nil @@ -1938,38 +1827,64 @@ func ctxSetData(r *http.Request, m map[string]interface{}) { if m == nil { panic("setting a nil context ContextData") } - setCtxValue(r, ctx.ContextData, m) + setCtxValue(r, ContextData, m) } func ctxGetSession(r *http.Request) *user.SessionState { - return ctx.GetSession(r) + if v := 
r.Context().Value(SessionData); v != nil { + return v.(*user.SessionState) + } + return nil } func ctxSetSession(r *http.Request, s *user.SessionState, token string, scheduleUpdate bool) { - ctx.SetSession(r, s, token, scheduleUpdate) + if s == nil { + panic("setting a nil context SessionData") + } + + if token == "" { + token = ctxGetAuthToken(r) + } + + if s.KeyHashEmpty() { + s.SetKeyHash(storage.HashKey(token)) + } + + ctx := r.Context() + ctx = context.WithValue(ctx, SessionData, s) + ctx = context.WithValue(ctx, AuthToken, token) + + if scheduleUpdate { + ctx = context.WithValue(ctx, UpdateSession, true) + } + + setContext(r, ctx) } func ctxScheduleSessionUpdate(r *http.Request) { - setCtxValue(r, ctx.UpdateSession, true) + setCtxValue(r, UpdateSession, true) } func ctxDisableSessionUpdate(r *http.Request) { - setCtxValue(r, ctx.UpdateSession, false) + setCtxValue(r, UpdateSession, false) } func ctxSessionUpdateScheduled(r *http.Request) bool { - if v := r.Context().Value(ctx.UpdateSession); v != nil { + if v := r.Context().Value(UpdateSession); v != nil { return v.(bool) } return false } func ctxGetAuthToken(r *http.Request) string { - return ctx.GetAuthToken(r) + if v := r.Context().Value(AuthToken); v != nil { + return v.(string) + } + return "" } func ctxGetTrackedPath(r *http.Request) string { - if v := r.Context().Value(ctx.TrackThisEndpoint); v != nil { + if v := r.Context().Value(TrackThisEndpoint); v != nil { return v.(string) } return "" @@ -1979,34 +1894,34 @@ func ctxSetTrackedPath(r *http.Request, p string) { if p == "" { panic("setting a nil context TrackThisEndpoint") } - setCtxValue(r, ctx.TrackThisEndpoint, p) + setCtxValue(r, TrackThisEndpoint, p) } func ctxGetDoNotTrack(r *http.Request) bool { - return r.Context().Value(ctx.DoNotTrackThisEndpoint) == true + return r.Context().Value(DoNotTrackThisEndpoint) == true } func ctxSetDoNotTrack(r *http.Request, b bool) { - setCtxValue(r, ctx.DoNotTrackThisEndpoint, b) + setCtxValue(r, 
DoNotTrackThisEndpoint, b) } func ctxGetVersionInfo(r *http.Request) *apidef.VersionInfo { - if v := r.Context().Value(ctx.VersionData); v != nil { + if v := r.Context().Value(VersionData); v != nil { return v.(*apidef.VersionInfo) } return nil } func ctxSetVersionInfo(r *http.Request, v *apidef.VersionInfo) { - setCtxValue(r, ctx.VersionData, v) + setCtxValue(r, VersionData, v) } func ctxSetOrigRequestURL(r *http.Request, url *url.URL) { - setCtxValue(r, ctx.OrigRequestURL, url) + setCtxValue(r, OrigRequestURL, url) } func ctxGetOrigRequestURL(r *http.Request) *url.URL { - if v := r.Context().Value(ctx.OrigRequestURL); v != nil { + if v := r.Context().Value(OrigRequestURL); v != nil { if urlVal, ok := v.(*url.URL); ok { return urlVal } @@ -2016,11 +1931,11 @@ func ctxGetOrigRequestURL(r *http.Request) *url.URL { } func ctxSetUrlRewritePath(r *http.Request, path string) { - setCtxValue(r, ctx.UrlRewritePath, path) + setCtxValue(r, UrlRewritePath, path) } func ctxGetUrlRewritePath(r *http.Request) string { - if v := r.Context().Value(ctx.UrlRewritePath); v != nil { + if v := r.Context().Value(UrlRewritePath); v != nil { if strVal, ok := v.(string); ok { return strVal } @@ -2029,7 +1944,7 @@ func ctxGetUrlRewritePath(r *http.Request) string { } func ctxSetCheckLoopLimits(r *http.Request, b bool) { - setCtxValue(r, ctx.CheckLoopLimits, b) + setCtxValue(r, CheckLoopLimits, b) } // Should we check Rate limits and Quotas? 
@@ -2039,7 +1954,7 @@ func ctxCheckLimits(r *http.Request) bool { return true } - if v := r.Context().Value(ctx.CheckLoopLimits); v != nil { + if v := r.Context().Value(CheckLoopLimits); v != nil { return v.(bool) } @@ -2047,11 +1962,11 @@ func ctxCheckLimits(r *http.Request) bool { } func ctxSetRequestMethod(r *http.Request, path string) { - setCtxValue(r, ctx.RequestMethod, path) + setCtxValue(r, RequestMethod, path) } func ctxGetRequestMethod(r *http.Request) string { - if v := r.Context().Value(ctx.RequestMethod); v != nil { + if v := r.Context().Value(RequestMethod); v != nil { if strVal, ok := v.(string); ok { return strVal } @@ -2060,11 +1975,11 @@ func ctxGetRequestMethod(r *http.Request) string { } func ctxGetDefaultVersion(r *http.Request) bool { - return r.Context().Value(ctx.VersionDefault) != nil + return r.Context().Value(VersionDefault) != nil } func ctxSetDefaultVersion(r *http.Request) { - setCtxValue(r, ctx.VersionDefault, true) + setCtxValue(r, VersionDefault, true) } func ctxLoopingEnabled(r *http.Request) bool { @@ -2072,7 +1987,7 @@ func ctxLoopingEnabled(r *http.Request) bool { } func ctxLoopLevel(r *http.Request) int { - if v := r.Context().Value(ctx.LoopLevel); v != nil { + if v := r.Context().Value(LoopLevel); v != nil { if intVal, ok := v.(int); ok { return intVal } @@ -2082,7 +1997,7 @@ func ctxLoopLevel(r *http.Request) int { } func ctxSetLoopLevel(r *http.Request, value int) { - setCtxValue(r, ctx.LoopLevel, value) + setCtxValue(r, LoopLevel, value) } func ctxIncLoopLevel(r *http.Request, loopLimit int) { @@ -2091,7 +2006,7 @@ func ctxIncLoopLevel(r *http.Request, loopLimit int) { } func ctxLoopLevelLimit(r *http.Request) int { - if v := r.Context().Value(ctx.LoopLevelLimit); v != nil { + if v := r.Context().Value(LoopLevelLimit); v != nil { if intVal, ok := v.(int); ok { return intVal } @@ -2103,12 +2018,12 @@ func ctxLoopLevelLimit(r *http.Request) int { func ctxSetLoopLimit(r *http.Request, limit int) { // Can be set only one time 
per request if ctxLoopLevelLimit(r) == 0 && limit > 0 { - setCtxValue(r, ctx.LoopLevelLimit, limit) + setCtxValue(r, LoopLevelLimit, limit) } } func ctxThrottleLevelLimit(r *http.Request) int { - if v := r.Context().Value(ctx.ThrottleLevelLimit); v != nil { + if v := r.Context().Value(ThrottleLevelLimit); v != nil { if intVal, ok := v.(int); ok { return intVal } @@ -2118,7 +2033,7 @@ func ctxThrottleLevelLimit(r *http.Request) int { } func ctxThrottleLevel(r *http.Request) int { - if v := r.Context().Value(ctx.ThrottleLevel); v != nil { + if v := r.Context().Value(ThrottleLevel); v != nil { if intVal, ok := v.(int); ok { return intVal } @@ -2130,12 +2045,12 @@ func ctxThrottleLevel(r *http.Request) int { func ctxSetThrottleLimit(r *http.Request, limit int) { // Can be set only one time per request if ctxThrottleLevelLimit(r) == 0 && limit > 0 { - setCtxValue(r, ctx.ThrottleLevelLimit, limit) + setCtxValue(r, ThrottleLevelLimit, limit) } } func ctxSetThrottleLevel(r *http.Request, value int) { - setCtxValue(r, ctx.ThrottleLevel, value) + setCtxValue(r, ThrottleLevel, value) } func ctxIncThrottleLevel(r *http.Request, throttleLimit int) { @@ -2144,9 +2059,9 @@ func ctxIncThrottleLevel(r *http.Request, throttleLimit int) { } func ctxTraceEnabled(r *http.Request) bool { - return r.Context().Value(ctx.Trace) != nil + return r.Context().Value(Trace) != nil } func ctxSetTrace(r *http.Request) { - setCtxValue(r, ctx.Trace, true) + setCtxValue(r, Trace, true) } diff --git a/gateway/api_definition.go b/api_definition.go similarity index 95% rename from gateway/api_definition.go rename to api_definition.go index 1fd3ccd1d3c5..7115d845ed9c 100644 --- a/gateway/api_definition.go +++ b/api_definition.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/base64" @@ -17,10 +17,6 @@ import ( "text/template" "time" - sprig "gopkg.in/Masterminds/sprig.v2" - - "github.com/TykTechnologies/tyk/rpc" - "github.com/Sirupsen/logrus" circuit "github.com/rubyist/circuitbreaker" 
@@ -31,22 +27,11 @@ import ( "github.com/TykTechnologies/tyk/storage" ) -//const used by cache middleware -const SAFE_METHODS = "SAFE_METHODS" - const ( LDAPStorageEngine apidef.StorageEngineCode = "ldap" RPCStorageEngine apidef.StorageEngineCode = "rpc" ) -// Constants used by the version check middleware -const ( - headerLocation = "header" - urlParamLocation = "url-param" - urlLocation = "url" - expiredTimeFormat = "2006-01-02 15:04" -) - // URLStatus is a custom enum type to avoid collisions type URLStatus int @@ -102,7 +87,7 @@ const ( StatusURLRewrite RequestStatus = "URL Rewritten" StatusVirtualPath RequestStatus = "Virtual Endpoint" StatusRequestSizeControlled RequestStatus = "Request Size Limited" - StatusRequestTracked RequestStatus = "Request Tracked" + StatusRequesTracked RequestStatus = "Request Tracked" StatusRequestNotTracked RequestStatus = "Request Not Tracked" StatusValidateJSON RequestStatus = "Validate JSON" StatusInternal RequestStatus = "Internal path" @@ -115,7 +100,6 @@ type URLSpec struct { Spec *regexp.Regexp Status URLStatus MethodActions map[string]apidef.EndpointMethodMeta - CacheConfig EndPointCacheMeta TransformAction TransformSpec TransformResponseAction TransformSpec TransformJQAction TransformJQSpec @@ -134,11 +118,6 @@ type URLSpec struct { Internal apidef.InternalMeta } -type EndPointCacheMeta struct { - Method string - CacheKeyRegex string -} - type TransformSpec struct { apidef.TemplateMeta Template *template.Template @@ -228,7 +207,7 @@ func (a APIDefinitionLoader) MakeSpec(def *apidef.APIDefinition, logger *logrus. 
continue } // calculate the time - if t, err := time.Parse(expiredTimeFormat, ver.Expires); err != nil { + if t, err := time.Parse("2006-01-02 15:04", ver.Expires); err != nil { logger.WithError(err).WithField("Expires", ver.Expires).Error("Could not parse expiry date for API") } else { ver.ExpiresTs = t @@ -387,11 +366,11 @@ func (a APIDefinitionLoader) FromDashboardService(endpoint, secret string) ([]*A // FromCloud will connect and download ApiDefintions from a Mongo DB instance. func (a APIDefinitionLoader) FromRPC(orgId string) ([]*APISpec, error) { - if rpc.IsEmergencyMode() { + if rpcEmergencyMode { return LoadDefinitionsFromRPCBackup() } - store := RPCStorageHandler{} + store := RPCStorageHandler{UserKey: config.Global().SlaveOptions.APIKey, Address: config.Global().SlaveOptions.ConnectionString} if !store.Connect() { return nil, errors.New("Can't connect RPC layer") } @@ -407,7 +386,7 @@ func (a APIDefinitionLoader) FromRPC(orgId string) ([]*APISpec, error) { //store.Disconnect() - if rpc.LoadCount() > 0 { + if rpcLoadCount > 0 { if err := saveRPCDefinitionsBackup(apiCollection); err != nil { return nil, err } @@ -525,25 +504,14 @@ func (a APIDefinitionLoader) compileExtendedPathSpec(paths []apidef.EndPointMeta return urlSpec } -func (a APIDefinitionLoader) compileCachedPathSpec(oldpaths []string, newpaths []apidef.CacheMeta) []URLSpec { +func (a APIDefinitionLoader) compileCachedPathSpec(paths []string) []URLSpec { // transform an extended configuration URL into an array of URLSpecs // This way we can iterate the whole array once, on match we break with status urlSpec := []URLSpec{} - for _, stringSpec := range oldpaths { + for _, stringSpec := range paths { newSpec := URLSpec{} a.generateRegex(stringSpec, &newSpec, Cached) - newSpec.CacheConfig.Method = SAFE_METHODS - newSpec.CacheConfig.CacheKeyRegex = "" - // Extend with method actions - urlSpec = append(urlSpec, newSpec) - } - - for _, spec := range newpaths { - newSpec := URLSpec{} - 
a.generateRegex(spec.Path, &newSpec, Cached) - newSpec.CacheConfig.Method = spec.Method - newSpec.CacheConfig.CacheKeyRegex = spec.CacheKeyRegex // Extend with method actions urlSpec = append(urlSpec, newSpec) } @@ -551,17 +519,9 @@ func (a APIDefinitionLoader) compileCachedPathSpec(oldpaths []string, newpaths [ return urlSpec } -func (a APIDefinitionLoader) filterSprigFuncs() template.FuncMap { - tmp := sprig.GenericFuncMap() - delete(tmp, "env") - delete(tmp, "expandenv") - - return template.FuncMap(tmp) -} - func (a APIDefinitionLoader) loadFileTemplate(path string) (*template.Template, error) { log.Debug("-- Loading template: ", path) - return apidef.Template.New("").Funcs(a.filterSprigFuncs()).ParseFiles(path) + return apidef.Template.New("").ParseFiles(path) } func (a APIDefinitionLoader) loadBlobTemplate(blob string) (*template.Template, error) { @@ -570,7 +530,7 @@ func (a APIDefinitionLoader) loadBlobTemplate(blob string) (*template.Template, if err != nil { return nil, err } - return apidef.Template.New("").Funcs(a.filterSprigFuncs()).Parse(string(uDec)) + return apidef.Template.New("").Parse(string(uDec)) } func (a APIDefinitionLoader) compileTransformPathSpec(paths []apidef.TemplateMeta, stat URLStatus) []URLSpec { @@ -873,7 +833,7 @@ func (a APIDefinitionLoader) getExtendedPathSpecs(apiVersionDef apidef.VersionIn ignoredPaths := a.compileExtendedPathSpec(apiVersionDef.ExtendedPaths.Ignored, Ignored) blackListPaths := a.compileExtendedPathSpec(apiVersionDef.ExtendedPaths.BlackList, BlackList) whiteListPaths := a.compileExtendedPathSpec(apiVersionDef.ExtendedPaths.WhiteList, WhiteList) - cachedPaths := a.compileCachedPathSpec(apiVersionDef.ExtendedPaths.Cached, apiVersionDef.ExtendedPaths.AdvanceCacheConfig) + cachedPaths := a.compileCachedPathSpec(apiVersionDef.ExtendedPaths.Cached) transformPaths := a.compileTransformPathSpec(apiVersionDef.ExtendedPaths.Transform, Transformed) transformResponsePaths := 
a.compileTransformPathSpec(apiVersionDef.ExtendedPaths.TransformResponse, TransformedResponse) transformJQPaths := a.compileTransformJQPathSpec(apiVersionDef.ExtendedPaths.TransformJQ, TransformedJQ) @@ -963,7 +923,7 @@ func (a *APISpec) getURLStatus(stat URLStatus) RequestStatus { case MethodTransformed: return StatusMethodTransformed case RequestTracked: - return StatusRequestTracked + return StatusRequesTracked case RequestNotTracked: return StatusRequestNotTracked case ValidateJSONRequest: @@ -981,7 +941,7 @@ func (a *APISpec) getURLStatus(stat URLStatus) RequestStatus { func (a *APISpec) URLAllowedAndIgnored(r *http.Request, rxPaths []URLSpec, whiteListStatus bool) (RequestStatus, interface{}) { // Check if ignored for _, v := range rxPaths { - if !v.Spec.MatchString(r.URL.Path) { + if !v.Spec.MatchString(strings.ToLower(r.URL.Path)) { continue } @@ -1085,12 +1045,8 @@ func (a *APISpec) CheckSpecMatchesStatus(r *http.Request, rxPaths []URLSpec, mod } switch v.Status { - case Ignored, BlackList, WhiteList: + case Ignored, BlackList, WhiteList, Cached: return true, nil - case Cached: - if method == v.CacheConfig.Method || (v.CacheConfig.Method == SAFE_METHODS && (method == "GET" || method == "HEADERS" || method == "OPTIONS")) { - return true, &v.CacheConfig - } case Transformed: if method == v.TransformAction.Method { return true, &v.TransformAction @@ -1166,13 +1122,13 @@ func (a *APISpec) getVersionFromRequest(r *http.Request) string { } switch a.VersionDefinition.Location { - case headerLocation: + case "header": return r.Header.Get(a.VersionDefinition.Key) - case urlParamLocation: + case "url-param": return r.URL.Query().Get(a.VersionDefinition.Key) - case urlLocation: + case "url": uPath := strings.TrimPrefix(r.URL.Path, a.Proxy.ListenPath) uPath = strings.TrimPrefix(uPath, "/"+a.Slug) diff --git a/gateway/api_definition_test.go b/api_definition_test.go similarity index 82% rename from gateway/api_definition_test.go rename to api_definition_test.go index 
1b7cfa6bbfd7..2af81b0b88c1 100644 --- a/gateway/api_definition_test.go +++ b/api_definition_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/json" @@ -6,6 +6,7 @@ import ( "net" "net/http" "net/http/httptest" + "strings" "sync" "testing" "time" @@ -18,13 +19,20 @@ import ( "github.com/TykTechnologies/tyk/user" ) +func createDefinitionFromString(defStr string) *APISpec { + loader := APIDefinitionLoader{} + def := loader.ParseDefinition(strings.NewReader(defStr)) + spec := loader.MakeSpec(def, nil) + return spec +} + func TestURLRewrites(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() t.Run("Extended Paths with url_rewrites", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + buildAndLoadAPI(func(spec *APISpec) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { json.Unmarshal([]byte(`[ { "path": "/rewrite1", @@ -73,12 +81,12 @@ func TestURLRewrites(t *testing.T) { } func TestWhitelist(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() t.Run("Extended Paths", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + buildAndLoadAPI(func(spec *APISpec) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { json.Unmarshal([]byte(`[ { "path": "/reply/{id}", @@ -108,8 +116,8 @@ func TestWhitelist(t *testing.T) { }) t.Run("Simple Paths", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + buildAndLoadAPI(func(spec *APISpec) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.Paths.WhiteList = []string{"/simple", "/regex/{id}/test"} v.UseExtendedPaths = false }) @@ -127,8 +135,8 @@ func TestWhitelist(t *testing.T) { }) t.Run("Test #1944", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v 
*apidef.VersionInfo) { + buildAndLoadAPI(func(spec *APISpec) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.Paths.WhiteList = []string{"/foo/{fooId}$", "/foo/{fooId}/bar/{barId}$", "/baz/{bazId}"} v.UseExtendedPaths = false }) @@ -151,33 +159,15 @@ func TestWhitelist(t *testing.T) { {Path: "/baz/1/bazz", Code: http.StatusOK}, }...) }) - - t.Run("Case Sensitivity", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { - v.Paths.WhiteList = []string{"/Foo", "/bar"} - v.UseExtendedPaths = false - }) - - spec.Proxy.ListenPath = "/" - }) - - ts.Run(t, []test.TestCase{ - {Path: "/foo", Code: http.StatusForbidden}, - {Path: "/Foo", Code: http.StatusOK}, - {Path: "/bar", Code: http.StatusOK}, - {Path: "/Bar", Code: http.StatusForbidden}, - }...) - }) } func TestBlacklist(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() t.Run("Extended Paths", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + buildAndLoadAPI(func(spec *APISpec) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { json.Unmarshal([]byte(`[ { "path": "/blacklist/literal", @@ -204,8 +194,8 @@ func TestBlacklist(t *testing.T) { }) t.Run("Simple Paths", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + buildAndLoadAPI(func(spec *APISpec) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.Paths.BlackList = []string{"/blacklist/literal", "/blacklist/{id}/test"} v.UseExtendedPaths = false }) @@ -222,32 +212,14 @@ func TestBlacklist(t *testing.T) { {Method: "POST", Path: "/blacklist/literal", Code: http.StatusForbidden}, }...) 
}) - - t.Run("Case Sensitivity", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { - v.Paths.BlackList = []string{"/Foo", "/bar"} - v.UseExtendedPaths = false - }) - - spec.Proxy.ListenPath = "/" - }) - - ts.Run(t, []test.TestCase{ - {Path: "/foo", Code: http.StatusOK}, - {Path: "/Foo", Code: http.StatusForbidden}, - {Path: "/bar", Code: http.StatusForbidden}, - {Path: "/Bar", Code: http.StatusOK}, - }...) - }) } func TestConflictingPaths(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + buildAndLoadAPI(func(spec *APISpec) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { json.Unmarshal([]byte(`[ { "path": "/metadata/{id}", @@ -271,12 +243,12 @@ func TestConflictingPaths(t *testing.T) { } func TestIgnored(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() t.Run("Extended Paths", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + buildAndLoadAPI(func(spec *APISpec) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { json.Unmarshal([]byte(`[ { "path": "/ignored/literal", @@ -305,8 +277,8 @@ func TestIgnored(t *testing.T) { }) t.Run("Simple Paths", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + buildAndLoadAPI(func(spec *APISpec) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.Paths.Ignored = []string{"/ignored/literal", "/ignored/{id}/test"} v.UseExtendedPaths = false }) @@ -328,15 +300,15 @@ func TestIgnored(t *testing.T) { } func TestWhitelistMethodWithAdditionalMiddleware(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() t.Run("Extended Paths", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + 
buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = true spec.Proxy.ListenPath = "/" - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.UseExtendedPaths = true json.Unmarshal([]byte(`[ @@ -360,6 +332,7 @@ func TestWhitelistMethodWithAdditionalMiddleware(t *testing.T) { //headers := map[string]string{"foo": "bar"} ts.Run(t, []test.TestCase{ + //Should get original upstream response //{Method: "GET", Path: "/get", Code: http.StatusOK, HeadersMatch: headers}, //Reject not whitelisted (but know by upstream) path @@ -369,7 +342,7 @@ func TestWhitelistMethodWithAdditionalMiddleware(t *testing.T) { } func TestSyncAPISpecsDashboardSuccess(t *testing.T) { - // Test Dashboard + // Mock Dashboard ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/system/apis" { w.Write([]byte(`{"Status": "OK", "Nonce": "1", "Message": [{"api_definition": {}}]}`)) @@ -389,7 +362,7 @@ func TestSyncAPISpecsDashboardSuccess(t *testing.T) { globalConf.DBAppConfOptions.ConnectionString = ts.URL config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() var wg sync.WaitGroup wg.Add(1) @@ -402,7 +375,7 @@ func TestSyncAPISpecsDashboardSuccess(t *testing.T) { handleRedisEvent(msg, handled, wg.Done) // Since we already know that reload is queued - ReloadTick <- time.Time{} + reloadTick <- time.Time{} // Wait for the reload to finish, then check it worked wg.Wait() @@ -445,7 +418,7 @@ func (ln *customListener) Init(addr string) (err error) { return } -func (ln *customListener) Accept() (conn net.Conn, err error) { +func (ln *customListener) Accept() (conn io.ReadWriteCloser, clientAddr string, err error) { c, err := ln.L.Accept() if err != nil { return @@ -472,7 +445,7 @@ func (ln *customListener) Accept() (conn net.Conn, err error) { return } - return c, nil + return c, string(id), nil } func (ln *customListener) Close() error { @@ -480,7 
+453,7 @@ func (ln *customListener) Close() error { } func TestDefaultVersion(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() key := testPrepareDefaultVersion() @@ -498,7 +471,7 @@ func TestDefaultVersion(t *testing.T) { func BenchmarkDefaultVersion(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() key := testPrepareDefaultVersion() @@ -519,7 +492,7 @@ func BenchmarkDefaultVersion(b *testing.B) { } func testPrepareDefaultVersion() string { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { v1 := apidef.VersionInfo{Name: "v1"} v1.Name = "v1" v1.Paths.WhiteList = []string{"/foo"} @@ -527,7 +500,7 @@ func testPrepareDefaultVersion() string { v2 := apidef.VersionInfo{Name: "v2"} v2.Paths.WhiteList = []string{"/bar"} - spec.VersionDefinition.Location = urlParamLocation + spec.VersionDefinition.Location = "url-param" spec.VersionDefinition.Key = "v" spec.VersionData.NotVersioned = false @@ -539,7 +512,7 @@ func testPrepareDefaultVersion() string { spec.UseKeylessAccess = false }) - return CreateSession(func(s *user.SessionState) { + return createSession(func(s *user.SessionState) { s.AccessRights = map[string]user.AccessDefinition{"test": { APIID: "test", Versions: []string{"v1", "v2"}, }} @@ -547,7 +520,7 @@ func testPrepareDefaultVersion() string { } func TestGetVersionFromRequest(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() versionInfo := apidef.VersionInfo{} @@ -555,27 +528,25 @@ func TestGetVersionFromRequest(t *testing.T) { versionInfo.Paths.BlackList = []string{"/bar"} t.Run("Header location", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.VersionData.NotVersioned = false - spec.VersionDefinition.Location = headerLocation + spec.VersionDefinition.Location = "header" spec.VersionDefinition.Key = "X-API-Version" 
spec.VersionData.Versions["v1"] = versionInfo }) - headers := map[string]string{"X-API-Version": "v1"} - ts.Run(t, []test.TestCase{ - {Path: "/foo", Code: http.StatusOK, Headers: headers}, - {Path: "/bar", Code: http.StatusForbidden, Headers: headers}, + {Path: "/foo", Code: http.StatusOK, Headers: map[string]string{"X-API-Version": "v1"}}, + {Path: "/bar", Code: http.StatusForbidden, Headers: map[string]string{"X-API-Version": "v1"}}, }...) }) t.Run("URL param location", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.VersionData.NotVersioned = false - spec.VersionDefinition.Location = urlParamLocation + spec.VersionDefinition.Location = "url-param" spec.VersionDefinition.Key = "version" spec.VersionData.Versions["v2"] = versionInfo }) @@ -587,10 +558,10 @@ func TestGetVersionFromRequest(t *testing.T) { }) t.Run("URL location", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.VersionData.NotVersioned = false - spec.VersionDefinition.Location = urlLocation + spec.VersionDefinition.Location = "url" spec.VersionData.Versions["v3"] = versionInfo }) @@ -602,8 +573,7 @@ func TestGetVersionFromRequest(t *testing.T) { } func BenchmarkGetVersionFromRequest(b *testing.B) { - b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() versionInfo := apidef.VersionInfo{} @@ -612,30 +582,28 @@ func BenchmarkGetVersionFromRequest(b *testing.B) { b.Run("Header location", func(b *testing.B) { b.ReportAllocs() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.VersionData.NotVersioned = false - spec.VersionDefinition.Location = headerLocation + spec.VersionDefinition.Location = "header" spec.VersionDefinition.Key = "X-API-Version" spec.VersionData.Versions["v1"] = versionInfo }) - headers := map[string]string{"X-API-Version": "v1"} 
- for i := 0; i < b.N; i++ { ts.Run(b, []test.TestCase{ - {Path: "/foo", Code: http.StatusOK, Headers: headers}, - {Path: "/bar", Code: http.StatusForbidden, Headers: headers}, + {Path: "/foo", Code: http.StatusOK, Headers: map[string]string{"X-API-Version": "v1"}}, + {Path: "/bar", Code: http.StatusForbidden, Headers: map[string]string{"X-API-Version": "v1"}}, }...) } }) b.Run("URL param location", func(b *testing.B) { b.ReportAllocs() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.VersionData.NotVersioned = false - spec.VersionDefinition.Location = urlParamLocation + spec.VersionDefinition.Location = "url-param" spec.VersionDefinition.Key = "version" spec.VersionData.Versions["v2"] = versionInfo }) @@ -650,10 +618,10 @@ func BenchmarkGetVersionFromRequest(b *testing.B) { b.Run("URL location", func(b *testing.B) { b.ReportAllocs() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.VersionData.NotVersioned = false - spec.VersionDefinition.Location = urlLocation + spec.VersionDefinition.Location = "url" spec.VersionData.Versions["v3"] = versionInfo }) @@ -667,7 +635,7 @@ func BenchmarkGetVersionFromRequest(b *testing.B) { } func TestSyncAPISpecsDashboardJSONFailure(t *testing.T) { - // Test Dashboard + // Mock Dashboard callNum := 0 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/system/apis" { @@ -694,7 +662,7 @@ func TestSyncAPISpecsDashboardJSONFailure(t *testing.T) { globalConf.DBAppConfOptions.ConnectionString = ts.URL config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() var wg sync.WaitGroup wg.Add(1) @@ -707,7 +675,7 @@ func TestSyncAPISpecsDashboardJSONFailure(t *testing.T) { handleRedisEvent(msg, handled, wg.Done) // Since we already know that reload is queued - ReloadTick <- time.Time{} + reloadTick <- time.Time{} // Wait for the reload to 
finish, then check it worked wg.Wait() @@ -724,7 +692,7 @@ func TestSyncAPISpecsDashboardJSONFailure(t *testing.T) { handleRedisEvent(msg, handled, wg2.Done) // Since we already know that reload is queued - ReloadTick <- time.Time{} + reloadTick <- time.Time{} // Wait for the reload to finish, then check it worked wg2.Wait() diff --git a/gateway/api_healthcheck.go b/api_healthcheck.go similarity index 94% rename from gateway/api_healthcheck.go rename to api_healthcheck.go index 01e54786d8f1..6c715a0a8f6e 100644 --- a/gateway/api_healthcheck.go +++ b/api_healthcheck.go @@ -1,8 +1,9 @@ -package gateway +package main import ( "strconv" "strings" + "sync" "time" "github.com/TykTechnologies/tyk/config" @@ -38,12 +39,16 @@ type DefaultHealthChecker struct { APIID string } +var healthWarn sync.Once + func (h *DefaultHealthChecker) Init(storeType storage.Handler) { - if !config.Global().HealthCheck.EnableHealthChecks { - return + if config.Global().HealthCheck.EnableHealthChecks { + log.Debug("Health Checker initialised.") + healthWarn.Do(func() { + log.Warning("The Health Checker is deprecated and we do no longer recommend its use.") + }) } - log.Info("Initializing HealthChecker") h.storage = storeType h.storage.Connect() } diff --git a/gateway/api_loader.go b/api_loader.go similarity index 72% rename from gateway/api_loader.go rename to api_loader.go index 4bbe118cc802..ebca4c1a2b64 100644 --- a/gateway/api_loader.go +++ b/api_loader.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/md5" @@ -19,7 +19,6 @@ import ( "github.com/TykTechnologies/tyk/config" "github.com/TykTechnologies/tyk/coprocess" "github.com/TykTechnologies/tyk/storage" - "github.com/TykTechnologies/tyk/trace" ) type ChainObject struct { @@ -34,16 +33,16 @@ type ChainObject struct { Subrouter *mux.Router } -func prepareStorage() (storage.RedisCluster, storage.RedisCluster, storage.RedisCluster, RPCStorageHandler, RPCStorageHandler) { +func prepareStorage() (storage.RedisCluster, 
storage.RedisCluster, storage.RedisCluster, *RPCStorageHandler, *RPCStorageHandler) { redisStore := storage.RedisCluster{KeyPrefix: "apikey-", HashKeys: config.Global().HashKeys} redisOrgStore := storage.RedisCluster{KeyPrefix: "orgkey."} healthStore := storage.RedisCluster{KeyPrefix: "apihealth."} - rpcAuthStore := RPCStorageHandler{KeyPrefix: "apikey-", HashKeys: config.Global().HashKeys} - rpcOrgStore := RPCStorageHandler{KeyPrefix: "orgkey."} + rpcAuthStore := RPCStorageHandler{KeyPrefix: "apikey-", HashKeys: config.Global().HashKeys, UserKey: config.Global().SlaveOptions.APIKey, Address: config.Global().SlaveOptions.ConnectionString} + rpcOrgStore := RPCStorageHandler{KeyPrefix: "orgkey.", UserKey: config.Global().SlaveOptions.APIKey, Address: config.Global().SlaveOptions.ConnectionString} FallbackKeySesionManager.Init(&redisStore) - return redisStore, redisOrgStore, healthStore, rpcAuthStore, rpcOrgStore + return redisStore, redisOrgStore, healthStore, &rpcAuthStore, &rpcOrgStore } func skipSpecBecauseInvalid(spec *APISpec, logger *logrus.Entry) bool { @@ -185,10 +184,10 @@ func processSpec(spec *APISpec, apisByListen map[string]int, sessionStore = rpcAuthStore } - // Health checkers are initialised per spec so that each API handler has it's own connection and redis storage pool + // Health checkers are initialised per spec so that each API handler has it's own connection and redis sotorage pool spec.Init(authStore, sessionStore, healthStore, orgStore) - // Set up all the JSVM middleware + //Set up all the JSVM middleware var mwAuthCheckFunc apidef.MiddlewareDefinition mwPreFuncs := []apidef.MiddlewareDefinition{} mwPostFuncs := []apidef.MiddlewareDefinition{} @@ -268,45 +267,85 @@ func processSpec(spec *APISpec, apisByListen map[string]int, cacheStore.Connect() var chain http.Handler - var chainArray []alice.Constructor - var authArray []alice.Constructor if spec.UseKeylessAccess { chainDef.Open = true logger.Info("Checking security policy: Open") - } - 
handleCORS(&chainArray, spec) + // Add pre-process MW + chainArray := []alice.Constructor{} + handleCORS(&chainArray, spec) - for _, obj := range mwPreFuncs { - if mwDriver == apidef.GoPluginDriver { - mwAppendEnabled( - &chainArray, - &GoPluginMiddleware{ - BaseMiddleware: baseMid, - Path: obj.Path, - SymbolName: obj.Name, - }, - ) - } else if mwDriver != apidef.OttoDriver { - coprocessLog.Debug("Registering coprocess middleware, hook name: ", obj.Name, "hook type: Pre", ", driver: ", mwDriver) - mwAppendEnabled(&chainArray, &CoProcessMiddleware{baseMid, coprocess.HookType_Pre, obj.Name, mwDriver, obj.RawBodyOnly, nil}) - } else { - chainArray = append(chainArray, createDynamicMiddleware(obj.Name, true, obj.RequireSession, baseMid)) + for _, obj := range mwPreFuncs { + if mwDriver != apidef.OttoDriver { + + coprocessLog.Debug("Registering coprocess middleware, hook name: ", obj.Name, "hook type: Pre", ", driver: ", mwDriver) + mwAppendEnabled(&chainArray, &CoProcessMiddleware{baseMid, coprocess.HookType_Pre, obj.Name, mwDriver}) + } else { + chainArray = append(chainArray, createDynamicMiddleware(obj.Name, true, obj.RequireSession, baseMid)) + } + } + + mwAppendEnabled(&chainArray, &RateCheckMW{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &IPWhiteListMiddleware{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &IPBlackListMiddleware{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &CertificateCheckMW{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &OrganizationMonitor{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &RateLimitForAPI{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &ValidateJSON{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &MiddlewareContextVars{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &VersionCheck{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &RequestSizeLimitMiddleware{baseMid}) + mwAppendEnabled(&chainArray, &TrackEndpointMiddleware{baseMid}) + + 
mwAppendEnabled(&chainArray, &TransformMiddleware{baseMid}) + mwAppendEnabled(&chainArray, &TransformJQMiddleware{baseMid}) + mwAppendEnabled(&chainArray, &TransformHeaders{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &RedisCacheMiddleware{BaseMiddleware: baseMid, CacheStore: cacheStore}) + mwAppendEnabled(&chainArray, &VirtualEndpoint{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &URLRewriteMiddleware{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &TransformMethod{BaseMiddleware: baseMid}) + + for _, obj := range mwPostFuncs { + if mwDriver != apidef.OttoDriver { + + coprocessLog.Debug("Registering coprocess middleware, hook name: ", obj.Name, "hook type: Post", ", driver: ", mwDriver) + mwAppendEnabled(&chainArray, &CoProcessMiddleware{baseMid, coprocess.HookType_Post, obj.Name, mwDriver}) + } else { + chainArray = append(chainArray, createDynamicMiddleware(obj.Name, false, obj.RequireSession, baseMid)) + } } - } - mwAppendEnabled(&chainArray, &RateCheckMW{BaseMiddleware: baseMid}) - mwAppendEnabled(&chainArray, &IPWhiteListMiddleware{BaseMiddleware: baseMid}) - mwAppendEnabled(&chainArray, &IPBlackListMiddleware{BaseMiddleware: baseMid}) - mwAppendEnabled(&chainArray, &CertificateCheckMW{BaseMiddleware: baseMid}) - mwAppendEnabled(&chainArray, &OrganizationMonitor{BaseMiddleware: baseMid}) - mwAppendEnabled(&chainArray, &VersionCheck{BaseMiddleware: baseMid}) - mwAppendEnabled(&chainArray, &RequestSizeLimitMiddleware{baseMid}) - mwAppendEnabled(&chainArray, &MiddlewareContextVars{BaseMiddleware: baseMid}) - mwAppendEnabled(&chainArray, &TrackEndpointMiddleware{baseMid}) + // for KeyLessAccess we can't support rate limiting, versioning or access rules + chain = alice.New(chainArray...).Then(&DummyProxyHandler{SH: SuccessHandler{baseMid}}) + + } else { + var chainArray []alice.Constructor + var authArray []alice.Constructor + + handleCORS(&chainArray, spec) + + // Add pre-process MW + for _, obj := range mwPreFuncs { + if mwDriver 
!= apidef.OttoDriver { + + coprocessLog.Debug("Registering coprocess middleware, hook name: ", obj.Name, "hook type: Pre", ", driver: ", mwDriver) + mwAppendEnabled(&chainArray, &CoProcessMiddleware{baseMid, coprocess.HookType_Pre, obj.Name, mwDriver}) + } else { + chainArray = append(chainArray, createDynamicMiddleware(obj.Name, true, obj.RequireSession, baseMid)) + } + } + + mwAppendEnabled(&chainArray, &RateCheckMW{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &IPWhiteListMiddleware{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &IPBlackListMiddleware{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &CertificateCheckMW{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &OrganizationMonitor{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &VersionCheck{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &RequestSizeLimitMiddleware{baseMid}) + mwAppendEnabled(&chainArray, &MiddlewareContextVars{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &TrackEndpointMiddleware{baseMid}) - if !spec.UseKeylessAccess { // Select the keying method to use for setting session states if mwAppendEnabled(&authArray, &Oauth2KeyExists{baseMid}) { logger.Info("Checking security policy: OAuth") @@ -330,33 +369,22 @@ func processSpec(spec *APISpec, apisByListen map[string]int, coprocessAuth := EnableCoProcess && mwDriver != apidef.OttoDriver && spec.EnableCoProcessAuth ottoAuth := !coprocessAuth && mwDriver == apidef.OttoDriver && spec.EnableCoProcessAuth - gopluginAuth := !coprocessAuth && !ottoAuth && mwDriver == apidef.GoPluginDriver && spec.UseGoPluginAuth if coprocessAuth { // TODO: check if mwAuthCheckFunc is available/valid coprocessLog.Debug("Registering coprocess middleware, hook name: ", mwAuthCheckFunc.Name, "hook type: CustomKeyCheck", ", driver: ", mwDriver) newExtractor(spec, baseMid) - mwAppendEnabled(&authArray, &CoProcessMiddleware{baseMid, coprocess.HookType_CustomKeyCheck, mwAuthCheckFunc.Name, mwDriver, 
mwAuthCheckFunc.RawBodyOnly, nil}) + mwAppendEnabled(&authArray, &CoProcessMiddleware{baseMid, coprocess.HookType_CustomKeyCheck, mwAuthCheckFunc.Name, mwDriver}) } if ottoAuth { + logger.Info("----> Checking security policy: JS Plugin") authArray = append(authArray, createDynamicMiddleware(mwAuthCheckFunc.Name, true, false, baseMid)) } - if gopluginAuth { - mwAppendEnabled( - &authArray, - &GoPluginMiddleware{ - BaseMiddleware: baseMid, - Path: mwAuthCheckFunc.Path, - SymbolName: mwAuthCheckFunc.Name, - }, - ) - } - if spec.UseStandardAuth || len(authArray) == 0 { logger.Info("Checking security policy: Token") authArray = append(authArray, createMiddleware(&AuthKey{baseMid})) @@ -365,59 +393,39 @@ func processSpec(spec *APISpec, apisByListen map[string]int, chainArray = append(chainArray, authArray...) for _, obj := range mwPostAuthCheckFuncs { - if mwDriver == apidef.GoPluginDriver { - mwAppendEnabled( - &chainArray, - &GoPluginMiddleware{ - BaseMiddleware: baseMid, - Path: obj.Path, - SymbolName: obj.Name, - }, - ) - } else { - coprocessLog.Debug("Registering coprocess middleware, hook name: ", obj.Name, "hook type: Pre", ", driver: ", mwDriver) - mwAppendEnabled(&chainArray, &CoProcessMiddleware{baseMid, coprocess.HookType_PostKeyAuth, obj.Name, mwDriver, obj.RawBodyOnly, nil}) - } + + coprocessLog.Debug("Registering coprocess middleware, hook name: ", obj.Name, "hook type: Pre", ", driver: ", mwDriver) + mwAppendEnabled(&chainArray, &CoProcessMiddleware{baseMid, coprocess.HookType_PostKeyAuth, obj.Name, mwDriver}) } mwAppendEnabled(&chainArray, &StripAuth{baseMid}) mwAppendEnabled(&chainArray, &KeyExpired{baseMid}) mwAppendEnabled(&chainArray, &AccessRightsCheck{baseMid}) - mwAppendEnabled(&chainArray, &GranularAccessMiddleware{baseMid}) mwAppendEnabled(&chainArray, &RateLimitAndQuotaCheck{baseMid}) - } - - mwAppendEnabled(&chainArray, &RateLimitForAPI{BaseMiddleware: baseMid}) - mwAppendEnabled(&chainArray, &ValidateJSON{BaseMiddleware: baseMid}) - 
mwAppendEnabled(&chainArray, &TransformMiddleware{baseMid}) - mwAppendEnabled(&chainArray, &TransformJQMiddleware{baseMid}) - mwAppendEnabled(&chainArray, &TransformHeaders{BaseMiddleware: baseMid}) - mwAppendEnabled(&chainArray, &URLRewriteMiddleware{BaseMiddleware: baseMid}) - mwAppendEnabled(&chainArray, &TransformMethod{BaseMiddleware: baseMid}) - mwAppendEnabled(&chainArray, &RedisCacheMiddleware{BaseMiddleware: baseMid, CacheStore: &cacheStore}) - mwAppendEnabled(&chainArray, &VirtualEndpoint{BaseMiddleware: baseMid}) - - for _, obj := range mwPostFuncs { - if mwDriver == apidef.GoPluginDriver { - mwAppendEnabled( - &chainArray, - &GoPluginMiddleware{ - BaseMiddleware: baseMid, - Path: obj.Path, - SymbolName: obj.Name, - }, - ) - } else if mwDriver != apidef.OttoDriver { - coprocessLog.Debug("Registering coprocess middleware, hook name: ", obj.Name, "hook type: Post", ", driver: ", mwDriver) - mwAppendEnabled(&chainArray, &CoProcessMiddleware{baseMid, coprocess.HookType_Post, obj.Name, mwDriver, obj.RawBodyOnly, nil}) - } else { - chainArray = append(chainArray, createDynamicMiddleware(obj.Name, false, obj.RequireSession, baseMid)) + mwAppendEnabled(&chainArray, &RateLimitForAPI{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &GranularAccessMiddleware{baseMid}) + mwAppendEnabled(&chainArray, &ValidateJSON{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &TransformMiddleware{baseMid}) + mwAppendEnabled(&chainArray, &TransformJQMiddleware{baseMid}) + mwAppendEnabled(&chainArray, &TransformHeaders{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &URLRewriteMiddleware{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &RedisCacheMiddleware{BaseMiddleware: baseMid, CacheStore: cacheStore}) + mwAppendEnabled(&chainArray, &TransformMethod{BaseMiddleware: baseMid}) + mwAppendEnabled(&chainArray, &VirtualEndpoint{BaseMiddleware: baseMid}) + + for _, obj := range mwPostFuncs { + if mwDriver != apidef.OttoDriver { + + 
coprocessLog.Debug("Registering coprocess middleware, hook name: ", obj.Name, "hook type: Post", ", driver: ", mwDriver) + mwAppendEnabled(&chainArray, &CoProcessMiddleware{baseMid, coprocess.HookType_Post, obj.Name, mwDriver}) + } else { + chainArray = append(chainArray, createDynamicMiddleware(obj.Name, false, obj.RequireSession, baseMid)) + } } - } - chain = alice.New(chainArray...).Then(&DummyProxyHandler{SH: SuccessHandler{baseMid}}) + // Use createMiddleware(&ModifiedMiddleware{baseMid}) to run custom middleware + chain = alice.New(chainArray...).Then(&DummyProxyHandler{SH: SuccessHandler{baseMid}}) - if !spec.UseKeylessAccess { var simpleArray []alice.Constructor mwAppendEnabled(&simpleArray, &IPWhiteListMiddleware{baseMid}) mwAppendEnabled(&simpleArray, &IPBlackListMiddleware{BaseMiddleware: baseMid}) @@ -438,11 +446,7 @@ func processSpec(spec *APISpec, apisByListen map[string]int, logger.Debug("Setting Listen Path: ", spec.Proxy.ListenPath) - if trace.IsEnabled() { - chainDef.ThisHandler = trace.Handle(spec.Name, chain) - } else { - chainDef.ThisHandler = chain - } + chainDef.ThisHandler = chain chainDef.ListenOn = spec.Proxy.ListenPath + "{rest:.*}" chainDef.Domain = spec.Domain @@ -484,7 +488,7 @@ func (d *DummyProxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if found, err := isLoop(r); found { if err != nil { handler := ErrorHandler{*d.SH.Base()} - handler.HandleError(w, r, err.Error(), http.StatusInternalServerError, true) + handler.HandleError(w, r, err.Error(), http.StatusInternalServerError) return } @@ -503,7 +507,7 @@ func (d *DummyProxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { handler = targetAPI.middlewareChain } else { handler := ErrorHandler{*d.SH.Base()} - handler.HandleError(w, r, "Can't detect loop target", http.StatusInternalServerError, true) + handler.HandleError(w, r, "Can't detect loop target", http.StatusInternalServerError) return } } @@ -589,6 +593,8 @@ func loadApps(specs []*APISpec, muxer 
*mux.Router) { return len(specs[i].Proxy.ListenPath) > len(specs[j].Proxy.ListenPath) }) + chainChannel := make(chan *ChainObject) + // Create a new handler for each API spec loadList := make([]*ChainObject, len(specs)) apisByListen := countApisByListenHash(specs) @@ -603,12 +609,6 @@ func loadApps(specs []*APISpec, muxer *mux.Router) { for _, spec := range specs { hosts = append(hosts, spec.Domain) } - - if trace.IsEnabled() { - for _, spec := range specs { - trace.AddTracer(spec.Name) - } - } // Decreasing sort by length and chars, so that the order of // creation of the host sub-routers is deterministic and // consistent with the order of the paths. @@ -631,23 +631,32 @@ func loadApps(specs []*APISpec, muxer *mux.Router) { } for i, spec := range specs { - subrouter := hostRouters[spec.Domain] - if subrouter == nil { - mainLog.WithFields(logrus.Fields{ - "domain": spec.Domain, - "api_id": spec.APIID, - }).Warning("Trying to load API with Domain when custom domains are disabled.") - subrouter = muxer - } + go func(spec *APISpec, i int) { + subrouter := hostRouters[spec.Domain] + if subrouter == nil { + mainLog.WithFields(logrus.Fields{ + "domain": spec.Domain, + "api_id": spec.APIID, + }).Warning("Trying to load API with Domain when custom domains are disabled.") + subrouter = muxer + } - chainObj := processSpec(spec, apisByListen, &redisStore, &redisOrgStore, &healthStore, &rpcAuthStore, &rpcOrgStore, subrouter, logrus.NewEntry(log)) - apisMu.Lock() - spec.middlewareChain = chainObj.ThisHandler - apisMu.Unlock() + chainObj := processSpec(spec, apisByListen, redisStore, redisOrgStore, healthStore, rpcAuthStore, rpcOrgStore, subrouter, logrus.NewEntry(log)) + + chainObj.Index = i + chainChannel <- chainObj + apisMu.Lock() + spec.middlewareChain = chainObj.ThisHandler + apisMu.Unlock() + }(spec, i) // TODO: This will not deal with skipped APis well tmpSpecRegister[spec.APIID] = spec - loadList[i] = chainObj + } + + for range specs { + chObj := <-chainChannel + 
loadList[chObj.Index] = chObj } for _, chainObj := range loadList { diff --git a/gateway/api_test.go b/api_test.go similarity index 89% rename from gateway/api_test.go rename to api_test.go index e7b328cb21e9..2a9d29cf8849 100644 --- a/gateway/api_test.go +++ b/api_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/json" @@ -11,7 +11,7 @@ import ( "github.com/garyburd/redigo/redis" "github.com/gorilla/mux" - uuid "github.com/satori/go.uuid" + "github.com/satori/go.uuid" "fmt" @@ -41,7 +41,7 @@ const apiTestDef = `{ }` func loadSampleAPI(t *testing.T, def string) { - spec := CreateSpecTest(t, def) + spec := createSpecTest(t, def) loadApps([]*APISpec{spec}, discardMuxer) } @@ -54,12 +54,12 @@ func TestHealthCheckEndpoint(t *testing.T) { globalConf := config.Global() globalConf.HealthCheck.EnableHealthChecks = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI() + buildAndLoadAPI() ts.Run(t, []test.TestCase{ {Path: "/tyk/health/?api_id=test", AdminAuth: true, Code: 200}, @@ -83,11 +83,11 @@ func TestApiHandlerPostDupPath(t *testing.T) { t.Run("Sequentieal order", func(t *testing.T) { // Load initial API - BuildAndLoadAPI( + buildAndLoadAPI( func(spec *APISpec) { spec.APIID = "1" }, ) - BuildAndLoadAPI( + buildAndLoadAPI( func(spec *APISpec) { spec.APIID = "1" }, func(spec *APISpec) { spec.APIID = "2" }, func(spec *APISpec) { spec.APIID = "3" }, @@ -102,7 +102,7 @@ func TestApiHandlerPostDupPath(t *testing.T) { }) t.Run("Should re-order", func(t *testing.T) { - BuildAndLoadAPI( + buildAndLoadAPI( func(spec *APISpec) { spec.APIID = "2" }, func(spec *APISpec) { spec.APIID = "3" }, ) @@ -114,7 +114,7 @@ func TestApiHandlerPostDupPath(t *testing.T) { }) t.Run("Restore original order", func(t *testing.T) { - BuildAndLoadAPI( + buildAndLoadAPI( func(spec *APISpec) { spec.APIID = "1" }, func(spec *APISpec) { spec.APIID = "2" }, func(spec 
*APISpec) { spec.APIID = "3" }, @@ -130,20 +130,20 @@ func TestApiHandlerPostDupPath(t *testing.T) { } func TestKeyHandler(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.Auth.UseParam = true }) // Access right not specified - masterKey := CreateStandardSession() + masterKey := createStandardSession() masterKeyJSON, _ := json.Marshal(masterKey) // with access - withAccess := CreateStandardSession() + withAccess := createStandardSession() withAccess.AccessRights = map[string]user.AccessDefinition{"test": { APIID: "test", Versions: []string{"v1"}, }} @@ -159,14 +159,14 @@ func TestKeyHandler(t *testing.T) { }}, } policiesMu.Unlock() - withPolicy := CreateStandardSession() + withPolicy := createStandardSession() withPolicy.ApplyPolicies = []string{ "abc_policy", } withPolicyJSON, _ := json.Marshal(withPolicy) // with invalid policy - withBadPolicy := CreateStandardSession() + withBadPolicy := createStandardSession() withBadPolicy.AccessRights = map[string]user.AccessDefinition{"test": { APIID: "test", Versions: []string{"v1"}, }} @@ -233,7 +233,7 @@ func TestKeyHandler(t *testing.T) { }...) 
}) - knownKey := CreateSession() + knownKey := createSession() t.Run("Get key", func(t *testing.T) { ts.Run(t, []test.TestCase{ @@ -270,64 +270,6 @@ func TestKeyHandler(t *testing.T) { }) } -func TestKeyHandler_UpdateKey(t *testing.T) { - const testAPIID = "testAPIID" - - ts := StartTest() - defer ts.Close() - - BuildAndLoadAPI(func(spec *APISpec) { - spec.APIID = testAPIID - spec.UseKeylessAccess = false - spec.Auth.UseParam = true - }) - - pID := CreatePolicy(func(p *user.Policy) { - p.Partitions.RateLimit = true - }) - - pID2 := CreatePolicy(func(p *user.Policy) { - p.Partitions.Quota = true - }) - - session, key := ts.CreateSession(func(s *user.SessionState) { - s.ApplyPolicies = []string{pID} - s.AccessRights = map[string]user.AccessDefinition{testAPIID: { - APIID: testAPIID, Versions: []string{"v1"}, - }} - }) - - t.Run("Add policy not enforcing acl", func(t *testing.T) { - session.ApplyPolicies = append(session.ApplyPolicies, pID2) - sessionData, _ := json.Marshal(session) - path := fmt.Sprintf("/tyk/keys/%s", key) - - _, _ = ts.Run(t, []test.TestCase{ - {Method: http.MethodPut, Path: path, Data: sessionData, AdminAuth: true, Code: 200}, - }...) - - sessionState, found := FallbackKeySesionManager.SessionDetail(key, false) - if !found || sessionState.AccessRights[testAPIID].APIID != testAPIID || len(sessionState.ApplyPolicies) != 2 { - t.Fatal("Adding policy to the list failed") - } - }) - - t.Run("Remove policy not enforcing acl", func(t *testing.T) { - session.ApplyPolicies = []string{} - sessionData, _ := json.Marshal(session) - path := fmt.Sprintf("/tyk/keys/%s", key) - - _, _ = ts.Run(t, []test.TestCase{ - {Method: http.MethodPut, Path: path, Data: sessionData, AdminAuth: true, Code: 200}, - }...) 
- - sessionState, found := FallbackKeySesionManager.SessionDetail(key, false) - if !found || sessionState.AccessRights[testAPIID].APIID != testAPIID || len(sessionState.ApplyPolicies) != 0 { - t.Fatal("Removing policy from the list failed") - } - }) -} - func TestHashKeyHandler(t *testing.T) { globalConf := config.Global() // make it to use hashes for Redis keys @@ -335,7 +277,7 @@ func TestHashKeyHandler(t *testing.T) { // enable hashed keys listing globalConf.EnableHashedKeysListing = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() hashTests := []struct { hashFunction string @@ -371,9 +313,9 @@ func TestHashKeyHandlerLegacyWithHashFunc(t *testing.T) { // settings to create BA session with legacy key format globalConf.HashKeyFunction = "" config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // create session with legacy key format @@ -417,12 +359,12 @@ func TestHashKeyHandlerLegacyWithHashFunc(t *testing.T) { } func testHashKeyHandlerHelper(t *testing.T, expectedHashSize int) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI() + buildAndLoadAPI() - withAccess := CreateStandardSession() + withAccess := createStandardSession() withAccess.AccessRights = map[string]user.AccessDefinition{"test": { APIID: "test", Versions: []string{"v1"}, }} @@ -543,7 +485,7 @@ func testHashKeyHandlerHelper(t *testing.T, expectedHashSize int) { } func testHashFuncAndBAHelper(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() session := testPrepareBasicAuth(false) @@ -580,14 +522,14 @@ func TestHashKeyListingDisabled(t *testing.T) { globalConf.EnableHashedKeysListing = false config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI() + buildAndLoadAPI() - withAccess := CreateStandardSession() + 
withAccess := createStandardSession() withAccess.AccessRights = map[string]user.AccessDefinition{"test": { APIID: "test", Versions: []string{"v1"}, }} @@ -698,14 +640,14 @@ func TestHashKeyHandlerHashingDisabled(t *testing.T) { globalConf.HashKeys = false config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI() + buildAndLoadAPI() - withAccess := CreateStandardSession() + withAccess := createStandardSession() withAccess.AccessRights = map[string]user.AccessDefinition{"test": { APIID: "test", Versions: []string{"v1"}, }} @@ -780,10 +722,10 @@ func TestHashKeyHandlerHashingDisabled(t *testing.T) { } func TestInvalidateCache(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI() + buildAndLoadAPI() ts.Run(t, []test.TestCase{ {Method: "DELETE", Path: "/tyk/cache/test", AdminAuth: true, Code: 200}, @@ -792,10 +734,10 @@ func TestInvalidateCache(t *testing.T) { } func TestGetOAuthClients(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseOauth2 = true }) @@ -816,10 +758,10 @@ func TestGetOAuthClients(t *testing.T) { } func TestCreateOAuthClient(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI( + buildAndLoadAPI( func(spec *APISpec) { spec.UseOauth2 = true }, @@ -829,7 +771,7 @@ func TestCreateOAuthClient(t *testing.T) { }, ) - CreatePolicy(func(p *user.Policy) { + createPolicy(func(p *user.Policy) { p.ID = "p1" p.AccessRights = map[string]user.AccessDefinition{ "test": { @@ -837,7 +779,7 @@ func TestCreateOAuthClient(t *testing.T) { }, } }) - CreatePolicy(func(p *user.Policy) { + createPolicy(func(p *user.Policy) { p.ID = "p2" p.AccessRights = map[string]user.AccessDefinition{ "test": { @@ -967,7 +909,7 @@ func TestGroupResetHandler(t *testing.T) { // If we 
don't wait for the subscription to be done, we might do // the reload before pub/sub is in place to receive our message. <-didSubscribe - req := withAuth(TestReq(t, "GET", uri, nil)) + req := withAuth(testReq(t, "GET", uri, nil)) mainRouter.ServeHTTP(recorder, req) @@ -992,7 +934,7 @@ func TestHotReloadSingle(t *testing.T) { var wg sync.WaitGroup wg.Add(1) reloadURLStructure(wg.Done) - ReloadTick <- time.Time{} + reloadTick <- time.Time{} wg.Wait() if mainRouter == oldRouter { t.Fatal("router wasn't swapped") @@ -1010,7 +952,7 @@ func TestHotReloadMany(t *testing.T) { reloadURLStructure(wg.Done) } // pick it up and finish it - ReloadTick <- time.Time{} + reloadTick <- time.Time{} wg.Wait() // 5 reloads, but this time slower - the reload worker has time @@ -1019,7 +961,7 @@ func TestHotReloadMany(t *testing.T) { wg.Add(1) reloadURLStructure(wg.Done) // pick it up and finish it - ReloadTick <- time.Time{} + reloadTick <- time.Time{} wg.Wait() } } @@ -1030,7 +972,7 @@ func BenchmarkApiReload(b *testing.B) { specs := make([]*APISpec, 100) for i := 0; i < 100; i++ { - specs[i] = BuildAndLoadAPI(func(spec *APISpec) { + specs[i] = buildAndLoadAPI(func(spec *APISpec) { spec.APIID = strconv.Itoa(i + 1) })[0] } @@ -1082,7 +1024,7 @@ func TestApiLoaderLongestPathFirst(t *testing.T) { globalConf.EnableCustomDomains = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() type hostAndPath struct { host, path string @@ -1102,16 +1044,16 @@ func TestApiLoaderLongestPathFirst(t *testing.T) { var apis []*APISpec for hp := range inputs { - apis = append(apis, BuildAPI(func(spec *APISpec) { + apis = append(apis, buildAPI(func(spec *APISpec) { spec.APIID = uuid.NewV4().String() spec.Domain = hp.host spec.Proxy.ListenPath = "/" + hp.path })[0]) } - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - LoadAPI(apis...) + loadAPI(apis...) 
var testCases []test.TestCase diff --git a/apidef/api_definitions.go b/apidef/api_definitions.go index 41a79db339c6..65489c227d7e 100644 --- a/apidef/api_definitions.go +++ b/apidef/api_definitions.go @@ -42,11 +42,10 @@ const ( RequestXML RequestInputType = "xml" RequestJSON RequestInputType = "json" - OttoDriver MiddlewareDriver = "otto" - PythonDriver MiddlewareDriver = "python" - LuaDriver MiddlewareDriver = "lua" - GrpcDriver MiddlewareDriver = "grpc" - GoPluginDriver MiddlewareDriver = "goplugin" + OttoDriver MiddlewareDriver = "otto" + PythonDriver MiddlewareDriver = "python" + LuaDriver MiddlewareDriver = "lua" + GrpcDriver MiddlewareDriver = "grpc" BodySource IdExtractorSource = "body" HeaderSource IdExtractorSource = "header" @@ -84,12 +83,6 @@ type EndPointMeta struct { MethodActions map[string]EndpointMethodMeta `bson:"method_actions" json:"method_actions"` } -type CacheMeta struct { - Method string `bson:"method" json:"method"` - Path string `bson:"path" json:"path"` - CacheKeyRegex string `bson:"cache_key_regex" json:"cache_key_regex"` -} - type RequestInputType string type TemplateData struct { @@ -151,7 +144,6 @@ type CircuitBreakerMeta struct { type StringRegexMap struct { MatchPattern string `bson:"match_rx" json:"match_rx"` - Reverse bool `bson:"reverse" json:"reverse"` matchRegex *regexp.Regexp } @@ -176,7 +168,7 @@ type URLRewriteMeta struct { MatchPattern string `bson:"match_pattern" json:"match_pattern"` RewriteTo string `bson:"rewrite_to" json:"rewrite_to"` Triggers []RoutingTrigger `bson:"triggers" json:"triggers"` - MatchRegexp *regexp.Regexp `json:"-"` + MatchRegexp *regexp.Regexp } type VirtualMeta struct { @@ -210,7 +202,6 @@ type ExtendedPathsSet struct { WhiteList []EndPointMeta `bson:"white_list" json:"white_list,omitempty"` BlackList []EndPointMeta `bson:"black_list" json:"black_list,omitempty"` Cached []string `bson:"cache" json:"cache,omitempty"` - AdvanceCacheConfig []CacheMeta `bson:"advance_cache_config" 
json:"advance_cache_config,omitempty"` Transform []TemplateMeta `bson:"transform" json:"transform,omitempty"` TransformResponse []TemplateMeta `bson:"transform_response" json:"transform_response,omitempty"` TransformJQ []TransformJQMeta `bson:"transform_jq" json:"transform_jq,omitempty"` @@ -271,7 +262,6 @@ type MiddlewareDefinition struct { Name string `bson:"name" json:"name"` Path string `bson:"path" json:"path"` RequireSession bool `bson:"require_session" json:"require_session"` - RawBodyOnly bool `bson:"raw_body_only" json:"raw_body_only"` } type MiddlewareIdExtractor struct { @@ -336,8 +326,6 @@ type OpenIDOptions struct { } // APIDefinition represents the configuration for a single proxied API and it's versions. -// -// swagger:model type APIDefinition struct { Id bson.ObjectId `bson:"_id,omitempty" json:"id,omitempty"` Name string `bson:"name" json:"name"` @@ -368,7 +356,6 @@ type APIDefinition struct { PinnedPublicKeys map[string]string `bson:"pinned_public_keys" json:"pinned_public_keys"` EnableJWT bool `bson:"enable_jwt" json:"enable_jwt"` UseStandardAuth bool `bson:"use_standard_auth" json:"use_standard_auth"` - UseGoPluginAuth bool `bson:"use_go_plugin_auth" json:"use_go_plugin_auth"` EnableCoProcessAuth bool `bson:"enable_coprocess_auth" json:"enable_coprocess_auth"` JWTSigningMethod string `bson:"jwt_signing_method" json:"jwt_signing_method"` JWTSource string `bson:"jwt_source" json:"jwt_source"` @@ -380,8 +367,6 @@ type APIDefinition struct { JWTExpiresAtValidationSkew uint64 `bson:"jwt_expires_at_validation_skew" json:"jwt_expires_at_validation_skew"` JWTNotBeforeValidationSkew uint64 `bson:"jwt_not_before_validation_skew" json:"jwt_not_before_validation_skew"` JWTSkipKid bool `bson:"jwt_skip_kid" json:"jwt_skip_kid"` - JWTScopeToPolicyMapping map[string]string `bson:"jwt_scope_to_policy_mapping" json:"jwt_scope_to_policy_mapping"` - JWTScopeClaimName string `bson:"jwt_scope_claim_name" json:"jwt_scope_claim_name"` NotificationsDetails 
NotificationsManager `bson:"notifications" json:"notifications"` EnableSignatureChecking bool `bson:"enable_signature_checking" json:"enable_signature_checking"` HmacAllowedClockSkew float64 `bson:"hmac_allowed_clock_skew" json:"hmac_allowed_clock_skew"` @@ -582,48 +567,25 @@ func (a *APIDefinition) DecodeFromDB() { } } -func (s *StringRegexMap) Check(value string) (match string) { - if s.matchRegex == nil { - return - } - +func (s *StringRegexMap) Check(value string) string { return s.matchRegex.FindString(value) } -func (s *StringRegexMap) FindStringSubmatch(value string) (matched bool, match []string) { - if s.matchRegex == nil { - return - } - - match = s.matchRegex.FindStringSubmatch(value) - if !s.Reverse { - matched = len(match) > 0 - } else { - matched = len(match) == 0 - } - - return +func (s *StringRegexMap) FindStringSubmatch(value string) []string { + return s.matchRegex.FindStringSubmatch(value) } -func (s *StringRegexMap) FindAllStringSubmatch(value string, n int) (matched bool, matches [][]string) { - matches = s.matchRegex.FindAllStringSubmatch(value, n) - if !s.Reverse { - matched = len(matches) > 0 - } else { - matched = len(matches) == 0 - } - - return +func (s *StringRegexMap) FindAllStringSubmatch(value string, n int) [][]string { + return s.matchRegex.FindAllStringSubmatch(value, n) } func (s *StringRegexMap) Init() error { var err error if s.matchRegex, err = regexp.Compile(s.MatchPattern); err != nil { log.WithError(err).WithField("MatchPattern", s.MatchPattern). 
- Error("Could not compile matchRegex for StringRegexMap") + Error("Could not compile regexp for StringRegexMap") return err } - return nil } @@ -734,17 +696,16 @@ func DummyAPI() APIDefinition { } return APIDefinition{ - VersionData: versionData, - ConfigData: map[string]interface{}{}, - AllowedIPs: []string{}, - PinnedPublicKeys: map[string]string{}, - ResponseProcessors: []ResponseProcessor{}, - ClientCertificates: []string{}, - BlacklistedIPs: []string{}, - TagHeaders: []string{}, - UpstreamCertificates: map[string]string{}, - JWTScopeToPolicyMapping: map[string]string{}, - HmacAllowedAlgorithms: []string{}, + VersionData: versionData, + ConfigData: map[string]interface{}{}, + AllowedIPs: []string{}, + PinnedPublicKeys: map[string]string{}, + ResponseProcessors: []ResponseProcessor{}, + ClientCertificates: []string{}, + BlacklistedIPs: []string{}, + TagHeaders: []string{}, + UpstreamCertificates: map[string]string{}, + HmacAllowedAlgorithms: []string{}, CustomMiddleware: MiddlewareSection{ Post: []MiddlewareDefinition{}, Pre: []MiddlewareDefinition{}, diff --git a/apidef/importer/blueprint.go b/apidef/importer/blueprint.go index 3cdd89179938..b60ede98753d 100644 --- a/apidef/importer/blueprint.go +++ b/apidef/importer/blueprint.go @@ -109,7 +109,7 @@ func (b *BluePrintAST) ConvertIntoApiVersion(asMock bool) (apidef.VersionInfo, e for _, resourceGroup := range b.ResourceGroups { if len(resourceGroup.Resources) < 1 { - return versionInfo, errors.New("no resources defined in the resource group") + return versionInfo, errors.New("no resourcs defined in the resource group") } for _, resource := range resourceGroup.Resources { @@ -124,7 +124,7 @@ func (b *BluePrintAST) ConvertIntoApiVersion(asMock bool) (apidef.VersionInfo, e endPointMethodMeta := apidef.EndpointMethodMeta{} code, err := strconv.Atoi(action.Examples[0].Responses[0].Name) if err != nil { - log.Warning("Could not generate response code from Name field, using 200") + log.Warning("Could not genrate 
response code form Name field, using 200") code = 200 } endPointMethodMeta.Code = code diff --git a/apidef/importer/importer.go b/apidef/importer/importer.go index 675616c6de6c..d6d178344322 100644 --- a/apidef/importer/importer.go +++ b/apidef/importer/importer.go @@ -26,8 +26,6 @@ func GetImporterForSource(source APIImporterSource) (APIImporter, error) { return &BluePrintAST{}, nil case SwaggerSource: return &SwaggerAST{}, nil - case WSDLSource: - return &WSDLDef{}, nil default: return nil, errors.New("source not matched, failing") } diff --git a/apidef/importer/swagger.go b/apidef/importer/swagger.go index 7c2773046b14..56ba0e9ed06c 100644 --- a/apidef/importer/swagger.go +++ b/apidef/importer/swagger.go @@ -6,7 +6,7 @@ import ( "io" "strings" - uuid "github.com/satori/go.uuid" + "github.com/satori/go.uuid" "github.com/TykTechnologies/tyk/apidef" ) diff --git a/apidef/importer/wsdl.go b/apidef/importer/wsdl.go deleted file mode 100644 index 4391e702ff81..000000000000 --- a/apidef/importer/wsdl.go +++ /dev/null @@ -1,445 +0,0 @@ -package importer - -import ( - "encoding/xml" - "errors" - "io" - "net/http" - "strings" - - "github.com/TykTechnologies/tyk/apidef" - uuid "github.com/satori/go.uuid" -) - -const WSDLSource APIImporterSource = "wsdl" - -var portName = map[string]string{} -var bindingList = map[string]*WSDLBinding{} - -func (*WSDLDef) SetServicePortMapping(input map[string]string) { - for k, v := range input { - portName[k] = v - } -} - -const ( - NS_WSDL20 = "http://www.w3.org/ns/wsdl" - NS_WSDL = "http://schemas.xmlsoap.org/wsdl/" - NS_SOAP = "http://schemas.xmlsoap.org/wsdl/soap/" - NS_SOAP12 = "http://schemas.xmlsoap.org/wsdl/soap12/" - NS_HTTP = "http://schemas.xmlsoap.org/wsdl/http/" -) - -const ( - PROT_HTTP = "http" - PROT_SOAP = "soap" - PROT_SOAP_12 = "soap12" -) - -type WSDLDef struct { - Definition WSDL `xml:"http://schemas.xmlsoap.org/wsdl/ definitions"` -} - -type WSDL struct { - Services []*WSDLService 
`xml:"http://schemas.xmlsoap.org/wsdl/ service"` - Bindings []*WSDLBinding `xml:"http://schemas.xmlsoap.org/wsdl/ binding"` -} - -type WSDLService struct { - Name string `xml:"name,attr"` - Ports []*WSDLPort `xml:"http://schemas.xmlsoap.org/wsdl/ port"` -} - -type WSDLPort struct { - Name string `xml:"name,attr"` - Binding string `xml:"binding,attr"` - Address WSDLAddress `xml:"address"` -} - -type WSDLAddress struct { - Location string `xml:"location,attr"` -} - -type WSDLBinding struct { - Name string `xml:"name,attr"` - Operations []*WSDLOperation `xml:"http://schemas.xmlsoap.org/wsdl/ operation"` - Protocol string - Method string - isSupportedProtocol bool -} - -type WSDLOperation struct { - Name string `xml:"name,attr"` - Endpoint string - IsUrlReplacement bool -} - -func (def *WSDLDef) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - if start.Name.Space == NS_WSDL20 { - return errors.New("WSDL 2.0 is not supported") - } else if start.Name.Space == NS_WSDL && start.Name.Local == "definitions" { - return d.DecodeElement(&def.Definition, &start) - } else { - return errors.New("Invalid WSDL file. WSDL definition must start contain element") - } -} - -func (b *WSDLBinding) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - //Get value of name attribute - for _, attr := range start.Attr { - if attr.Name.Local == "name" { - b.Name = attr.Value - break - } - } - - if b.Name == "" { - return errors.New("Binding name is empty. 
Malformed wsdl") - } - - //Fetch protocol specific data - //If soap/soap12 is used, set Method to POST - //If http is used, get value of verb attribute - //If any other protocol is used, then skip - for { - tok, err := d.Token() - if err != nil { - log.Error("Error will parsing WSDL file: ", err) - return err - } - - switch t := tok.(type) { - case xml.StartElement: - { - switch t.Name.Local { - case "binding": - { - switch t.Name.Space { - case NS_SOAP, NS_SOAP12: - { - b.isSupportedProtocol = true - if t.Name.Space == NS_SOAP { - b.Protocol = PROT_SOAP - } else { - b.Protocol = PROT_SOAP_12 - } - - //Get transport protocol - //TODO if transport protocol is different from http - var transport string - for _, attr := range t.Attr { - if attr.Name.Local == "transport" { - transport = attr.Value - break - } - } - parts := strings.Split(transport, "/") - if parts[len(parts)-1] == "http" { - b.Method = http.MethodPost - } else { - b.isSupportedProtocol = false - } - - } - case NS_HTTP: - { - b.isSupportedProtocol = true - b.Protocol = PROT_HTTP - for _, attr := range t.Attr { - if attr.Name.Local == "verb" { - b.Method = attr.Value - break - } - } - - } - default: - { - log.Debugf("Unsupported binding protocol is used %s:%s", t.Name.Space, t.Name.Local) - b.isSupportedProtocol = false - return nil - } - } - } - case "operation": - { - if t.Name.Space == NS_WSDL && b.isSupportedProtocol { - op := new(WSDLOperation) - if err := d.DecodeElement(op, &t); err != nil { - return err - } - b.Operations = append(b.Operations, op) - } - } - default: - { - if err := d.Skip(); err != nil { - return err - } - } - } - } - case xml.EndElement: - { - if t.Name.Space == NS_WSDL && t.Name.Local == "binding" { - bindingList[b.Name] = b - return nil - } - } - } - } -} - -func (op *WSDLOperation) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - for _, attr := range start.Attr { - if attr.Name.Local == "name" { - op.Name = attr.Value - break - } - } - - if op.Name == "" { - 
return errors.New("Operation name is empty. Malformed wsdl") - } - - var protocol string - - for { - tok, err := d.Token() - if err != nil { - return err - } - - switch t := tok.(type) { - case xml.StartElement: - { - if t.Name.Local == "operation" { - switch t.Name.Space { - case NS_SOAP, NS_SOAP12: - { - protocol = PROT_SOAP - break - } - case NS_HTTP: - { - protocol = PROT_HTTP - for _, attr := range t.Attr { - if attr.Name.Local == "location" { - op.Endpoint = attr.Value - break - } - } - break - } - default: - { - if err := d.Skip(); err != nil { - return err - } - } - - } - } - - if protocol == PROT_HTTP { - if t.Name.Local == "urlReplacement" { - op.IsUrlReplacement = true - endpoint := op.Endpoint - tmp := strings.Replace(endpoint, "(", "{", -1) - new_endpoint := strings.Replace(tmp, ")", "}", -1) - - op.Endpoint = new_endpoint - - } - } else { - if err := d.Skip(); err != nil { - return err - } - } - } - case xml.EndElement: - { - if t.Name.Space == NS_WSDL && t.Name.Local == "operation" { - return nil - } - } - } - - } -} - -func (s *WSDLDef) LoadFrom(r io.Reader) error { - return xml.NewDecoder(r).Decode(&s) -} - -func (def *WSDLDef) ToAPIDefinition(orgId, upstreamURL string, as_mock bool) (*apidef.APIDefinition, error) { - ad := apidef.APIDefinition{ - Name: def.Definition.Services[0].Name, - Active: true, - UseKeylessAccess: true, - OrgID: orgId, - APIID: uuid.NewV4().String(), - } - - ad.VersionDefinition.Key = "version" - ad.VersionDefinition.Location = "header" - ad.VersionData.Versions = make(map[string]apidef.VersionInfo) - ad.Proxy.ListenPath = "/" + def.Definition.Services[0].Name + "/" - ad.Proxy.StripListenPath = true - ad.Proxy.TargetURL = upstreamURL - - if as_mock { - log.Warning("Mocks not supported for WSDL definitions, ignoring option") - } - - versionData, err := def.ConvertIntoApiVersion(false) - if err != nil { - return nil, err - } - - def.InsertIntoAPIDefinitionAsVersion(versionData, &ad, "1.0.0") - ad.VersionData.DefaultVersion = 
"1.0.0" - return &ad, nil -} - -func trimNamespace(s string) string { - parts := strings.SplitN(s, ":", 2) - if len(parts) == 1 { - return parts[0] - } else { - return parts[1] - } -} - -func (def *WSDLDef) ConvertIntoApiVersion(bool) (apidef.VersionInfo, error) { - versionInfo := apidef.VersionInfo{} - versionInfo.UseExtendedPaths = true - versionInfo.Name = "1.0.0" - versionInfo.ExtendedPaths.TrackEndpoints = make([]apidef.TrackEndpointMeta, 0) - versionInfo.ExtendedPaths.URLRewrite = make([]apidef.URLRewriteMeta, 0) - versionInfo.ExtendedPaths.Internal = make([]apidef.InternalMeta, 0) - - var foundPort bool - var serviceCount int - - for _, service := range def.Definition.Services { - foundPort = false - if service.Name == "" { - continue - } - for _, port := range service.Ports { - portName := portName[service.Name] - if portName == "" { - portName = service.Ports[0].Name - } - if port.Name == portName { - foundPort = true - - bindingName := trimNamespace(port.Binding) - - binding := bindingList[bindingName] - if binding == nil { - log.Errorf("Binding for port %s of service %s not found. Termination processing of the service", port.Name, service.Name) - - foundPort = false - break - } - - if !binding.isSupportedProtocol { - log.Errorf("Unsupported transport protocol. 
Skipping process of the service %s", service.Name) - foundPort = false - break - } - - if len(binding.Operations) == 0 { - log.Errorf("No operation found for binding %s of service %s\n", binding.Name, service.Name) - break - } - - serviceCount++ - method := binding.Method - - //Create endpoints for each operation - for _, op := range binding.Operations { - operationTrackEndpoint := apidef.TrackEndpointMeta{} - operationUrlRewrite := apidef.URLRewriteMeta{} - path := "" - - if binding.Protocol == PROT_HTTP { - if op.Endpoint[0] == '/' { - path = service.Name + op.Endpoint - } else { - path = service.Name + "/" + op.Endpoint - } - } else { - path = service.Name + "/" + op.Name - } - - //Add each operation in trackendpoint - operationTrackEndpoint.Path = path - operationTrackEndpoint.Method = method - - versionInfo.ExtendedPaths.TrackEndpoints = append(versionInfo.ExtendedPaths.TrackEndpoints, operationTrackEndpoint) - - //Rewrite operation to service endpoint - operationUrlRewrite.Method = method - operationUrlRewrite.Path = path - - if binding.Protocol == PROT_HTTP { - if op.IsUrlReplacement == true { - pattern := ReplaceWildCards(op.Endpoint) - operationUrlRewrite.MatchPattern = "(" + pattern + ")" - } else { - operationUrlRewrite.MatchPattern = "(" + op.Endpoint + ".*)" - } - operationUrlRewrite.RewriteTo = port.Address.Location + "$1" - } else { - operationUrlRewrite.MatchPattern = path - operationUrlRewrite.RewriteTo = port.Address.Location - } - - versionInfo.ExtendedPaths.URLRewrite = append(versionInfo.ExtendedPaths.URLRewrite, operationUrlRewrite) - } - - break - } - } - - if foundPort == false { - log.Errorf("Port for service %s not found. 
Skiping processing of the service", service.Name) - } - } - - if serviceCount == 0 { - return versionInfo, errors.New("Error processing wsdl file") - } - - return versionInfo, nil -} - -func (def *WSDLDef) InsertIntoAPIDefinitionAsVersion(version apidef.VersionInfo, apidef *apidef.APIDefinition, versionName string) error { - apidef.VersionData.NotVersioned = false - apidef.VersionData.Versions[versionName] = version - return nil -} - -func ReplaceWildCards(endpoint string) string { - var result []rune - var inside bool - - for _, s := range endpoint { - if s == '{' { - inside = true - continue - } else if s == '}' { - inside = false - result = append(result, '.', '*') - continue - } - - if inside == false { - result = append(result, s) - } - } - return string(result) -} diff --git a/apidef/importer/wsdl_test.go b/apidef/importer/wsdl_test.go deleted file mode 100644 index 8dfd56d18946..000000000000 --- a/apidef/importer/wsdl_test.go +++ /dev/null @@ -1,1055 +0,0 @@ -package importer - -import ( - "bytes" - "testing" -) - -type testWSDLInput struct { - wsdlDefinition string - isInvalidInput bool - data []testWSDLData -} - -type testWSDLData struct { - servicePortNameMapping map[string]string - noOfEndpoints int - endpoints []endpointData - returnErr bool -} - -type endpointData struct { - method string - path string - matchPattern string - rewritePath string -} - -var testData = []testWSDLInput{ - { - wsdlDefinition: holidayService, - data: []testWSDLData{ - { - servicePortNameMapping: map[string]string{"HolidayService2": "HolidayService2Soap"}, - noOfEndpoints: 6, - endpoints: []endpointData{ - { - path: "HolidayService2/GetHolidaysForDateRange", - method: "POST", - matchPattern: "HolidayService2/GetHolidaysForDateRange", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - { - path: "HolidayService2/GetCountriesAvailable", - method: "POST", - matchPattern: "HolidayService2/GetCountriesAvailable", - rewritePath: 
"http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - { - path: "HolidayService2/GetHolidaysAvailable", - method: "POST", - matchPattern: "HolidayService2/GetHolidaysAvailable", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - { - path: "HolidayService2/GetHolidaysForMonth", - method: "POST", - matchPattern: "HolidayService2/GetHolidaysForMonth", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - { - path: "HolidayService2/GetHolidaysForYear", - method: "POST", - matchPattern: "HolidayService2/GetHolidaysForYear", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - { - path: "HolidayService2/GetHolidayDate", - method: "POST", - matchPattern: "HolidayService2/GetHolidayDate", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - }, - }, - { - servicePortNameMapping: map[string]string{"HolidayService2": "HolidayService2HttpGet"}, - noOfEndpoints: 6, - endpoints: []endpointData{ - { - path: "HolidayService2/GetHolidaysForDateRange", - method: "GET", - matchPattern: "(/GetHolidaysForDateRange.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - { - path: "HolidayService2/GetCountriesAvailable", - method: "GET", - matchPattern: "(/GetCountriesAvailable.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - { - path: "HolidayService2/GetHolidaysAvailable", - method: "GET", - matchPattern: "(/GetHolidaysAvailable.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - { - path: "HolidayService2/GetHolidaysForMonth", - method: "GET", - matchPattern: "(/GetHolidaysForMonth.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - { - path: "HolidayService2/GetHolidaysForYear", 
- method: "GET", - matchPattern: "(/GetHolidaysForYear.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - { - path: "HolidayService2/GetHolidayDate", - method: "GET", - matchPattern: "(/GetHolidayDate.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - }, - }, - { - servicePortNameMapping: map[string]string{"HolidayService2": "HolidayService2HttpPost"}, - noOfEndpoints: 6, - endpoints: []endpointData{ - { - path: "HolidayService2/GetHolidaysForDateRange", - method: "POST", - matchPattern: "(/GetHolidaysForDateRange.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - { - path: "HolidayService2/GetCountriesAvailable", - method: "POST", - matchPattern: "(/GetCountriesAvailable.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - { - path: "HolidayService2/GetHolidaysAvailable", - method: "POST", - matchPattern: "(/GetHolidaysAvailable.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - { - path: "HolidayService2/GetHolidaysForMonth", - method: "POST", - matchPattern: "(/GetHolidaysForMonth.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - { - path: "HolidayService2/GetHolidaysForYear", - method: "POST", - matchPattern: "(/GetHolidaysForYear.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - { - path: "HolidayService2/GetHolidayDate", - method: "POST", - matchPattern: "(/GetHolidayDate.*)", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx$1", - }, - }, - }, - { - servicePortNameMapping: map[string]string{"HolidayService2": ""}, - noOfEndpoints: 6, - endpoints: []endpointData{ - { - path: "HolidayService2/GetHolidaysForDateRange", - method: "POST", - matchPattern: 
"HolidayService2/GetHolidaysForDateRange", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - { - path: "HolidayService2/GetCountriesAvailable", - method: "POST", - matchPattern: "HolidayService2/GetCountriesAvailable", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - { - path: "HolidayService2/GetHolidaysAvailable", - method: "POST", - matchPattern: "HolidayService2/GetHolidaysAvailable", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - { - path: "HolidayService2/GetHolidaysForMonth", - method: "POST", - matchPattern: "HolidayService2/GetHolidaysForMonth", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - { - path: "HolidayService2/GetHolidaysForYear", - method: "POST", - matchPattern: "HolidayService2/GetHolidaysForYear", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - { - path: "HolidayService2/GetHolidayDate", - method: "POST", - matchPattern: "HolidayService2/GetHolidayDate", - rewritePath: "http://www.holidaywebservice.com/HolidayService_v2/HolidayService2.asmx", - }, - }, - }, - { - //invalid portName is provided - //should throw an error - servicePortNameMapping: map[string]string{"HolidayService2": "something"}, - returnErr: true, - }, - }, - }, - { - //smtp protocol is not supported - //should throw error - wsdlDefinition: smtpExample, - data: []testWSDLData{ - { - servicePortNameMapping: map[string]string{"StockQuoteService": "StockQuotePort"}, - returnErr: true, - }, - }, - }, - { - //Invalid input - wsdlDefinition: "", - isInvalidInput: true, - }, - { - //Invalid input - wsdlDefinition: wsdl_2_0_example, - isInvalidInput: true, - }, -} - -func TestToAPIDefinition_WSDL(t *testing.T) { - for _, input := range testData { - wsdl_imp := &WSDLDef{} - buff := bytes.NewBufferString(input.wsdlDefinition) - - err := 
wsdl_imp.LoadFrom(buff) - if err != nil { - if input.isInvalidInput { - continue - } else { - t.Fatal(err) - } - } - - for _, data := range input.data { - wsdl_imp.SetServicePortMapping(data.servicePortNameMapping) - def, err := wsdl_imp.ToAPIDefinition("testOrg", "http://test.com", false) - - if err != nil { - if !data.returnErr { - t.Fatal(err) - } else { - continue - } - } - - if def.VersionData.NotVersioned { - t.Fatal("WSDL import must always be versioned") - } - - if len(def.VersionData.Versions) > 1 { - t.Fatal("There should only be one version") - } - - v, ok := def.VersionData.Versions["1.0.0"] - if !ok { - t.Fatal("Version could not be found") - } - - if len(v.ExtendedPaths.TrackEndpoints) != data.noOfEndpoints { - t.Fatalf("Expected %v endpoints, found %v\n", data.noOfEndpoints, len(v.ExtendedPaths.TrackEndpoints)) - } - - for _, endpoint := range data.endpoints { - for _, rewriteData := range v.ExtendedPaths.URLRewrite { - - if rewriteData.Path == endpoint.path { - if rewriteData.Method != endpoint.method { - t.Fatalf("Invalid endpoint method. Expected %s found %s", endpoint.method, rewriteData.Method) - } - - if rewriteData.MatchPattern != endpoint.matchPattern { - t.Fatalf("Invalid matchPattern. Expected %s found %s", endpoint.matchPattern, rewriteData.MatchPattern) - } - - if rewriteData.RewriteTo != endpoint.rewritePath { - t.Fatalf("Invalid rewrite path. Expected %s found %s", endpoint.rewritePath, rewriteData.RewriteTo) - } - } - - } - } - } - } -} - -var holidayService string = ` - - - Web service that calculates holiday dates. 
(Version 2.0.1) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Get the available countries. - - - - - Get the available holidays for a specified country. - - - - - Get the date of a specific holiday. - - - - - Get the holidays for a date range. - - - - - Get the holidays for an entire year. - - - - - Get the holidays for a specific month. - - - - - - - Get the available countries. - - - - - Get the available holidays for a specified country. - - - - - Get the date of a specific holiday. - - - - - Get the holidays for a date range. - - - - - Get the holidays for an entire year. - - - - - Get the holidays for a specific month. - - - - - - - Get the available countries. - - - - - Get the available holidays for a specified country. - - - - - Get the date of a specific holiday. - - - - - Get the holidays for a date range. - - - - - Get the holidays for an entire year. - - - - - Get the holidays for a specific month. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Web service that calculates holiday dates. 
(Version 2.0.1) - - - - - - - - - - - - - - -` - -var smtpExample string = ` - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -` - -var wsdl_2_0_example = ` - - - - - This document describes my Service. You can find additional information in - the following web page: http://yoursite.com/MyService/help.html - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -` diff --git a/apidef/schema.go b/apidef/schema.go index 1d46c8bc92ee..0f74f374d2f8 100644 --- a/apidef/schema.go +++ b/apidef/schema.go @@ -48,12 +48,9 @@ const Schema = `{ "openid_options": { "type": ["object", "null"] }, - "use_standard_auth": { + "use_standard_auth":{ "type": "boolean" }, - "use_go_plugin_auth": { - "type": "boolean" - }, "enable_coprocess_auth": { "type": "boolean" }, diff --git a/gateway/auth_manager.go b/auth_manager.go similarity index 97% rename from gateway/auth_manager.go rename to auth_manager.go index cee4eaf087bf..6b92de35653b 100644 --- a/gateway/auth_manager.go +++ b/auth_manager.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/base64" @@ -30,7 +30,7 @@ type AuthorisationHandler interface { type SessionHandler interface { Init(store storage.Handler) UpdateSession(keyName string, session *user.SessionState, resetTTLTo int64, hashed bool) error - RemoveSession(keyName string, hashed bool) bool + RemoveSession(keyName string, hashed bool) SessionDetail(keyName string, hashed bool) (user.SessionState, bool) Sessions(filter string) []string Store() storage.Handler @@ -261,13 +261,13 @@ func (b *DefaultSessionManager) UpdateSession(keyName string, session *user.Sess } // RemoveSession removes session from storage -func (b *DefaultSessionManager) RemoveSession(keyName string, hashed bool) bool { +func (b *DefaultSessionManager) RemoveSession(keyName string, hashed bool) { defer b.clearCacheForKey(keyName, hashed) if hashed { - return b.store.DeleteRawKey(b.store.GetKeyPrefix() + keyName) + 
b.store.DeleteRawKey(b.store.GetKeyPrefix() + keyName) } else { - return b.store.DeleteKey(keyName) + b.store.DeleteKey(keyName) } } diff --git a/gateway/batch_requests.go b/batch_requests.go similarity index 99% rename from gateway/batch_requests.go rename to batch_requests.go index c5ad20033f84..8822655015dc 100644 --- a/gateway/batch_requests.go +++ b/batch_requests.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/tls" diff --git a/gateway/batch_requests_test.go b/batch_requests_test.go similarity index 94% rename from gateway/batch_requests_test.go rename to batch_requests_test.go index 33e6a758cdc0..e06f6c0eed4b 100644 --- a/gateway/batch_requests_test.go +++ b/batch_requests_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/tls" @@ -40,10 +40,10 @@ const testBatchRequest = `{ }` func TestBatch(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/v1/" spec.EnableBatchRequestSupport = true }) @@ -144,12 +144,12 @@ func TestVirtualEndpointBatch(t *testing.T) { globalConf.Security.Certificates.Upstream = map[string]string{upstreamHost: clientCertID} config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" virtualMeta := apidef.VirtualMeta{ ResponseFunctionName: "batchTest", @@ -158,7 +158,7 @@ func TestVirtualEndpointBatch(t *testing.T) { Path: "/virt", Method: "GET", } - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.UseExtendedPaths = true v.ExtendedPaths = apidef.ExtendedPathsSet{ Virtual: []apidef.VirtualMeta{virtualMeta}, diff --git a/bin/ci-swagger.sh b/bin/ci-swagger.sh deleted file mode 100755 index 
b983e6da62e5..000000000000 --- a/bin/ci-swagger.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -swagger2fileName="swagger2.yaml" -tempOpenAPIFileName="temp-swagger.yml" -tempUpdatedOpenAPIFileName="temp-swagger2.yml" -openAPIspecfileName="swagger.yml" - -fatal() { - echo "$@" >&2 - exit 1 -} - -swagger generate spec -o "$swagger2fileName" - -if [ $? -ne 0 ]; then - fatal "could not generate swagger2.0 spec to the specified path, $swagger2fileName" -fi - -swagger validate "$swagger2fileName" - -if [ $? -ne 0 ]; then - fatal "swagger spec is invalid... swagger spec is located at $swagger2fileName" -fi - -api-spec-converter --from=swagger_2 --to=openapi_3 --syntax=yaml "$swagger2fileName" > "$tempOpenAPIFileName" - -if [ $? -ne 0 ]; then - fatal "could not convert swagger2.0 spec to opeenapi 3.0" -fi - -## clean up -rm "$swagger2fileName" - -## If running this on macOS, you might need to change sed to gsed - -sed -n '1,/components:/p' $openAPIspecfileName > $tempUpdatedOpenAPIFileName - -if [ $? -ne 0 ]; then - fatal "replace operation failed step 1" -fi - -lineToStartReplaceFrom=$(grep -n "responses:" swagger.yml | tail -1 | awk '{split($0,a,":"); print a[1]}') - -sed -n "$lineToStartReplaceFrom,/components:/p" $openAPIspecfileName >> $tempUpdatedOpenAPIFileName -if [ $? -ne 0 ]; then - fatal "replace operation failed" -fi - -mv $tempUpdatedOpenAPIFileName $openAPIspecfileName - -## Ideally, CI should push $openAPIspecfileName to GitHub -## but for now, it can be committed by users and pushed alonside their changes. diff --git a/bin/ci-test.sh b/bin/ci-test.sh deleted file mode 100755 index 883563f096cd..000000000000 --- a/bin/ci-test.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -TEST_TIMEOUT=3m - -# print a command and execute it -show() { - echo "$@" >&2 - eval "$@" -} - -fatal() { - echo "$@" >&2 - exit 1 -} - -race="" -if [[ ${LATEST_GO} ]]; then - FMT_FILES=$(gofmt -l . 
| grep -v vendor) - if [[ -n $FMT_FILES ]]; then - fatal "Run 'gofmt -w' on these files:\n$FMT_FILES" - fi - - echo "gofmt check is ok!" - - IMP_FILES="$(goimports -l . | grep -v vendor)" - if [[ -n $IMP_FILES ]]; then - fatal "Run 'goimports -w' on these files:\n$IMP_FILES" - fi - - echo "goimports check is ok!" - - # Run with race if latest - race="-race" -fi - -PKGS="$(go list -tags "coprocess python grpc" ./...)" - -go get -t - -# build Go-plugin used in tests -go build ${race} -o ./test/goplugins/goplugins.so -buildmode=plugin ./test/goplugins || fatal "building Go-plugin failed" - -for pkg in $PKGS; do - tags="" - - # TODO: Remove skipRace variable after solving race conditions in tests. - skipRace=false - if [[ ${pkg} == *"coprocess/grpc" ]]; then - tags="-tags 'coprocess grpc'" - skipRace=true - elif [[ ${pkg} == *"coprocess/python" ]]; then - tags="-tags 'coprocess python'" - elif [[ ${pkg} == *"coprocess" ]]; then - tags="-tags 'coprocess'" - skipRace=true - elif [[ ${pkg} == *"goplugin" ]]; then - tags="-tags 'goplugin'" - fi - - race="" - - # Some tests should not be run with -race. Therefore, test them with penultimate Go version. - # And, test with -race in latest Go version. 
- if [[ ${LATEST_GO} && ${skipRace} = false ]]; then - race="-race" - fi - - show go test -v ${race} -timeout ${TEST_TIMEOUT} -coverprofile=test.cov $pkg ${tags} || fatal "Test Failed" - show go vet ${tags} $pkg || fatal "go vet errored" -done diff --git a/gateway/cert.go b/cert.go similarity index 99% rename from gateway/cert.go rename to cert.go index cd774d5f3f8b..efdaaa8db8d2 100644 --- a/gateway/cert.go +++ b/cert.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/tls" @@ -257,7 +257,6 @@ func getTLSConfigForClient(baseConfig *tls.Config, listenPort int) func(hello *t } apisMu.RLock() - defer apisMu.RUnlock() // Dynamically add API specific certificates for _, spec := range apiSpecs { @@ -296,6 +295,7 @@ func getTLSConfigForClient(baseConfig *tls.Config, listenPort int) func(hello *t } } } + apisMu.RUnlock() return newConfig, nil } diff --git a/gateway/cert_go1.10_test.go b/cert_go1.10_test.go similarity index 89% rename from gateway/cert_go1.10_test.go rename to cert_go1.10_test.go index 6acb1be940bd..56b4564407fd 100644 --- a/gateway/cert_go1.10_test.go +++ b/cert_go1.10_test.go @@ -1,6 +1,6 @@ // +build go1.10 -package gateway +package main import ( "crypto/tls" @@ -42,12 +42,12 @@ func TestPublicKeyPinning(t *testing.T) { // For host using pinning, it should ignore standard verification in all cases, e.g setting variable below does nothing globalConf.ProxySSLInsecureSkipVerify = false config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.PinnedPublicKeys = map[string]string{"127.0.0.1": pubID} spec.Proxy.TargetURL = upstream.URL @@ -57,10 +57,10 @@ func TestPublicKeyPinning(t *testing.T) { }) t.Run("Pub key not match", func(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) 
{ + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.PinnedPublicKeys = map[string]string{"127.0.0.1": "wrong"} spec.Proxy.TargetURL = upstream.URL @@ -73,12 +73,12 @@ func TestPublicKeyPinning(t *testing.T) { globalConf := config.Global() globalConf.Security.PinnedPublicKeys = map[string]string{"127.0.0.1": "wrong"} config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = upstream.URL }) @@ -95,14 +95,14 @@ func TestPublicKeyPinning(t *testing.T) { globalConf := config.Global() globalConf.ProxySSLInsecureSkipVerify = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() defer proxy.Stop() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = upstream.URL spec.Proxy.Transport.ProxyURL = proxy.URL @@ -124,16 +124,16 @@ func TestProxyTransport(t *testing.T) { // force creating new transport on each reque globalConf.MaxConnTime = -1 config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //matching ciphers t.Run("Global: Cipher match", func(t *testing.T) { globalConf.ProxySSLCipherSuites = []string{"TLS_RSA_WITH_AES_128_CBC_SHA"} config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = upstream.URL }) @@ -143,7 +143,7 @@ func TestProxyTransport(t *testing.T) { t.Run("Global: Cipher not match", func(t *testing.T) { globalConf.ProxySSLCipherSuites = []string{"TLS_RSA_WITH_RC4_128_SHA"} config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + 
buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = upstream.URL }) @@ -153,7 +153,7 @@ func TestProxyTransport(t *testing.T) { t.Run("API: Cipher override", func(t *testing.T) { globalConf.ProxySSLCipherSuites = []string{"TLS_RSA_WITH_RC4_128_SHA"} config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = upstream.URL spec.Proxy.Transport.SSLCipherSuites = []string{"TLS_RSA_WITH_AES_128_CBC_SHA"} @@ -165,7 +165,7 @@ func TestProxyTransport(t *testing.T) { t.Run("API: MinTLS not match", func(t *testing.T) { globalConf.ProxySSLMinVersion = 772 config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = upstream.URL spec.Proxy.Transport.SSLCipherSuites = []string{"TLS_RSA_WITH_AES_128_CBC_SHA"} @@ -177,7 +177,7 @@ func TestProxyTransport(t *testing.T) { t.Run("API: Invalid proxy", func(t *testing.T) { globalConf.ProxySSLMinVersion = 771 config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = upstream.URL spec.Proxy.Transport.SSLCipherSuites = []string{"TLS_RSA_WITH_AES_128_CBC_SHA"} @@ -198,7 +198,7 @@ func TestProxyTransport(t *testing.T) { }) defer proxy.Stop() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.Transport.SSLCipherSuites = []string{"TLS_RSA_WITH_AES_128_CBC_SHA"} spec.Proxy.Transport.ProxyURL = proxy.URL diff --git a/gateway/cert_test.go b/cert_test.go similarity index 77% rename from gateway/cert_test.go rename to cert_test.go index 05a3b0f276de..41fd7310c5c8 100644 --- a/gateway/cert_test.go +++ b/cert_test.go @@ -1,8 +1,7 @@ -package gateway +package main import ( "bytes" - "context" "crypto/rand" "crypto/rsa" "crypto/tls" @@ -16,17 +15,9 @@ 
import ( "net/http/httptest" "os" "path/filepath" - "strings" "testing" "time" - "google.golang.org/grpc" - pb "google.golang.org/grpc/examples/helloworld/helloworld" - - "google.golang.org/grpc/credentials" - - "golang.org/x/net/http2" - "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/certs" "github.com/TykTechnologies/tyk/config" @@ -106,12 +97,12 @@ func TestGatewayTLS(t *testing.T) { globalConf := config.Global() globalConf.HttpServerOptions.UseSSL = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -133,12 +124,12 @@ func TestGatewayTLS(t *testing.T) { }} globalConf.HttpServerOptions.UseSSL = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -155,12 +146,12 @@ func TestGatewayTLS(t *testing.T) { globalConf.HttpServerOptions.SSLCertificates = []string{certPath} globalConf.HttpServerOptions.UseSSL = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -180,12 +171,12 @@ func TestGatewayTLS(t *testing.T) { globalConf.HttpServerOptions.SSLCertificates = []string{certID} globalConf.HttpServerOptions.UseSSL = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -203,7 +194,7 @@ func 
TestGatewayControlAPIMutualTLS(t *testing.T) { globalConf.HttpServerOptions.UseSSL = true globalConf.Security.ControlAPIUseMutualTLS = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() dir, _ := ioutil.TempDir("", "certs") @@ -226,7 +217,7 @@ func TestGatewayControlAPIMutualTLS(t *testing.T) { globalConf.HttpServerOptions.SSLCertificates = []string{certID} config.SetGlobal(globalConf) - ts := StartTest() + ts := newTykTestServer() defer ts.Close() defer func() { @@ -275,9 +266,9 @@ func TestAPIMutualTLS(t *testing.T) { globalConf.ListenPort = 0 globalConf.HttpServerOptions.SSLCertificates = []string{certID} config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // Initialize client certificates @@ -287,7 +278,7 @@ func TestAPIMutualTLS(t *testing.T) { t.Run("API without mutual TLS", func(t *testing.T) { client := getTLSClient(&clientCert, serverCertPem) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Domain = "localhost" spec.Proxy.ListenPath = "/" }) @@ -298,7 +289,7 @@ func TestAPIMutualTLS(t *testing.T) { t.Run("MutualTLSCertificate not set", func(t *testing.T) { client := getTLSClient(nil, nil) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Domain = "localhost" spec.Proxy.ListenPath = "/" spec.UseMutualTLSAuth = true @@ -315,7 +306,7 @@ func TestAPIMutualTLS(t *testing.T) { client := getTLSClient(&clientCert, serverCertPem) clientCertID, _ := CertificateManager.Add(clientCertPem, "") - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Domain = "localhost" spec.Proxy.ListenPath = "/" spec.UseMutualTLSAuth = true @@ -342,7 +333,7 @@ func TestAPIMutualTLS(t *testing.T) { clientCertID2, _ := CertificateManager.Add(clientCertPem2, "") defer CertificateManager.Delete(clientCertID2) - BuildAndLoadAPI(func(spec *APISpec) { + 
buildAndLoadAPI(func(spec *APISpec) { spec.Domain = "localhost" spec.Proxy.ListenPath = "/" spec.UseMutualTLSAuth = true @@ -360,7 +351,7 @@ func TestAPIMutualTLS(t *testing.T) { defer CertificateManager.Delete(clientCertID) loadAPIS := func(certs ...string) { - BuildAndLoadAPI( + buildAndLoadAPI( func(spec *APISpec) { spec.Proxy.ListenPath = "/with_mutual" spec.UseMutualTLSAuth = true @@ -457,9 +448,9 @@ func TestUpstreamMutualTLS(t *testing.T) { globalConf := config.Global() globalConf.ProxySSLInsecureSkipVerify = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() clientCertID, _ := CertificateManager.Add(combinedClientPEM, "") @@ -467,7 +458,7 @@ func TestUpstreamMutualTLS(t *testing.T) { pool.AddCert(clientCert.Leaf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = upstream.URL spec.UpstreamCertificates = map[string]string{ @@ -492,12 +483,12 @@ func TestKeyWithCertificateTLS(t *testing.T) { globalConf.HttpServerOptions.UseSSL = true globalConf.HttpServerOptions.SSLCertificates = []string{serverCertID} config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.BaseIdentityProvidedBy = apidef.AuthToken spec.Auth.UseCertificate = true @@ -511,7 +502,7 @@ func TestKeyWithCertificateTLS(t *testing.T) { }) t.Run("Cert known", func(t *testing.T) { - CreateSession(func(s *user.SessionState) { + createSession(func(s *user.SessionState) { s.Certificate = clientCertID s.AccessRights = map[string]user.AccessDefinition{"test": { APIID: "test", Versions: []string{"v1"}, @@ -531,9 +522,9 @@ func TestAPICertificate(t *testing.T) { globalConf.HttpServerOptions.UseSSL = true 
globalConf.HttpServerOptions.SSLCertificates = []string{} config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() client := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{ @@ -541,7 +532,7 @@ func TestAPICertificate(t *testing.T) { }}} t.Run("Cert set via API", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Certificates = []string{serverCertID} spec.UseKeylessAccess = true spec.Proxy.ListenPath = "/" @@ -551,7 +542,7 @@ func TestAPICertificate(t *testing.T) { }) t.Run("Cert unknown", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = true spec.Proxy.ListenPath = "/" }) @@ -567,7 +558,7 @@ func TestCertificateHandlerTLS(t *testing.T) { clientPEM, _, _, clientCert := genCertificate(&x509.Certificate{}) clientCertID := certs.HexSHA256(clientCert.Certificate[0]) - ts := StartTest() + ts := newTykTestServer() defer ts.Close() t.Run("List certificates, empty", func(t *testing.T) { @@ -626,12 +617,12 @@ func TestCipherSuites(t *testing.T) { globalConf.HttpServerOptions.Ciphers = []string{"TLS_RSA_WITH_RC4_128_SHA", "TLS_RSA_WITH_3DES_EDE_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA"} globalConf.HttpServerOptions.SSLCertificates = []string{serverCertID} config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -657,151 +648,3 @@ func TestCipherSuites(t *testing.T) { ts.Run(t, test.TestCase{Client: client, Path: "/", ErrorMatch: "tls: handshake failure"}) }) } - -func TestHTTP2(t *testing.T) { - expected := "HTTP/2.0" - - // Certificates - _, _, _, clientCert := genCertificate(&x509.Certificate{}) - serverCertPem, _, combinedPEM, _ := 
genServerCertificate() - certID, _ := CertificateManager.Add(combinedPEM, "") - defer CertificateManager.Delete(certID) - - // Upstream server supporting HTTP/2 - upstream := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - actual := r.Proto - if expected != actual { - t.Fatalf("Tyk-Upstream connection protocol is expected %s, actual %s", expected, actual) - } - - fmt.Fprintln(w, "Hello, I am an HTTP/2 Server") - - })) - upstream.TLS = new(tls.Config) - upstream.TLS.NextProtos = []string{"h2"} - upstream.StartTLS() - defer upstream.Close() - - // Tyk - globalConf := config.Global() - globalConf.ProxySSLInsecureSkipVerify = true - globalConf.ProxyEnableHttp2 = true - globalConf.HttpServerOptions.EnableHttp2 = true - globalConf.HttpServerOptions.SSLCertificates = []string{certID} - globalConf.HttpServerOptions.UseSSL = true - config.SetGlobal(globalConf) - defer ResetTestConfig() - - ts := StartTest() - defer ts.Close() - - BuildAndLoadAPI(func(spec *APISpec) { - spec.Proxy.ListenPath = "/" - spec.UseKeylessAccess = true - spec.Proxy.TargetURL = upstream.URL - }) - - // HTTP/2 client - http2Client := getTLSClient(&clientCert, serverCertPem) - http2.ConfigureTransport(http2Client.Transport.(*http.Transport)) - - ts.Run(t, test.TestCase{Client: http2Client, Path: "", Code: 200, Proto: "HTTP/2.0", BodyMatch: "Hello, I am an HTTP/2 Server"}) -} - -// server is used to implement helloworld.GreeterServer. 
-type server struct{} - -// SayHello implements helloworld.GreeterServer -func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { - log.Printf("Received: %v", in.Name) - return &pb.HelloReply{Message: "Hello " + in.Name}, nil -} - -func TestGRPC(t *testing.T) { - - _, _, combinedPEM, _ := genServerCertificate() - certID, _ := CertificateManager.Add(combinedPEM, "") - defer CertificateManager.Delete(certID) - - // gRPC server - s := startGRPCServer(t) - defer s.GracefulStop() - - // Tyk - globalConf := config.Global() - globalConf.ProxySSLInsecureSkipVerify = true - globalConf.ProxyEnableHttp2 = true - globalConf.HttpServerOptions.EnableHttp2 = true - globalConf.HttpServerOptions.SSLCertificates = []string{certID} - globalConf.HttpServerOptions.UseSSL = true - config.SetGlobal(globalConf) - defer ResetTestConfig() - - ts := StartTest() - defer ts.Close() - - BuildAndLoadAPI(func(spec *APISpec) { - spec.Proxy.ListenPath = "/" - spec.UseKeylessAccess = true - spec.Proxy.TargetURL = "https://localhost:50051" - }) - - address := strings.TrimPrefix(ts.URL, "https://") - name := "Furkan" - - // gRPC client - creds := credentials.NewTLS(&tls.Config{ - InsecureSkipVerify: true, - }) - - conn, err := grpc.Dial(address, grpc.WithTransportCredentials(creds)) - if err != nil { - t.Fatalf("did not connect: %v", err) - } - defer conn.Close() - c := pb.NewGreeterClient(conn) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - r, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) - if err != nil { - t.Fatalf("could not greet: %v", err) - } - - // Test result - expected := "Hello " + name - actual := r.Message - - if expected != actual { - t.Fatalf("Expected %s, actual %s", expected, actual) - } -} - -func startGRPCServer(t *testing.T) *grpc.Server { - // Server - lis, err := net.Listen("tcp", ":50051") - if err != nil { - t.Fatalf("failed to listen: %v", err) - } - - cert, key, _, _ := 
genCertificate(&x509.Certificate{}) - certificate, _ := tls.X509KeyPair(cert, key) - creds := credentials.NewServerTLSFromCert(&certificate) - - if err != nil { - t.Fatalf("failed to listen: %v", err) - } - - s := grpc.NewServer(grpc.Creds(creds)) - - pb.RegisterGreeterServer(s, &server{}) - - go func() { - err := s.Serve(lis) - if err != nil { - t.Fatalf("failed to serve: %v", err) - } - }() - - return s -} diff --git a/certs/manager.go b/certs/manager.go index 850c0c2d212e..9dc540ff26c3 100644 --- a/certs/manager.go +++ b/certs/manager.go @@ -19,7 +19,7 @@ import ( "time" "github.com/Sirupsen/logrus" - cache "github.com/pmylund/go-cache" + "github.com/pmylund/go-cache" ) // StorageHandler is a standard interface to a storage backend, diff --git a/checkup/checkup.go b/checkup/checkup.go index f27125aefa0d..6b11bfa848ae 100644 --- a/checkup/checkup.go +++ b/checkup/checkup.go @@ -9,7 +9,7 @@ import ( ) var ( - log = logger.Get().WithField("prefix", "checkup") + log = logger.Get() defaultConfigs = config.Config{ Secret: "352d20ee67be67f6340b4c0605b044b7", NodeSecret: "352d20ee67be67f6340b4c0605b044b7", @@ -21,37 +21,8 @@ const ( minFileDescriptors = 80000 ) -func Run(c config.Config) { - legacyRateLimiters(c) - allowInsecureConfigs(c) - healthCheck(c) - fileDescriptors() - cpus() - defaultSecrets(c) -} +func CheckFileDescriptors() { -func legacyRateLimiters(c config.Config) { - if c.ManagementNode { - return - } - if c.EnableSentinelRateLimiter || c.EnableRedisRollingLimiter { - log.Warning("SentinelRateLimiter & RedisRollingLimiter are deprecated") - } -} - -func allowInsecureConfigs(c config.Config) { - if c.AllowInsecureConfigs { - log.Warning("Insecure configuration allowed: allow_insecure_configs: true") - } -} - -func healthCheck(c config.Config) { - if c.HealthCheck.EnableHealthChecks { - log.Warn("Health Checker is deprecated and not recommended") - } -} - -func fileDescriptors() { rlimit := &syscall.Rlimit{} err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, 
rlimit) if err == nil && rlimit.Cur < minFileDescriptors { @@ -61,7 +32,8 @@ func fileDescriptors() { } } -func cpus() { +func CheckCpus() { + cpus := runtime.NumCPU() if cpus < minCPU { log.Warningf("Num CPUs %d too low for production use. Min %d recommended.\n"+ @@ -70,7 +42,8 @@ func cpus() { } } -func defaultSecrets(c config.Config) { +func CheckDefaultSecrets(c config.Config) { + if c.Secret == defaultConfigs.Secret { log.Warningf("Default secret `%s` should be changed for production.", defaultConfigs.Secret) } diff --git a/cli/bundler/bundler_test.go b/cli/bundler/bundler_test.go index 5a8651ce55d4..0941a4227d69 100644 --- a/cli/bundler/bundler_test.go +++ b/cli/bundler/bundler_test.go @@ -82,7 +82,7 @@ func TestBuild(t *testing.T) { filename := writeManifestFile(t, "{", defaultManifestPath) bundler.manifestPath = filename err = bundler.Build(ctx) - if !strings.Contains(err.Error(), "unexpected end of JSON input") { + if !strings.Contains("unexpected end of JSON input", err.Error()) { t.Fatalf("Expected JSON error, got: %s", err.Error()) } filename = writeManifestFile(t, &apidef.BundleManifest{ diff --git a/cli/cli.go b/cli/cli.go index 5ca82028053d..1ee3d1470b9f 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -2,15 +2,14 @@ package cli import ( "fmt" - "io/ioutil" "os" - "github.com/TykTechnologies/tyk/cli/linter" - kingpin "gopkg.in/alecthomas/kingpin.v2" "github.com/TykTechnologies/tyk/cli/bundler" "github.com/TykTechnologies/tyk/cli/importer" + "github.com/TykTechnologies/tyk/cli/lint" + logger "github.com/TykTechnologies/tyk/log" ) @@ -74,11 +73,7 @@ func Init(version string, confPaths []string) { // Linter: lintCmd := app.Command("lint", "Runs a linter on Tyk configuration file") lintCmd.Action(func(c *kingpin.ParseContext) error { - confSchema, err := ioutil.ReadFile("cli/linter/schema.json") - if err != nil { - return err - } - path, lines, err := linter.Run(string(confSchema), confPaths) + path, lines, err := lint.Run(confPaths) if err != nil { 
fmt.Fprintln(os.Stderr, err) os.Exit(1) diff --git a/cli/importer/importer.go b/cli/importer/importer.go index 8e64bc135e0a..ffd908be3147 100644 --- a/cli/importer/importer.go +++ b/cli/importer/importer.go @@ -17,7 +17,7 @@ import ( const ( cmdName = "import" - cmdDesc = "Imports a BluePrint/Swagger/WSDL file" + cmdDesc = "Imports a BluePrint/Swagger file" ) var ( @@ -30,8 +30,6 @@ type Importer struct { input *string swaggerMode *bool bluePrintMode *bool - wsdlMode *bool - portNames *string createAPI *bool orgID *string upstreamTarget *string @@ -47,11 +45,9 @@ func init() { // AddTo initializes an importer object. func AddTo(app *kingpin.Application) { cmd := app.Command(cmdName, cmdDesc) - imp.input = cmd.Arg("input file", "e.g. blueprint.json, swagger.json, service.wsdl etc.").String() + imp.input = cmd.Arg("input file", "e.g. blueprint.json, swagger.json, etc.").String() imp.swaggerMode = cmd.Flag("swagger", "Use Swagger mode").Bool() imp.bluePrintMode = cmd.Flag("blueprint", "Use BluePrint mode").Bool() - imp.wsdlMode = cmd.Flag("wsdl", "Use WSDL mode").Bool() - imp.portNames = cmd.Flag("port-names", "Specify port name of each service in the WSDL file. 
Input format is comma separated list of serviceName:portName").String() imp.createAPI = cmd.Flag("create-api", "Creates a new API definition from the blueprint").Bool() imp.orgID = cmd.Flag("org-id", "assign the API Definition to this org_id (required with create-api").String() imp.upstreamTarget = cmd.Flag("upstream-target", "set the upstream target for the definition").PlaceHolder("URL").String() @@ -75,12 +71,6 @@ func (i *Importer) Import(ctx *kingpin.ParseContext) (err error) { log.Fatal(err) os.Exit(1) } - } else if *i.wsdlMode { - err = i.handleWSDLMode() - if err != nil { - log.Fatal(err) - os.Exit(1) - } } else { log.Fatal(errUnknownMode) os.Exit(1) @@ -89,42 +79,6 @@ func (i *Importer) Import(ctx *kingpin.ParseContext) (err error) { return nil } -func (i *Importer) validateInput() error { - - if *i.createAPI { - if *i.upstreamTarget == "" || *i.orgID == "" { - return fmt.Errorf("No upstream target or org ID defined, these are both required") - } - } else { - if *i.forAPI == "" { - return fmt.Errorf("If adding to an API, the path to the definition must be listed") - } - - if *i.asVersion == "" { - return fmt.Errorf("No version defined for this import operation, please set an import ID using the --as-version flag") - } - } - - return nil -} - -func (i *Importer) processPortNames() map[string]string { - p := make(map[string]string) - - if *i.portNames == "" { - return p - } - - pairs := strings.Split(*i.portNames, ",") - - for _, v := range pairs { - components := strings.Split(v, ":") - p[components[0]] = components[1] - } - - return p -} - func (i *Importer) handleBluePrintMode() error { if !*i.createAPI { // Different branch, here we need an API Definition to modify @@ -232,52 +186,6 @@ func (i *Importer) handleSwaggerMode() error { return nil } -func (i *Importer) handleWSDLMode() error { - var def *apidef.APIDefinition - - //Process Input - if err := i.validateInput(); err != nil { - return err - } - serviceportMapping := i.processPortNames() - - //Load 
WSDL file - w, err := i.wsdlLoadFile(*i.input) - if err != nil { - return fmt.Errorf("File load error: %v", err) - } - - w.SetServicePortMapping(serviceportMapping) - - if *i.createAPI { - //Create new API - def, err = w.ToAPIDefinition(*i.orgID, *i.upstreamTarget, *i.asMock) - if err != nil { - return fmt.Errorf("Failed to create API Defintition from file") - } - } else { - //Add into existing API - def, err = i.apiDefLoadFile(*i.forAPI) - if err != nil { - return fmt.Errorf("failed to load and decode file data for API Definition: %v", err) - } - - versionData, err := w.ConvertIntoApiVersion(*i.asMock) - if err != nil { - return fmt.Errorf("Conversion into API Def failed: %v", err) - } - - if err := w.InsertIntoAPIDefinitionAsVersion(versionData, def, *i.asVersion); err != nil { - return fmt.Errorf("Insertion failed: %v", err) - } - - } - - i.printDef(def) - - return nil -} - func (i *Importer) printDef(def *apidef.APIDefinition) { asJSON, err := json.MarshalIndent(def, "", " ") if err != nil { @@ -307,24 +215,6 @@ func (i *Importer) swaggerLoadFile(path string) (*importer.SwaggerAST, error) { return swagger.(*importer.SwaggerAST), nil } -func (i *Importer) wsdlLoadFile(path string) (*importer.WSDLDef, error) { - wsdl, err := importer.GetImporterForSource(importer.WSDLSource) - if err != nil { - return nil, err - } - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - if err := wsdl.LoadFrom(f); err != nil { - return nil, err - } - - return wsdl.(*importer.WSDLDef), nil -} - func (i *Importer) bluePrintLoadFile(path string) (*importer.BluePrintAST, error) { blueprint, err := importer.GetImporterForSource(importer.ApiaryBluePrint) if err != nil { diff --git a/cli/linter/linter.go b/cli/lint/lint.go similarity index 95% rename from cli/linter/linter.go rename to cli/lint/lint.go index e743fac595c4..d63f00908ce5 100644 --- a/cli/linter/linter.go +++ b/cli/lint/lint.go @@ -1,4 +1,4 @@ -package linter +package lint import ( 
"encoding/json" @@ -14,13 +14,13 @@ import ( // Run will lint the configuration file. It will return the path to the // config file that was checked, a list of warnings and an error, if any // happened. -func Run(schm string, paths []string) (string, []string, error) { +func Run(paths []string) (string, []string, error) { addFormats(&schema.FormatCheckers) var conf config.Config if err := config.Load(paths, &conf); err != nil { return "", nil, err } - schemaLoader := schema.NewBytesLoader([]byte(schm)) + schemaLoader := schema.NewBytesLoader([]byte(confSchema)) var orig map[string]interface{} f, err := os.Open(conf.OriginalPath) diff --git a/cli/linter/linter_test.go b/cli/lint/lint_test.go similarity index 76% rename from cli/linter/linter_test.go rename to cli/lint/lint_test.go index 193aaf53dcb7..fb874dd4a8aa 100644 --- a/cli/linter/linter_test.go +++ b/cli/lint/lint_test.go @@ -1,4 +1,4 @@ -package linter +package lint import ( "encoding/json" @@ -75,22 +75,6 @@ var tests = []struct { "BadPolicySource", `{"policies": {"policy_source": "internet"}}`, `policies.policy_source: policies.policy_source must be one of the following: "", "service", "rpc"`, }, - { - "MalformedDnsCacheEntry", `{"dns_cache": { "enabled": true, "tttl": 10} }`, - `tttl: Additional property tttl is not allowed`, - }, - { - "BadDnsCacheTTL", `{"dns_cache": { "enabled": false, "ttl": -2 } }`, - `dns_cache.ttl: Must be greater than or equal to -1`, - }, - { - "ExtraDnsCacheCheckInterval", `{"dns_cache": { "enabled": true, "ttl": -1, "check_interval": 2500 } }`, - `check_interval: Additional property check_interval is not allowed`, - }, - { - "InvalidDnsCacheMultipleIPsHandleStrategy", `{"dns_cache": { "enabled": true, "ttl": 1, "multiple_ips_handle_strategy": "true" } }`, - `dns_cache.multiple_ips_handle_strategy: dns_cache.multiple_ips_handle_strategy must be one of the following: "pick_first", "random", "no_cache"`, - }, } func allContains(got, want []string) bool { @@ -116,13 +100,7 @@ func 
TestLint(t *testing.T) { t.Fatal(err) } f.Close() - - confSchema, err := ioutil.ReadFile("cli/linter/schema.json") - if err != nil { - t.Fatal(err) - } - - _, got, err := Run(string(confSchema), []string{f.Name()}) + _, got, err := Run([]string{f.Name()}) if err != nil { got = []string{err.Error()} } diff --git a/cli/lint/schema.go b/cli/lint/schema.go new file mode 100644 index 000000000000..1948496134b4 --- /dev/null +++ b/cli/lint/schema.go @@ -0,0 +1,781 @@ +package lint + +const confSchema = `{ +"$schema": "http://json-schema.org/draft-04/schema#", +"type": "object", +"additionalProperties": false, +"definitions": { + "StorageOptions": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "database": { + "type": "integer" + }, + "enable_cluster": { + "type": "boolean" + }, + "use_ssl":{ + "type": "boolean" + }, + "ssl_insecure_skip_verify":{ + "type": "boolean" + }, + "host": { + "type": "string", + "format": "host-no-port" + }, + "hosts": { + "type": ["array", "null"] + }, + "optimisation_max_active": { + "type": "integer" + }, + "optimisation_max_idle": { + "type": "integer" + }, + "timeout": { + "type": "integer" + }, + "password": { + "type": "string" + }, + "port": { + "type": "integer" + }, + "type": { + "type": "string", + "enum": ["", "redis"] + }, + "username": { + "type": "string" + } + } + } +}, +"properties": { + "allow_insecure_configs": { + "type": "boolean" + }, + "allow_master_keys": { + "type": "boolean" + }, + "allow_remote_config": { + "type": "boolean" + }, + "analytics_config": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "enable_detailed_recording": { + "type": "boolean" + }, + "enable_geo_ip": { + "type": "boolean" + }, + "geo_ip_db_path": { + "type": "string", + "format": "path" + }, + "ignored_ips": { + "type": ["array", "null"] + }, + "normalise_urls": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "custom_patterns": { + 
"type": ["array", "null"] + }, + "enabled": { + "type": "boolean" + }, + "normalise_numbers": { + "type": "boolean" + }, + "normalise_uuids": { + "type": "boolean" + } + } + }, + "pool_size": { + "type": "integer" + }, + "records_buffer_size": { + "type": "integer" + }, + "storage_expiration_time": { + "type": "integer" + }, + "type": { + "type": "string" + } + } + }, + "app_path": { + "type": "string", + "format": "path" + }, + "auth_override": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "auth_provider": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "meta": { + "type": ["array", "null"] + }, + "name": { + "type": "string" + }, + "storage_engine": { + "type": "string" + } + } + }, + "force_auth_provider": { + "type": "boolean" + }, + "force_session_provider": { + "type": "boolean" + }, + "session_provider": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "meta": { + "type": ["array", "null"] + }, + "name": { + "type": "string" + }, + "storage_engine": { + "type": "string" + } + } + } + } + }, + "bundle_base_url": { + "type": "string" + }, + "cache_storage": { + "$ref": "#/definitions/StorageOptions" + }, + "close_connections": { + "type": "boolean" + }, + "proxy_close_connections": { + "type": "boolean" + }, + "close_idle_connections": { + "type": "boolean" + }, + "control_api_hostname": { + "type": "string" + }, + "control_api_port": { + "type": "integer" + }, + "coprocess_options": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "coprocess_grpc_server": { + "type": "string" + }, + "enable_coprocess": { + "type": "boolean" + }, + "python_path_prefix": { + "type": "string" + } + } + }, + "db_app_conf_options": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "connection_string": { + "type": "string" + }, + "node_is_segmented": { + "type": "boolean" + }, + "tags": { + "type": 
["array", "null"], + "items": { + "type": "string" + } + } + } + }, + "version_header":{ + "type": "string" + }, + "disable_dashboard_zeroconf": { + "type": "boolean" + }, + "disable_virtual_path_blobs": { + "type": "boolean" + }, + "drl_notification_frequency": { + "type": "integer" + }, + "enable_analytics": { + "type": "boolean" + }, + "enable_api_segregation": { + "type": "boolean" + }, + "enable_bundle_downloader": { + "type": "boolean" + }, + "enable_custom_domains": { + "type": "boolean" + }, + "enable_jsvm": { + "type": "boolean" + }, + "jsvm_timeout": { + "type": "integer" + }, + "enable_non_transactional_rate_limiter": { + "type": "boolean" + }, + "enable_redis_rolling_limiter": { + "type": "boolean" + }, + "enable_sentinel_rate_limiter": { + "type": "boolean" + }, + "enable_separate_cache_store": { + "type": "boolean" + }, + "enforce_org_data_age": { + "type": "boolean" + }, + "enforce_org_data_detail_logging": { + "type": "boolean" + }, + "enforce_org_quotas": { + "type": "boolean" + }, + "event_handlers": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "events": { + "type": ["object", "null"], + "additionalProperties": false + } + } + }, + "event_trigers_defunct": { + "type": ["array", "null"] + }, + "event_triggers_defunct": { + "type": [ + "array", + "null" + ] + }, + "experimental_process_org_off_thread": { + "type": "boolean" + }, + "force_global_session_lifetime": { + "type": "boolean" + }, + "global_session_lifetime": { + "type": "integer" + }, + "graylog_network_addr": { + "type": "string" + }, + "hash_keys": { + "type": "boolean" + }, + "hash_key_function": { + "type": "string", + "enum": ["", "murmur32", "murmur64", "murmur128", "sha256"] + }, + "health_check": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "enable_health_checks": { + "type": "boolean" + }, + "health_check_value_timeouts": { + "type": "integer" + } + } + }, + "hide_generator_header": { + "type": 
"boolean" + }, + "hostname": { + "type": "string" + }, + "http_server_options": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "certificates": { + "type": ["array", "null"], + "items": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "domain_name": { + "type": "string" + }, + "cert_file": { + "type": "string" + }, + "key_file": { + "type": "string" + } + } + } + }, + "enable_websockets": { + "type": "boolean" + }, + "flush_interval": { + "type": "integer" + }, + "min_version": { + "type": "integer" + }, + "override_defaults": { + "type": "boolean" + }, + "read_timeout": { + "type": "integer" + }, + "server_name": { + "type": "string" + }, + "skip_url_cleaning": { + "type": "boolean" + }, + "skip_target_path_escaping": { + "type": "boolean" + }, + "ssl_insecure_skip_verify": { + "type": "boolean" + }, + "use_ssl": { + "type": "boolean" + }, + "use_ssl_le": { + "type": "boolean" + }, + "enable_http2": { + "type": "boolean" + }, + "write_timeout": { + "type": "integer" + }, + "ssl_certificates": { + "type": ["array", "null"], + "items": { + "type": "string" + } + }, + "ssl_ciphers":{ + "type": ["array", "null"], + "items": { + "type": "string" + } + } + } + }, + "legacy_enable_allowance_countdown": { + "type": "boolean" + }, + "listen_address": { + "type": "string", + "format": "host-no-port" + }, + "listen_port": { + "type": "integer" + }, + "local_session_cache": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "cached_session_eviction": { + "type": "integer" + }, + "cached_session_timeout": { + "type": "integer" + }, + "disable_cached_session_state": { + "type": "boolean" + } + } + }, + "log_level": { + "type": "string", + "enum": ["", "debug", "info", "warn", "error"] + }, + "logstash_network_addr": { + "type": "string" + }, + "logstash_transport": { + "type": "string" + }, + "management_node": { + "type": "boolean" + }, + "max_idle_connections_per_host": 
{ + "type": "integer" + }, + "max_idle_connections": { + "type": "integer" + }, + "max_conn_time": { + "type": "integer" + }, + "middleware_path": { + "type": "string", + "format": "path" + }, + "monitor": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "configuration": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "event_timeout": { + "type": "integer" + }, + "header_map": { + "type": ["array", "null"] + }, + "method": { + "type": "string" + }, + "target_path": { + "type": "string" + }, + "template_path": { + "type": "string", + "format": "path" + } + } + }, + "enable_trigger_monitors": { + "type": "boolean" + }, + "global_trigger_limit": { + "type": "integer" + }, + "monitor_org_keys": { + "type": "boolean" + }, + "monitor_user_keys": { + "type": "boolean" + } + } + }, + "node_secret": { + "type": "string" + }, + "oauth_redirect_uri_separator": { + "type": "string" + }, + "oauth_refresh_token_expire": { + "type": "integer" + }, + "oauth_token_expire": { + "type": "integer" + }, + "oauth_token_expired_retain_period": { + "type": "integer" + }, + "optimisations_use_async_session_write": { + "type": "boolean" + }, + "session_update_pool_size":{ + "type": "integer" + }, + "session_update_buffer_size":{ + "type": "integer" + }, + "pid_file_location": { + "type": "string" + }, + "policies": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "allow_explicit_policy_id": { + "type": "boolean" + }, + "policy_connection_string": { + "type": "string" + }, + "policy_record_name": { + "type": "string" + }, + "policy_source": { + "type": "string", + "enum": ["", "service", "rpc"] + } + } + }, + "proxy_default_timeout": { + "type": "integer" + }, + "proxy_enable_http2": { + "type": "boolean" + }, + "proxy_ssl_insecure_skip_verify": { + "type": "boolean" + }, + "proxy_ssl_min_version": { + "type": "integer" + }, + "proxy_ssl_ciphers": { + "type": ["array", "null"], + 
"items": { + "type": "string" + } + }, + "public_key_path": { + "type": "string", + "format": "path" + }, + "reload_wait_time": { + "type": "integer" + }, + "secret": { + "type": "string" + }, + "sentry_code": { + "type": "string" + }, + "service_discovery": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "default_cache_timeout": { + "type": "integer" + } + } + }, + "slave_options": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "api_key": { + "type": "string" + }, + "bind_to_slugs": { + "type": "boolean" + }, + "call_timeout": { + "type": "integer" + }, + "connection_string": { + "type": "string" + }, + "disable_keyspace_sync": { + "type": "boolean" + }, + "enable_rpc_cache": { + "type": "boolean" + }, + "group_id": { + "type": "string" + }, + "ping_timeout": { + "type": "integer" + }, + "rpc_key": { + "type": "string" + }, + "ssl_insecure_skip_verify": { + "type": "boolean" + }, + "use_rpc": { + "type": "boolean" + }, + "use_ssl": { + "type": "boolean" + }, + "rpc_pool_size": { + "type": "integer" + } + } + }, + "statsd_connection_string": { + "type": "string" + }, + "statsd_prefix": { + "type": "string" + }, + "storage": { + "$ref": "#/definitions/StorageOptions" + }, + "suppress_default_org_store": { + "type": "boolean" + }, + "suppress_redis_signal_reload": { + "type": "boolean" + }, + "syslog_network_addr": { + "type": "string" + }, + "syslog_transport": { + "type": "string" + }, + "template_path": { + "type": "string", + "format": "path" + }, + "tyk_js_path": { + "type": "string", + "format": "path" + }, + "uptime_tests": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "config": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "checker_pool_size": { + "type": "integer" + }, + "enable_uptime_analytics": { + "type": "boolean" + }, + "failure_trigger_sample_size": { + "type": "integer" + }, + "time_wait": { + 
"type": "integer" + } + } + }, + "disable": { + "type": "boolean" + } + } + }, + "use_db_app_configs": { + "type": "boolean" + }, + "use_graylog": { + "type": "boolean" + }, + "use_logstash": { + "type": "boolean" + }, + "use_redis_log": { + "type": "boolean" + }, + "use_sentry": { + "type": "boolean" + }, + "use_syslog": { + "type": "boolean" + }, + "security": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "private_certificate_encoding_secret": { + "type": "string" + }, + "control_api_use_mutual_tls": { + "type": "boolean" + }, + "pinned_public_keys": { + "type": ["array", "null"], + "items": { + "type": "object" + } + }, + "certificates": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "upstream": { + "type": ["object", "null"] + }, + "apis": { + "type": ["array", "null"], + "items": { + "type": "string" + } + }, + "control_api": { + "type": ["array", "null"], + "items": { + "type": "string" + } + }, + "dashboard_api": { + "type": ["array", "null"], + "items": { + "type": "string" + } + }, + "mdcb_api": { + "type": ["array", "null"], + "items": { + "type": "string" + } + } + } + } + } + }, + "enable_key_logging": { + "type": "boolean" + }, + "newrelic": { + "type": ["object", "null"], + "additionalProperties": false, + "properties": { + "app_name": { + "type": "string" + }, + "license_key": { + "type": "string" + } + } + }, + "enable_hashed_keys_listing": { + "type": "boolean" + }, + "min_token_length": { + "type": "integer" + }, + "disable_regexp_cache": { + "type": "boolean" + }, + "regexp_cache_expire": { + "type": "integer" + }, + "proxy_ssl_disable_renegotiation": { + "type": "boolean" + }, + "health_check_endpoint_name": { + "type": "string" + }, + "enable_http_profiler": { + "type": "boolean" + } +} +}` diff --git a/cli/linter/schema.json b/cli/linter/schema.json index feb7349fa6a8..f0a15d33360e 100644 --- a/cli/linter/schema.json +++ b/cli/linter/schema.json @@ -532,27 +532,6 @@ 
"error" ] }, - "enable_http_profiler": { - "type": "boolean" - }, - "tracing": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string" - }, - "enabled": { - "type": "boolean" - }, - "options": { - "type": [ - "object", - "null" - ] - } - } - }, "logstash_network_addr": { "type": "string" }, @@ -961,9 +940,6 @@ }, "health_check_endpoint_name": { "type": "string" - }, - "enable_http_profiler": { - "type": "boolean" } } } \ No newline at end of file diff --git a/config/config.go b/config/config.go index f5dcde41204b..cf3a1441e8c0 100644 --- a/config/config.go +++ b/config/config.go @@ -6,8 +6,6 @@ import ( "io" "io/ioutil" "os" - "path/filepath" - "runtime" "sync" "sync/atomic" @@ -18,50 +16,10 @@ import ( "github.com/TykTechnologies/tyk/regexp" ) -type IPsHandleStrategy string - -var ( - log = logger.Get() - global atomic.Value - globalMu sync.Mutex - - Default = Config{ - ListenPort: 8080, - Secret: "352d20ee67be67f6340b4c0605b044b7", - TemplatePath: "templates", - MiddlewarePath: "middleware", - AppPath: "apps/", - Storage: StorageOptionsConf{ - Type: "redis", - Host: "localhost", - MaxIdle: 100, - Port: 6379, - }, - AnalyticsConfig: AnalyticsConfigConfig{ - IgnoredIPs: make([]string, 0), - }, - DnsCache: DnsCacheConfig{ - Enabled: false, - TTL: dnsCacheDefaultTtl, - CheckInterval: dnsCacheDefaultCheckInterval, - MultipleIPsHandleStrategy: NoCacheStrategy, - }, - } -) - -const ( - envPrefix = "TYK_GW" - - dnsCacheDefaultTtl = 3600 - dnsCacheDefaultCheckInterval = 60 +var log = logger.Get() - PickFirstStrategy IPsHandleStrategy = "pick_first" - RandomStrategy IPsHandleStrategy = "random" - NoCacheStrategy IPsHandleStrategy = "no_cache" - - DefaultDashPolicySource = "service" - DefaultDashPolicyRecordName = "tyk_policies" -) +var global atomic.Value +var globalMu sync.Mutex type PoliciesConfig struct { PolicySource string `json:"policy_source"` @@ -124,13 +82,6 @@ type HealthCheckConfig struct { HealthCheckValueTimeout 
int64 `json:"health_check_value_timeouts"` } -type DnsCacheConfig struct { - Enabled bool `json:"enabled"` - TTL int64 `json:"ttl"` - CheckInterval int64 `json:"-" ignored:"true"` //controls cache cleanup interval. By convention shouldn't be exposed to config or env_variable_setup - MultipleIPsHandleStrategy IPsHandleStrategy `json:"multiple_ips_handle_strategy"` -} - type MonitorConfig struct { EnableTriggerMonitors bool `json:"enable_trigger_monitors"` Config WebHookHandlerConf `json:"configuration"` @@ -175,7 +126,6 @@ type HttpServerOptionsConfig struct { WriteTimeout int `json:"write_timeout"` UseSSL bool `json:"use_ssl"` UseLE_SSL bool `json:"use_ssl_le"` - EnableHttp2 bool `json:"enable_http2"` SSLInsecureSkipVerify bool `json:"ssl_insecure_skip_verify"` EnableWebSockets bool `json:"enable_websockets"` Certificates []CertData `json:"certificates"` @@ -237,22 +187,6 @@ type NewRelicConfig struct { LicenseKey string `json:"license_key"` } -type Tracer struct { - // The name of the tracer to initialize. For instance appdash, to use appdash - // tracer - Name string `json:"name"` - - // If true then this tracer will be activated and all tracing data will be sent - // to this tracer.NoOp tracer is used otherwise which collects traces but - // discard them. - Enabled bool `json:"enabled"` - - // Key value pairs used to initialize the tracer. These are tracer specific, - // each tracer requires different options to operate. Please see trace package - // for options required by supported tracer implementation. - Options map[string]interface{} `json:"options"` -} - // Config is the configuration object used by tyk to set up various parameters. type Config struct { // OriginalPath is the path to the config file that was read. If @@ -260,144 +194,110 @@ type Config struct { // was written. 
OriginalPath string `json:"-"` - HostName string `json:"hostname"` - ListenAddress string `json:"listen_address"` - ListenPort int `json:"listen_port"` - ControlAPIHostname string `json:"control_api_hostname"` - ControlAPIPort int `json:"control_api_port"` - Secret string `json:"secret"` - NodeSecret string `json:"node_secret"` - PIDFileLocation string `json:"pid_file_location"` - AllowInsecureConfigs bool `json:"allow_insecure_configs"` - PublicKeyPath string `json:"public_key_path"` - AllowRemoteConfig bool `bson:"allow_remote_config" json:"allow_remote_config"` - Security SecurityConfig `json:"security"` - HttpServerOptions HttpServerOptionsConfig `json:"http_server_options"` - ReloadWaitTime int `bson:"reload_wait_time" json:"reload_wait_time"` - VersionHeader string `json:"version_header"` - UseAsyncSessionWrite bool `json:"optimisations_use_async_session_write"` - SuppressRedisSignalReload bool `json:"suppress_redis_signal_reload"` - - // Gateway Security Policies - HashKeys bool `json:"hash_keys"` - HashKeyFunction string `json:"hash_key_function"` - EnableHashedKeysListing bool `json:"enable_hashed_keys_listing"` - MinTokenLength int `json:"min_token_length"` - EnableAPISegregation bool `json:"enable_api_segregation"` - TemplatePath string `json:"template_path"` - Policies PoliciesConfig `json:"policies"` - - // CE Configurations - AppPath string `json:"app_path"` - - // Dashboard Configurations - UseDBAppConfigs bool `json:"use_db_app_configs"` - DBAppConfOptions DBAppConfOptionsConfig `json:"db_app_conf_options"` - Storage StorageOptionsConf `json:"storage"` - DisableDashboardZeroConf bool `json:"disable_dashboard_zeroconf"` - - // Slave Configurations - SlaveOptions SlaveOptionsConfig `json:"slave_options"` - ManagementNode bool `json:"management_node"` - AuthOverride AuthOverrideConf `json:"auth_override"` - - // Rate Limiting Strategy - EnableNonTransactionalRateLimiter bool `json:"enable_non_transactional_rate_limiter"` - EnableSentinelRateLimiter 
bool `json:"enable_sentinel_rate_limiter"` - EnableRedisRollingLimiter bool `json:"enable_redis_rolling_limiter"` - DRLNotificationFrequency int `json:"drl_notification_frequency"` - - // Organization configurations - EnforceOrgDataAge bool `json:"enforce_org_data_age"` - EnforceOrgDataDetailLogging bool `json:"enforce_org_data_detail_logging"` - EnforceOrgQuotas bool `json:"enforce_org_quotas"` - ExperimentalProcessOrgOffThread bool `json:"experimental_process_org_off_thread"` - Monitor MonitorConfig `json:"monitor"` - - // Client-Gateway Configuration - MaxIdleConns int `bson:"max_idle_connections" json:"max_idle_connections"` - MaxIdleConnsPerHost int `bson:"max_idle_connections_per_host" json:"max_idle_connections_per_host"` - MaxConnTime int64 `json:"max_conn_time"` - CloseIdleConnections bool `json:"close_idle_connections"` - CloseConnections bool `json:"close_connections"` - EnableCustomDomains bool `json:"enable_custom_domains"` - // If AllowMasterKeys is set to true, session objects (key definitions) that do not have explicit access rights set - // will be allowed by Tyk. This means that keys that are created have access to ALL APIs, which in many cases is - // unwanted behaviour unless you are sure about what you are doing. 
- AllowMasterKeys bool `json:"allow_master_keys"` - - // Gateway-Service Configuration - ServiceDiscovery ServiceDiscoveryConf `json:"service_discovery"` - ProxySSLInsecureSkipVerify bool `json:"proxy_ssl_insecure_skip_verify"` - ProxyEnableHttp2 bool `json:"proxy_enable_http2"` - ProxySSLMinVersion uint16 `json:"proxy_ssl_min_version"` - ProxySSLCipherSuites []string `json:"proxy_ssl_ciphers"` - ProxyDefaultTimeout float64 `json:"proxy_default_timeout"` - ProxySSLDisableRenegotiation bool `json:"proxy_ssl_disable_renegotiation"` - ProxyCloseConnections bool `json:"proxy_close_connections"` - UptimeTests UptimeTestsConfig `json:"uptime_tests"` - HealthCheck HealthCheckConfig `json:"health_check"` - OauthRefreshExpire int64 `json:"oauth_refresh_token_expire"` - OauthTokenExpire int32 `json:"oauth_token_expire"` - OauthTokenExpiredRetainPeriod int32 `json:"oauth_token_expired_retain_period"` - OauthRedirectUriSeparator string `json:"oauth_redirect_uri_separator"` - EnableKeyLogging bool `json:"enable_key_logging"` - - // Proxy analytics configuration - EnableAnalytics bool `json:"enable_analytics"` - AnalyticsConfig AnalyticsConfigConfig `json:"analytics_config"` - - // Cache - DnsCache DnsCacheConfig `json:"dns_cache"` - DisableRegexpCache bool `json:"disable_regexp_cache"` - RegexpCacheExpire int32 `json:"regexp_cache_expire"` - LocalSessionCache LocalSessionCacheConf `json:"local_session_cache"` - EnableSeperateCacheStore bool `json:"enable_separate_cache_store"` - CacheStorage StorageOptionsConf `json:"cache_storage"` - - // Middleware/Plugin Configuration - EnableBundleDownloader bool `bson:"enable_bundle_downloader" json:"enable_bundle_downloader"` - BundleBaseURL string `bson:"bundle_base_url" json:"bundle_base_url"` - EnableJSVM bool `json:"enable_jsvm"` - JSVMTimeout int `json:"jsvm_timeout"` - DisableVirtualPathBlobs bool `json:"disable_virtual_path_blobs"` - TykJSPath string `json:"tyk_js_path"` - MiddlewarePath string `json:"middleware_path"` - 
CoProcessOptions CoProcessConfig `json:"coprocess_options"` - - // Monitoring, Logging & Profiling - LogLevel string `json:"log_level"` - HealthCheckEndpointName string `json:"health_check_endpoint_name"` - Tracer Tracer `json:"tracing"` - NewRelic NewRelicConfig `json:"newrelic"` - HTTPProfile bool `json:"enable_http_profiler"` - UseRedisLog bool `json:"use_redis_log"` - SentryCode string `json:"sentry_code"` - UseSentry bool `json:"use_sentry"` - UseSyslog bool `json:"use_syslog"` - UseGraylog bool `json:"use_graylog"` - UseLogstash bool `json:"use_logstash"` - GraylogNetworkAddr string `json:"graylog_network_addr"` - LogstashNetworkAddr string `json:"logstash_network_addr"` - SyslogTransport string `json:"syslog_transport"` - LogstashTransport string `json:"logstash_transport"` - SyslogNetworkAddr string `json:"syslog_network_addr"` - StatsdConnectionString string `json:"statsd_connection_string"` - StatsdPrefix string `json:"statsd_prefix"` - - // Event System - EventHandlers apidef.EventHandlerMetaConfig `json:"event_handlers"` - EventTriggers map[apidef.TykEvent][]TykEventHandler `json:"event_trigers_defunct"` // Deprecated: Config.GetEventTriggers instead. - EventTriggersDefunct map[apidef.TykEvent][]TykEventHandler `json:"event_triggers_defunct"` // Deprecated: Config.GetEventTriggers instead. - - // TODO: These config options are not documented - What do they do? 
- SessionUpdatePoolSize int `json:"session_update_pool_size"` - SessionUpdateBufferSize int `json:"session_update_buffer_size"` - SupressDefaultOrgStore bool `json:"suppress_default_org_store"` - LegacyEnableAllowanceCountdown bool `bson:"legacy_enable_allowance_countdown" json:"legacy_enable_allowance_countdown"` - GlobalSessionLifetime int64 `bson:"global_session_lifetime" json:"global_session_lifetime"` - ForceGlobalSessionLifetime bool `bson:"force_global_session_lifetime" json:"force_global_session_lifetime"` - HideGeneratorHeader bool `json:"hide_generator_header"` + ListenAddress string `json:"listen_address"` + ListenPort int `json:"listen_port"` + Secret string `json:"secret"` + NodeSecret string `json:"node_secret"` + TemplatePath string `json:"template_path"` + TykJSPath string `json:"tyk_js_path"` + MiddlewarePath string `json:"middleware_path"` + Policies PoliciesConfig `json:"policies"` + UseDBAppConfigs bool `json:"use_db_app_configs"` + DBAppConfOptions DBAppConfOptionsConfig `json:"db_app_conf_options"` + DisableDashboardZeroConf bool `json:"disable_dashboard_zeroconf"` + AppPath string `json:"app_path"` + Storage StorageOptionsConf `json:"storage"` + EnableSeperateCacheStore bool `json:"enable_separate_cache_store"` + CacheStorage StorageOptionsConf `json:"cache_storage"` + EnableAnalytics bool `json:"enable_analytics"` + AnalyticsConfig AnalyticsConfigConfig `json:"analytics_config"` + HealthCheck HealthCheckConfig `json:"health_check"` + UseAsyncSessionWrite bool `json:"optimisations_use_async_session_write"` + SessionUpdatePoolSize int `json:"session_update_pool_size"` + SessionUpdateBufferSize int `json:"session_update_buffer_size"` + AllowMasterKeys bool `json:"allow_master_keys"` + HashKeys bool `json:"hash_keys"` + HashKeyFunction string `json:"hash_key_function"` + SuppressRedisSignalReload bool `json:"suppress_redis_signal_reload"` + SupressDefaultOrgStore bool `json:"suppress_default_org_store"` + UseRedisLog bool `json:"use_redis_log"` 
+ SentryCode string `json:"sentry_code"` + UseSentry bool `json:"use_sentry"` + UseSyslog bool `json:"use_syslog"` + UseGraylog bool `json:"use_graylog"` + UseLogstash bool `json:"use_logstash"` + GraylogNetworkAddr string `json:"graylog_network_addr"` + LogstashNetworkAddr string `json:"logstash_network_addr"` + SyslogTransport string `json:"syslog_transport"` + LogstashTransport string `json:"logstash_transport"` + SyslogNetworkAddr string `json:"syslog_network_addr"` + StatsdConnectionString string `json:"statsd_connection_string"` + StatsdPrefix string `json:"statsd_prefix"` + EnforceOrgDataAge bool `json:"enforce_org_data_age"` + EnforceOrgDataDetailLogging bool `json:"enforce_org_data_detail_logging"` + EnforceOrgQuotas bool `json:"enforce_org_quotas"` + ExperimentalProcessOrgOffThread bool `json:"experimental_process_org_off_thread"` + EnableNonTransactionalRateLimiter bool `json:"enable_non_transactional_rate_limiter"` + EnableSentinelRateLimiter bool `json:"enable_sentinel_rate_limiter"` + EnableRedisRollingLimiter bool `json:"enable_redis_rolling_limiter"` + ManagementNode bool `json:"management_node"` + Monitor MonitorConfig `json:"monitor"` + OauthRefreshExpire int64 `json:"oauth_refresh_token_expire"` + OauthTokenExpire int32 `json:"oauth_token_expire"` + OauthTokenExpiredRetainPeriod int32 `json:"oauth_token_expired_retain_period"` + OauthRedirectUriSeparator string `json:"oauth_redirect_uri_separator"` + SlaveOptions SlaveOptionsConfig `json:"slave_options"` + DisableVirtualPathBlobs bool `json:"disable_virtual_path_blobs"` + LocalSessionCache LocalSessionCacheConf `json:"local_session_cache"` + HttpServerOptions HttpServerOptionsConfig `json:"http_server_options"` + ServiceDiscovery ServiceDiscoveryConf `json:"service_discovery"` + ProxyCloseConnections bool `json:"proxy_close_connections"` + CloseConnections bool `json:"close_connections"` + AuthOverride AuthOverrideConf `json:"auth_override"` + UptimeTests UptimeTestsConfig `json:"uptime_tests"` + 
HostName string `json:"hostname"` + EnableAPISegregation bool `json:"enable_api_segregation"` + ControlAPIHostname string `json:"control_api_hostname"` + ControlAPIPort int `json:"control_api_port"` + EnableCustomDomains bool `json:"enable_custom_domains"` + EnableJSVM bool `json:"enable_jsvm"` + JSVMTimeout int `json:"jsvm_timeout"` + CoProcessOptions CoProcessConfig `json:"coprocess_options"` + HideGeneratorHeader bool `json:"hide_generator_header"` + EventHandlers apidef.EventHandlerMetaConfig `json:"event_handlers"` + EventTriggers map[apidef.TykEvent][]TykEventHandler `json:"event_trigers_defunct"` // Deprecated: Config.GetEventTriggers instead. + EventTriggersDefunct map[apidef.TykEvent][]TykEventHandler `json:"event_triggers_defunct"` // Deprecated: Config.GetEventTriggers instead. + PIDFileLocation string `json:"pid_file_location"` + AllowInsecureConfigs bool `json:"allow_insecure_configs"` + PublicKeyPath string `json:"public_key_path"` + CloseIdleConnections bool `json:"close_idle_connections"` + DRLNotificationFrequency int `json:"drl_notification_frequency"` + GlobalSessionLifetime int64 `bson:"global_session_lifetime" json:"global_session_lifetime"` + ForceGlobalSessionLifetime bool `bson:"force_global_session_lifetime" json:"force_global_session_lifetime"` + BundleBaseURL string `bson:"bundle_base_url" json:"bundle_base_url"` + EnableBundleDownloader bool `bson:"enable_bundle_downloader" json:"enable_bundle_downloader"` + AllowRemoteConfig bool `bson:"allow_remote_config" json:"allow_remote_config"` + LegacyEnableAllowanceCountdown bool `bson:"legacy_enable_allowance_countdown" json:"legacy_enable_allowance_countdown"` + MaxIdleConns int `bson:"max_idle_connections" json:"max_idle_connections"` + MaxIdleConnsPerHost int `bson:"max_idle_connections_per_host" json:"max_idle_connections_per_host"` + MaxConnTime int64 `json:"max_conn_time"` + ReloadWaitTime int `bson:"reload_wait_time" json:"reload_wait_time"` + ProxySSLInsecureSkipVerify bool 
`json:"proxy_ssl_insecure_skip_verify"` + ProxySSLMinVersion uint16 `json:"proxy_ssl_min_version"` + ProxySSLCipherSuites []string `json:"proxy_ssl_ciphers"` + ProxyDefaultTimeout int `json:"proxy_default_timeout"` + ProxySSLDisableRenegotiation bool `json:"proxy_ssl_disable_renegotiation"` + LogLevel string `json:"log_level"` + HTTPProfile bool `json:"enable_http_profiler"` + Security SecurityConfig `json:"security"` + EnableKeyLogging bool `json:"enable_key_logging"` + NewRelic NewRelicConfig `json:"newrelic"` + VersionHeader string `json:"version_header"` + EnableHashedKeysListing bool `json:"enable_hashed_keys_listing"` + MinTokenLength int `json:"min_token_length"` + DisableRegexpCache bool `json:"disable_regexp_cache"` + RegexpCacheExpire int32 `json:"regexp_cache_expire"` + HealthCheckEndpointName string `json:"health_check_endpoint_name"` } // GetEventTriggers returns event triggers. There was a typo in the json tag. @@ -438,6 +338,26 @@ type TykEventHandler interface { HandleEvent(EventMessage) } +const envPrefix = "TYK_GW" +const defaultListenPort = 8080 + +var Default = Config{ + ListenPort: 8080, + Secret: "352d20ee67be67f6340b4c0605b044b7", + TemplatePath: "templates", + MiddlewarePath: "middleware", + AppPath: "apps/", + Storage: StorageOptionsConf{ + Type: "redis", + Host: "localhost", + MaxIdle: 100, + Port: 6379, + }, + AnalyticsConfig: AnalyticsConfigConfig{ + IgnoredIPs: make([]string, 0), + }, +} + func init() { SetGlobal(Config{}) } @@ -463,11 +383,6 @@ func WriteConf(path string, conf *Config) error { // writeDefault will set conf to the default config and write it to disk // in path, if the path is non-empty. 
func WriteDefault(path string, conf *Config) error { - _, b, _, _ := runtime.Caller(0) - configPath := filepath.Dir(b) - rootPath := filepath.Dir(configPath) - Default.TemplatePath = filepath.Join(rootPath, "templates") - *conf = Default if err := envconfig.Process(envPrefix, conf); err != nil { return err diff --git a/config/config_test.go b/config/config_test.go index 911f59debe02..bad145df7692 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,75 +1,32 @@ package config import ( - "fmt" "io/ioutil" "os" "path/filepath" - "reflect" "strings" "testing" "github.com/TykTechnologies/tyk/apidef" ) -func TestDefaultValueAndWriteDefaultConf(t *testing.T) { - cases := []struct { - FieldName string - EnvVarName string - FieldGetter func(*Config) interface{} - - defaultValue interface{} - expectedValue interface{} - }{ - { - "ListenPort", "TYK_GW_LISTENPORT", - func(c *Config) interface{} { return c.ListenPort }, - 8080, 9090, - }, - { - "DnsCacheEnabled", "TYK_GW_DNSCACHE_ENABLED", - func(c *Config) interface{} { return c.DnsCache.Enabled }, - false, true, - }, - { - "DnsCacheTTL", "TYK_GW_DNSCACHE_TTL", - func(c *Config) interface{} { return c.DnsCache.TTL }, - int64(3600), int64(300), - }, - { - "CheckInterval", "TYK_GW_DNSCACHE_CHECKINTERVAL", - func(c *Config) interface{} { return c.DnsCache.CheckInterval }, - int64(60), - int64(60), //CheckInterval shouldn't be configured from *.conf and env var - }, - { - "CheckMultipleIPsHandleStrategy", "TYK_GW_DNSCACHE_MULTIPLEIPSHANDLESTRATEGY", - func(c *Config) interface{} { return c.DnsCache.MultipleIPsHandleStrategy }, - NoCacheStrategy, - RandomStrategy, - }, - } - - for _, tc := range cases { - t.Run(tc.FieldName, func(t *testing.T) { - conf := &Config{} - os.Unsetenv(tc.EnvVarName) - defer os.Unsetenv(tc.EnvVarName) - if err := WriteDefault("", conf); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(tc.FieldGetter(conf), tc.defaultValue) { - t.Fatalf("Expected %v to be set to its default %v, but 
got %v", tc.FieldName, tc.defaultValue, tc.FieldGetter(conf)) - } - expectedValue := fmt.Sprint(tc.expectedValue) - os.Setenv(tc.EnvVarName, expectedValue) - if err := WriteDefault("", conf); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(tc.FieldGetter(conf), tc.expectedValue) { - t.Fatalf("Expected %s to be set to %v, but got %v", tc.FieldName, tc.expectedValue, tc.FieldGetter(conf)) - } - }) +func TestWriteDefaultConf(t *testing.T) { + conf := &Config{} + os.Unsetenv("TYK_GW_LISTENPORT") + defer os.Unsetenv("TYK_GW_LISTENPORT") + if err := WriteDefault("", conf); err != nil { + t.Fatal(err) + } + if conf.ListenPort != 8080 { + t.Fatalf("Expected ListenPort to be set to its default") + } + *conf = Config{} + os.Setenv("TYK_GW_LISTENPORT", "9090") + if err := WriteDefault("", conf); err != nil { + t.Fatal(err) + } + if conf.ListenPort != 9090 { + t.Fatalf("Expected ListenPort to be set to 9090") } } diff --git a/gateway/coprocess.go b/coprocess.go similarity index 84% rename from gateway/coprocess.go rename to coprocess.go index efff8b2dd2ca..f83285f768cf 100644 --- a/gateway/coprocess.go +++ b/coprocess.go @@ -1,13 +1,10 @@ // +build coprocess -package gateway +package main import ( "bytes" "encoding/json" - "net/url" - "strings" - "time" "unicode/utf8" "github.com/Sirupsen/logrus" @@ -27,8 +24,6 @@ var ( // GlobalDispatcher will be implemented by the current CoProcess driver. GlobalDispatcher coprocess.Dispatcher - - CoProcessName apidef.MiddlewareDriver ) // CoProcessMiddleware is the basic CP middleware struct. 
@@ -37,9 +32,6 @@ type CoProcessMiddleware struct { HookType coprocess.HookType HookName string MiddlewareDriver apidef.MiddlewareDriver - RawBodyOnly bool - - successHandler *SuccessHandler } func (mw *CoProcessMiddleware) Name() string { @@ -53,13 +45,12 @@ func CreateCoProcessMiddleware(hookName string, hookType coprocess.HookType, mwD HookType: hookType, HookName: hookName, MiddlewareDriver: mwDriver, - successHandler: &SuccessHandler{baseMid}, } return createMiddleware(dMiddleware) } -func DoCoprocessReload() { +func doCoprocessReload() { if GlobalDispatcher != nil { log.WithFields(logrus.Fields{ "prefix": "coprocess", @@ -76,7 +67,7 @@ type CoProcessor struct { } // ObjectFromRequest constructs a CoProcessObject from a given http.Request. -func (c *CoProcessor) ObjectFromRequest(r *http.Request) (*coprocess.Object, error) { +func (c *CoProcessor) ObjectFromRequest(r *http.Request) *coprocess.Object { headers := ProtoMap(r.Header) host := r.Host @@ -86,15 +77,12 @@ func (c *CoProcessor) ObjectFromRequest(r *http.Request) (*coprocess.Object, err if host != "" { headers["Host"] = host } - scheme := "http" - if r.TLS != nil { - scheme = "https" - } + miniRequestObject := &coprocess.MiniRequestObject{ Headers: headers, SetHeaders: map[string]string{}, DeleteHeaders: []string{}, - Url: r.URL.String(), + Url: r.URL.Path, Params: ProtoMap(r.URL.Query()), AddParams: map[string]string{}, ExtendedParams: ProtoMap(nil), @@ -104,17 +92,13 @@ func (c *CoProcessor) ObjectFromRequest(r *http.Request) (*coprocess.Object, err }, Method: r.Method, RequestUri: r.RequestURI, - Scheme: scheme, + Scheme: r.URL.Scheme, } if r.Body != nil { defer r.Body.Close() - var err error - miniRequestObject.RawBody, err = ioutil.ReadAll(r.Body) - if err != nil { - return nil, err - } - if utf8.Valid(miniRequestObject.RawBody) && !c.Middleware.RawBodyOnly { + miniRequestObject.RawBody, _ = ioutil.ReadAll(r.Body) + if utf8.Valid(miniRequestObject.RawBody) { miniRequestObject.Body = 
string(miniRequestObject.RawBody) } } @@ -137,11 +121,7 @@ func (c *CoProcessor) ObjectFromRequest(r *http.Request) (*coprocess.Object, err if c.Middleware != nil { configDataAsJson := []byte("{}") if len(c.Middleware.Spec.ConfigData) > 0 { - var err error - configDataAsJson, err = json.Marshal(c.Middleware.Spec.ConfigData) - if err != nil { - return nil, err - } + configDataAsJson, _ = json.Marshal(c.Middleware.Spec.ConfigData) } object.Spec = map[string]string{ @@ -160,7 +140,7 @@ func (c *CoProcessor) ObjectFromRequest(r *http.Request) (*coprocess.Object, err } } - return object, nil + return object } // ObjectPostProcess does CoProcessObject post-processing (adding/removing headers or params, etc.). @@ -185,7 +165,7 @@ func (c *CoProcessor) ObjectPostProcess(object *coprocess.Object, r *http.Reques values.Set(p, v) } - r.URL, _ = url.ParseRequestURI(object.Request.Url) + r.URL.Path = object.Request.Url r.URL.RawQuery = values.Encode() } @@ -222,7 +202,6 @@ func (m *CoProcessMiddleware) EnabledForSpec() bool { log.WithFields(logrus.Fields{ "prefix": "coprocess", }).Debug("Enabling CP middleware.") - m.successHandler = &SuccessHandler{m.BaseMiddleware} return true } @@ -277,16 +256,9 @@ func (m *CoProcessMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Requ // HookType: coprocess.PreHook, } - object, err := coProcessor.ObjectFromRequest(r) - if err != nil { - logger.WithError(err).Error("Failed to build request object") - return errors.New("Middleware error"), 500 - } + object := coProcessor.ObjectFromRequest(r) - t1 := time.Now() returnObject, err := coProcessor.Dispatch(object) - t2 := time.Now() - if err != nil { logger.WithError(err).Error("Dispatch error") if m.HookType == coprocess.HookType_CustomKeyCheck { @@ -296,9 +268,6 @@ func (m *CoProcessMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Requ } } - ms := float64(t2.UnixNano()-t1.UnixNano()) * 0.000001 - m.logger.WithField("ms", ms).Debug("gRPC request processing took") - 
coProcessor.ObjectPostProcess(returnObject, r) var token string @@ -344,18 +313,6 @@ func (m *CoProcessMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Requ } w.WriteHeader(int(returnObject.Request.ReturnOverrides.ResponseCode)) w.Write([]byte(returnObject.Request.ReturnOverrides.ResponseError)) - - // Record analytics data: - res := new(http.Response) - res.Proto = "HTTP/1.0" - res.ProtoMajor = 1 - res.ProtoMinor = 0 - res.StatusCode = int(returnObject.Request.ReturnOverrides.ResponseCode) - res.Body = nopCloser{ - ReadSeeker: strings.NewReader(returnObject.Request.ReturnOverrides.ResponseError), - } - res.ContentLength = int64(len(returnObject.Request.ReturnOverrides.ResponseError)) - m.successHandler.RecordHit(r, int64(ms), int(returnObject.Request.ReturnOverrides.ResponseCode), res) return nil, mwStatusRespond } diff --git a/coprocess/coprocess_common.pb.go b/coprocess/coprocess_common.pb.go index 0192e9fd93c0..8c6a05d45df6 100644 --- a/coprocess/coprocess_common.pb.go +++ b/coprocess/coprocess_common.pb.go @@ -1,14 +1,35 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: coprocess_common.proto +/* +Package coprocess is a generated protocol buffer package. + +It is generated from these files: + coprocess_common.proto + coprocess_mini_request_object.proto + coprocess_object.proto + coprocess_return_overrides.proto + coprocess_session_state.proto + +It has these top-level messages: + StringSlice + MiniRequestObject + Object + Event + EventReply + ReturnOverrides + AccessSpec + AccessDefinition + BasicAuthData + JWTData + Monitor + SessionState +*/ package coprocess -import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -19,7 +40,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. 
// A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type HookType int32 @@ -38,7 +59,6 @@ var HookType_name = map[int32]string{ 3: "PostKeyAuth", 4: "CustomKeyCheck", } - var HookType_value = map[string]int32{ "Unknown": 0, "Pre": 1, @@ -50,42 +70,16 @@ var HookType_value = map[string]int32{ func (x HookType) String() string { return proto.EnumName(HookType_name, int32(x)) } - -func (HookType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ad9b17a8ddc1be7d, []int{0} -} +func (HookType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } type StringSlice struct { - Items []string `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Items []string `protobuf:"bytes,1,rep,name=items" json:"items,omitempty"` } -func (m *StringSlice) Reset() { *m = StringSlice{} } -func (m *StringSlice) String() string { return proto.CompactTextString(m) } -func (*StringSlice) ProtoMessage() {} -func (*StringSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_ad9b17a8ddc1be7d, []int{0} -} - -func (m *StringSlice) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringSlice.Unmarshal(m, b) -} -func (m *StringSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringSlice.Marshal(b, m, deterministic) -} -func (m *StringSlice) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringSlice.Merge(m, src) -} -func (m *StringSlice) XXX_Size() int { - return xxx_messageInfo_StringSlice.Size(m) -} -func (m *StringSlice) XXX_DiscardUnknown() { - xxx_messageInfo_StringSlice.DiscardUnknown(m) -} - -var xxx_messageInfo_StringSlice proto.InternalMessageInfo +func (m *StringSlice) Reset() { 
*m = StringSlice{} } +func (m *StringSlice) String() string { return proto.CompactTextString(m) } +func (*StringSlice) ProtoMessage() {} +func (*StringSlice) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *StringSlice) GetItems() []string { if m != nil { @@ -95,13 +89,13 @@ func (m *StringSlice) GetItems() []string { } func init() { - proto.RegisterEnum("coprocess.HookType", HookType_name, HookType_value) proto.RegisterType((*StringSlice)(nil), "coprocess.StringSlice") + proto.RegisterEnum("coprocess.HookType", HookType_name, HookType_value) } -func init() { proto.RegisterFile("coprocess_common.proto", fileDescriptor_ad9b17a8ddc1be7d) } +func init() { proto.RegisterFile("coprocess_common.proto", fileDescriptor0) } -var fileDescriptor_ad9b17a8ddc1be7d = []byte{ +var fileDescriptor0 = []byte{ // 167 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8e, 0xb1, 0x0a, 0xc2, 0x30, 0x10, 0x40, 0xad, 0xad, 0xb6, 0xbd, 0x82, 0x86, 0x43, 0xc4, 0x51, 0x74, 0x11, 0x07, 0x17, 0xbf, diff --git a/coprocess/coprocess_mini_request_object.pb.go b/coprocess/coprocess_mini_request_object.pb.go index 9efd8025aa1a..7c10ed664c57 100644 --- a/coprocess/coprocess_mini_request_object.pb.go +++ b/coprocess/coprocess_mini_request_object.pb.go @@ -3,68 +3,36 @@ package coprocess -import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - type MiniRequestObject struct { - Headers map[string]string `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - SetHeaders map[string]string `protobuf:"bytes,2,rep,name=set_headers,json=setHeaders,proto3" json:"set_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - DeleteHeaders []string `protobuf:"bytes,3,rep,name=delete_headers,json=deleteHeaders,proto3" json:"delete_headers,omitempty"` - Body string `protobuf:"bytes,4,opt,name=body,proto3" json:"body,omitempty"` - Url string `protobuf:"bytes,5,opt,name=url,proto3" json:"url,omitempty"` - Params map[string]string `protobuf:"bytes,6,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - AddParams map[string]string `protobuf:"bytes,7,rep,name=add_params,json=addParams,proto3" json:"add_params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - ExtendedParams map[string]string `protobuf:"bytes,8,rep,name=extended_params,json=extendedParams,proto3" json:"extended_params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - DeleteParams []string `protobuf:"bytes,9,rep,name=delete_params,json=deleteParams,proto3" json:"delete_params,omitempty"` - ReturnOverrides *ReturnOverrides `protobuf:"bytes,10,opt,name=return_overrides,json=returnOverrides,proto3" json:"return_overrides,omitempty"` - Method string `protobuf:"bytes,11,opt,name=method,proto3" json:"method,omitempty"` - RequestUri string `protobuf:"bytes,12,opt,name=request_uri,json=requestUri,proto3" json:"request_uri,omitempty"` - Scheme string `protobuf:"bytes,13,opt,name=scheme,proto3" json:"scheme,omitempty"` - RawBody []byte 
`protobuf:"bytes,14,opt,name=raw_body,json=rawBody,proto3" json:"raw_body,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MiniRequestObject) Reset() { *m = MiniRequestObject{} } -func (m *MiniRequestObject) String() string { return proto.CompactTextString(m) } -func (*MiniRequestObject) ProtoMessage() {} -func (*MiniRequestObject) Descriptor() ([]byte, []int) { - return fileDescriptor_7fc17e485a5ab6a4, []int{0} -} - -func (m *MiniRequestObject) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MiniRequestObject.Unmarshal(m, b) -} -func (m *MiniRequestObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MiniRequestObject.Marshal(b, m, deterministic) -} -func (m *MiniRequestObject) XXX_Merge(src proto.Message) { - xxx_messageInfo_MiniRequestObject.Merge(m, src) -} -func (m *MiniRequestObject) XXX_Size() int { - return xxx_messageInfo_MiniRequestObject.Size(m) -} -func (m *MiniRequestObject) XXX_DiscardUnknown() { - xxx_messageInfo_MiniRequestObject.DiscardUnknown(m) -} - -var xxx_messageInfo_MiniRequestObject proto.InternalMessageInfo + Headers map[string]string `protobuf:"bytes,1,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + SetHeaders map[string]string `protobuf:"bytes,2,rep,name=set_headers,json=setHeaders" json:"set_headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + DeleteHeaders []string `protobuf:"bytes,3,rep,name=delete_headers,json=deleteHeaders" json:"delete_headers,omitempty"` + Body string `protobuf:"bytes,4,opt,name=body" json:"body,omitempty"` + Url string `protobuf:"bytes,5,opt,name=url" json:"url,omitempty"` + Params map[string]string `protobuf:"bytes,6,rep,name=params" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + AddParams map[string]string 
`protobuf:"bytes,7,rep,name=add_params,json=addParams" json:"add_params,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ExtendedParams map[string]string `protobuf:"bytes,8,rep,name=extended_params,json=extendedParams" json:"extended_params,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + DeleteParams []string `protobuf:"bytes,9,rep,name=delete_params,json=deleteParams" json:"delete_params,omitempty"` + ReturnOverrides *ReturnOverrides `protobuf:"bytes,10,opt,name=return_overrides,json=returnOverrides" json:"return_overrides,omitempty"` + Method string `protobuf:"bytes,11,opt,name=method" json:"method,omitempty"` + RequestUri string `protobuf:"bytes,12,opt,name=request_uri,json=requestUri" json:"request_uri,omitempty"` + Scheme string `protobuf:"bytes,13,opt,name=scheme" json:"scheme,omitempty"` + RawBody []byte `protobuf:"bytes,14,opt,name=raw_body,json=rawBody,proto3" json:"raw_body,omitempty"` +} + +func (m *MiniRequestObject) Reset() { *m = MiniRequestObject{} } +func (m *MiniRequestObject) String() string { return proto.CompactTextString(m) } +func (*MiniRequestObject) ProtoMessage() {} +func (*MiniRequestObject) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } func (m *MiniRequestObject) GetHeaders() map[string]string { if m != nil { @@ -166,18 +134,11 @@ func (m *MiniRequestObject) GetRawBody() []byte { func init() { proto.RegisterType((*MiniRequestObject)(nil), "coprocess.MiniRequestObject") - proto.RegisterMapType((map[string]string)(nil), "coprocess.MiniRequestObject.AddParamsEntry") - proto.RegisterMapType((map[string]string)(nil), "coprocess.MiniRequestObject.ExtendedParamsEntry") - proto.RegisterMapType((map[string]string)(nil), "coprocess.MiniRequestObject.HeadersEntry") - proto.RegisterMapType((map[string]string)(nil), "coprocess.MiniRequestObject.ParamsEntry") - proto.RegisterMapType((map[string]string)(nil), "coprocess.MiniRequestObject.SetHeadersEntry") 
} -func init() { - proto.RegisterFile("coprocess_mini_request_object.proto", fileDescriptor_7fc17e485a5ab6a4) -} +func init() { proto.RegisterFile("coprocess_mini_request_object.proto", fileDescriptor1) } -var fileDescriptor_7fc17e485a5ab6a4 = []byte{ +var fileDescriptor1 = []byte{ // 452 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0xd1, 0x8b, 0xd3, 0x40, 0x10, 0xc6, 0x49, 0x7b, 0xd7, 0x5e, 0x26, 0xbd, 0xf6, 0x5c, 0x45, 0xd6, 0xbe, 0x18, 0x3c, 0x84, diff --git a/coprocess/coprocess_object.pb.go b/coprocess/coprocess_object.pb.go index 22b08ad05868..a80d9de1ffbb 100644 --- a/coprocess/coprocess_object.pb.go +++ b/coprocess/coprocess_object.pb.go @@ -3,12 +3,12 @@ package coprocess -import ( - context "context" - fmt "fmt" - math "math" +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" - proto "github.com/golang/protobuf/proto" +import ( + context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) @@ -17,48 +17,19 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - type Object struct { - HookType HookType `protobuf:"varint,1,opt,name=hook_type,json=hookType,proto3,enum=coprocess.HookType" json:"hook_type,omitempty"` - HookName string `protobuf:"bytes,2,opt,name=hook_name,json=hookName,proto3" json:"hook_name,omitempty"` - Request *MiniRequestObject `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` - Session *SessionState `protobuf:"bytes,4,opt,name=session,proto3" json:"session,omitempty"` - Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Spec map[string]string `protobuf:"bytes,6,rep,name=spec,proto3" json:"spec,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Object) Reset() { *m = Object{} } -func (m *Object) String() string { return proto.CompactTextString(m) } -func (*Object) ProtoMessage() {} -func (*Object) Descriptor() ([]byte, []int) { - return fileDescriptor_72698a2223f86099, []int{0} -} - -func (m *Object) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Object.Unmarshal(m, b) -} -func (m *Object) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Object.Marshal(b, m, deterministic) -} -func (m *Object) XXX_Merge(src proto.Message) { - xxx_messageInfo_Object.Merge(m, src) -} -func (m *Object) XXX_Size() int { - return xxx_messageInfo_Object.Size(m) -} -func (m *Object) XXX_DiscardUnknown() { - xxx_messageInfo_Object.DiscardUnknown(m) + HookType HookType `protobuf:"varint,1,opt,name=hook_type,json=hookType,enum=coprocess.HookType" json:"hook_type,omitempty"` + HookName string `protobuf:"bytes,2,opt,name=hook_name,json=hookName" json:"hook_name,omitempty"` + Request 
*MiniRequestObject `protobuf:"bytes,3,opt,name=request" json:"request,omitempty"` + Session *SessionState `protobuf:"bytes,4,opt,name=session" json:"session,omitempty"` + Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Spec map[string]string `protobuf:"bytes,6,rep,name=spec" json:"spec,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` } -var xxx_messageInfo_Object proto.InternalMessageInfo +func (m *Object) Reset() { *m = Object{} } +func (m *Object) String() string { return proto.CompactTextString(m) } +func (*Object) ProtoMessage() {} +func (*Object) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } func (m *Object) GetHookType() HookType { if m != nil { @@ -103,36 +74,13 @@ func (m *Object) GetSpec() map[string]string { } type Event struct { - Payload string `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Payload string `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` } -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_72698a2223f86099, []int{1} -} - -func (m *Event) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Event.Unmarshal(m, b) -} -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) -} -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) -} -func (m *Event) XXX_Size() int { - return xxx_messageInfo_Event.Size(m) -} -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) -} - -var xxx_messageInfo_Event 
proto.InternalMessageInfo +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } func (m *Event) GetPayload() string { if m != nil { @@ -142,74 +90,19 @@ func (m *Event) GetPayload() string { } type EventReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *EventReply) Reset() { *m = EventReply{} } -func (m *EventReply) String() string { return proto.CompactTextString(m) } -func (*EventReply) ProtoMessage() {} -func (*EventReply) Descriptor() ([]byte, []int) { - return fileDescriptor_72698a2223f86099, []int{2} -} - -func (m *EventReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EventReply.Unmarshal(m, b) -} -func (m *EventReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EventReply.Marshal(b, m, deterministic) -} -func (m *EventReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventReply.Merge(m, src) -} -func (m *EventReply) XXX_Size() int { - return xxx_messageInfo_EventReply.Size(m) -} -func (m *EventReply) XXX_DiscardUnknown() { - xxx_messageInfo_EventReply.DiscardUnknown(m) -} - -var xxx_messageInfo_EventReply proto.InternalMessageInfo +func (m *EventReply) Reset() { *m = EventReply{} } +func (m *EventReply) String() string { return proto.CompactTextString(m) } +func (*EventReply) ProtoMessage() {} +func (*EventReply) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } func init() { proto.RegisterType((*Object)(nil), "coprocess.Object") - proto.RegisterMapType((map[string]string)(nil), "coprocess.Object.MetadataEntry") - proto.RegisterMapType((map[string]string)(nil), "coprocess.Object.SpecEntry") proto.RegisterType((*Event)(nil), "coprocess.Event") proto.RegisterType((*EventReply)(nil), "coprocess.EventReply") } -func init() { 
proto.RegisterFile("coprocess_object.proto", fileDescriptor_72698a2223f86099) } - -var fileDescriptor_72698a2223f86099 = []byte{ - // 383 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x4f, 0xcf, 0x93, 0x40, - 0x10, 0xc6, 0x4b, 0xe9, 0x3f, 0xa6, 0xd6, 0xd4, 0xf5, 0x1f, 0xa1, 0x1a, 0x11, 0x2f, 0x9c, 0x50, - 0x31, 0x51, 0xd3, 0x5e, 0x6d, 0xe2, 0xa5, 0x9a, 0x50, 0xef, 0x64, 0x4b, 0x27, 0x29, 0xb6, 0xb0, - 0x2b, 0xbb, 0x6d, 0x42, 0xe2, 0xe7, 0x79, 0x3f, 0xe7, 0x9b, 0xee, 0x02, 0xa5, 0x6f, 0x4f, 0xef, - 0x6d, 0xe7, 0x99, 0xe7, 0x37, 0x0f, 0x33, 0x01, 0x5e, 0x25, 0x8c, 0x17, 0x2c, 0x41, 0x21, 0x62, - 0xb6, 0xf9, 0x8b, 0x89, 0x0c, 0x78, 0xc1, 0x24, 0x23, 0x56, 0xa3, 0x3b, 0x1f, 0x2e, 0x96, 0x2c, - 0xcd, 0xd3, 0xb8, 0xc0, 0x7f, 0x47, 0x14, 0xf2, 0xca, 0xef, 0xbc, 0xbd, 0x98, 0x04, 0x0a, 0x91, - 0xb2, 0x3c, 0x16, 0x92, 0x4a, 0xac, 0xda, 0xad, 0x98, 0x84, 0x65, 0x19, 0xcb, 0xb5, 0xee, 0xdd, - 0x99, 0x30, 0xf8, 0xad, 0xe6, 0x90, 0x4f, 0x60, 0xed, 0x18, 0xdb, 0xc7, 0xb2, 0xe4, 0x68, 0x1b, - 0xae, 0xe1, 0x3f, 0x0d, 0x9f, 0x07, 0x0d, 0x16, 0xfc, 0x64, 0x6c, 0xff, 0xa7, 0xe4, 0x18, 0x8d, - 0x76, 0xd5, 0x8b, 0xcc, 0x2a, 0x22, 0xa7, 0x19, 0xda, 0x5d, 0xd7, 0xf0, 0x2d, 0xdd, 0xfc, 0x45, - 0x33, 0x24, 0x5f, 0x61, 0x58, 0x7d, 0xa8, 0x6d, 0xba, 0x86, 0x3f, 0x0e, 0xdf, 0xb4, 0x86, 0xad, - 0xd2, 0x3c, 0x8d, 0x74, 0x57, 0xa7, 0x47, 0xb5, 0x99, 0x7c, 0x86, 0x61, 0xb5, 0x80, 0xdd, 0x53, - 0xdc, 0xeb, 0x16, 0xb7, 0xd6, 0x9d, 0xf5, 0x79, 0xb3, 0xa8, 0xf6, 0x91, 0x05, 0x8c, 0x32, 0x94, - 0x74, 0x4b, 0x25, 0xb5, 0xfb, 0xae, 0xe9, 0x8f, 0xc3, 0x77, 0x2d, 0x46, 0x07, 0x04, 0xab, 0xca, - 0xb1, 0xcc, 0x65, 0x51, 0x46, 0x0d, 0x40, 0x3e, 0x42, 0x4f, 0x70, 0x4c, 0xec, 0x81, 0x02, 0x67, - 0xb7, 0xe0, 0x9a, 0x63, 0xa2, 0x21, 0x65, 0x74, 0x16, 0x30, 0xb9, 0x9a, 0x45, 0xa6, 0x60, 0xee, - 0xb1, 0x54, 0x27, 0xb3, 0xa2, 0xf3, 0x93, 0xbc, 0x80, 0xfe, 0x89, 0x1e, 0x8e, 0xf5, 0x51, 0x74, - 0x31, 0xef, 0x7e, 0x37, 0x9c, 0x6f, 0x60, 0x35, 0xf3, 
0x1e, 0x03, 0x7a, 0xef, 0xa1, 0xbf, 0x3c, - 0x61, 0x2e, 0x89, 0x0d, 0x43, 0x4e, 0xcb, 0x03, 0xa3, 0xdb, 0x0a, 0xac, 0x4b, 0xef, 0x09, 0x80, - 0xb2, 0x44, 0xc8, 0x0f, 0x65, 0xf8, 0x1f, 0xe0, 0x47, 0x2a, 0x38, 0x95, 0xc9, 0x0e, 0x0b, 0x12, - 0xc2, 0xa8, 0xae, 0xc8, 0xb3, 0x9b, 0x1d, 0x9d, 0x5b, 0xc9, 0xeb, 0x90, 0x39, 0x4c, 0x6a, 0x46, - 0x47, 0x4f, 0x5b, 0x2e, 0xa5, 0x38, 0x2f, 0x1f, 0x2a, 0x2a, 0xdb, 0xeb, 0x6c, 0x06, 0xea, 0xf7, - 0xfa, 0x72, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xaf, 0x58, 0x65, 0xdf, 0x02, 0x00, 0x00, -} - // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn @@ -218,9 +111,8 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// DispatcherClient is the client API for Dispatcher service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Dispatcher service + type DispatcherClient interface { Dispatch(ctx context.Context, in *Object, opts ...grpc.CallOption) (*Object, error) DispatchEvent(ctx context.Context, in *Event, opts ...grpc.CallOption) (*EventReply, error) @@ -236,7 +128,7 @@ func NewDispatcherClient(cc *grpc.ClientConn) DispatcherClient { func (c *dispatcherClient) Dispatch(ctx context.Context, in *Object, opts ...grpc.CallOption) (*Object, error) { out := new(Object) - err := c.cc.Invoke(ctx, "/coprocess.Dispatcher/Dispatch", in, out, opts...) + err := grpc.Invoke(ctx, "/coprocess.Dispatcher/Dispatch", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -245,14 +137,15 @@ func (c *dispatcherClient) Dispatch(ctx context.Context, in *Object, opts ...grp func (c *dispatcherClient) DispatchEvent(ctx context.Context, in *Event, opts ...grpc.CallOption) (*EventReply, error) { out := new(EventReply) - err := c.cc.Invoke(ctx, "/coprocess.Dispatcher/DispatchEvent", in, out, opts...) + err := grpc.Invoke(ctx, "/coprocess.Dispatcher/DispatchEvent", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// DispatcherServer is the server API for Dispatcher service. +// Server API for Dispatcher service + type DispatcherServer interface { Dispatch(context.Context, *Object) (*Object, error) DispatchEvent(context.Context, *Event) (*EventReply, error) @@ -314,3 +207,33 @@ var _Dispatcher_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "coprocess_object.proto", } + +func init() { proto.RegisterFile("coprocess_object.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 383 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x4f, 0xcf, 0x93, 0x40, + 0x10, 0xc6, 0x4b, 0xe9, 0x3f, 0xa6, 0xd6, 0xd4, 0xf5, 0x1f, 0xa1, 0x1a, 0x11, 0x2f, 0x9c, 0x50, + 0x31, 0x51, 0xd3, 0x5e, 0x6d, 0xe2, 0xa5, 0x9a, 0x50, 0xef, 0x64, 0x4b, 0x27, 0x29, 0xb6, 0xb0, + 0x2b, 0xbb, 0x6d, 0x42, 0xe2, 0xe7, 0x79, 0x3f, 0xe7, 0x9b, 0xee, 0x02, 0xa5, 0x6f, 0x4f, 0xef, + 0x6d, 0xe7, 0x99, 0xe7, 0x37, 0x0f, 0x33, 0x01, 0x5e, 0x25, 0x8c, 0x17, 0x2c, 0x41, 0x21, 0x62, + 0xb6, 0xf9, 0x8b, 0x89, 0x0c, 0x78, 0xc1, 0x24, 0x23, 0x56, 0xa3, 0x3b, 0x1f, 0x2e, 0x96, 0x2c, + 0xcd, 0xd3, 0xb8, 0xc0, 0x7f, 0x47, 0x14, 0xf2, 0xca, 0xef, 0xbc, 0xbd, 0x98, 0x04, 0x0a, 0x91, + 0xb2, 0x3c, 0x16, 0x92, 0x4a, 0xac, 0xda, 0xad, 0x98, 0x84, 0x65, 0x19, 0xcb, 0xb5, 0xee, 0xdd, + 0x99, 0x30, 0xf8, 0xad, 0xe6, 0x90, 0x4f, 0x60, 0xed, 0x18, 0xdb, 0xc7, 0xb2, 0xe4, 0x68, 0x1b, + 0xae, 0xe1, 0x3f, 0x0d, 0x9f, 0x07, 0x0d, 0x16, 0xfc, 0x64, 
0x6c, 0xff, 0xa7, 0xe4, 0x18, 0x8d, + 0x76, 0xd5, 0x8b, 0xcc, 0x2a, 0x22, 0xa7, 0x19, 0xda, 0x5d, 0xd7, 0xf0, 0x2d, 0xdd, 0xfc, 0x45, + 0x33, 0x24, 0x5f, 0x61, 0x58, 0x7d, 0xa8, 0x6d, 0xba, 0x86, 0x3f, 0x0e, 0xdf, 0xb4, 0x86, 0xad, + 0xd2, 0x3c, 0x8d, 0x74, 0x57, 0xa7, 0x47, 0xb5, 0x99, 0x7c, 0x86, 0x61, 0xb5, 0x80, 0xdd, 0x53, + 0xdc, 0xeb, 0x16, 0xb7, 0xd6, 0x9d, 0xf5, 0x79, 0xb3, 0xa8, 0xf6, 0x91, 0x05, 0x8c, 0x32, 0x94, + 0x74, 0x4b, 0x25, 0xb5, 0xfb, 0xae, 0xe9, 0x8f, 0xc3, 0x77, 0x2d, 0x46, 0x07, 0x04, 0xab, 0xca, + 0xb1, 0xcc, 0x65, 0x51, 0x46, 0x0d, 0x40, 0x3e, 0x42, 0x4f, 0x70, 0x4c, 0xec, 0x81, 0x02, 0x67, + 0xb7, 0xe0, 0x9a, 0x63, 0xa2, 0x21, 0x65, 0x74, 0x16, 0x30, 0xb9, 0x9a, 0x45, 0xa6, 0x60, 0xee, + 0xb1, 0x54, 0x27, 0xb3, 0xa2, 0xf3, 0x93, 0xbc, 0x80, 0xfe, 0x89, 0x1e, 0x8e, 0xf5, 0x51, 0x74, + 0x31, 0xef, 0x7e, 0x37, 0x9c, 0x6f, 0x60, 0x35, 0xf3, 0x1e, 0x03, 0x7a, 0xef, 0xa1, 0xbf, 0x3c, + 0x61, 0x2e, 0x89, 0x0d, 0x43, 0x4e, 0xcb, 0x03, 0xa3, 0xdb, 0x0a, 0xac, 0x4b, 0xef, 0x09, 0x80, + 0xb2, 0x44, 0xc8, 0x0f, 0x65, 0xf8, 0x1f, 0xe0, 0x47, 0x2a, 0x38, 0x95, 0xc9, 0x0e, 0x0b, 0x12, + 0xc2, 0xa8, 0xae, 0xc8, 0xb3, 0x9b, 0x1d, 0x9d, 0x5b, 0xc9, 0xeb, 0x90, 0x39, 0x4c, 0x6a, 0x46, + 0x47, 0x4f, 0x5b, 0x2e, 0xa5, 0x38, 0x2f, 0x1f, 0x2a, 0x2a, 0xdb, 0xeb, 0x6c, 0x06, 0xea, 0xf7, + 0xfa, 0x72, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xaf, 0x58, 0x65, 0xdf, 0x02, 0x00, 0x00, +} diff --git a/coprocess/coprocess_return_overrides.pb.go b/coprocess/coprocess_return_overrides.pb.go index 67ef1a322360..d0a5bcc7123c 100644 --- a/coprocess/coprocess_return_overrides.pb.go +++ b/coprocess/coprocess_return_overrides.pb.go @@ -3,57 +3,25 @@ package coprocess -import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - type ReturnOverrides struct { - ResponseCode int32 `protobuf:"varint,1,opt,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` - ResponseError string `protobuf:"bytes,2,opt,name=response_error,json=responseError,proto3" json:"response_error,omitempty"` - Headers map[string]string `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReturnOverrides) Reset() { *m = ReturnOverrides{} } -func (m *ReturnOverrides) String() string { return proto.CompactTextString(m) } -func (*ReturnOverrides) ProtoMessage() {} -func (*ReturnOverrides) Descriptor() ([]byte, []int) { - return fileDescriptor_7c6abd8ea4a81548, []int{0} -} - -func (m *ReturnOverrides) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReturnOverrides.Unmarshal(m, b) -} -func (m *ReturnOverrides) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReturnOverrides.Marshal(b, m, deterministic) -} -func (m *ReturnOverrides) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReturnOverrides.Merge(m, src) -} -func (m *ReturnOverrides) XXX_Size() int { - return xxx_messageInfo_ReturnOverrides.Size(m) -} -func (m *ReturnOverrides) XXX_DiscardUnknown() { - xxx_messageInfo_ReturnOverrides.DiscardUnknown(m) + ResponseCode int32 `protobuf:"varint,1,opt,name=response_code,json=responseCode" json:"response_code,omitempty"` + ResponseError string 
`protobuf:"bytes,2,opt,name=response_error,json=responseError" json:"response_error,omitempty"` + Headers map[string]string `protobuf:"bytes,3,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` } -var xxx_messageInfo_ReturnOverrides proto.InternalMessageInfo +func (m *ReturnOverrides) Reset() { *m = ReturnOverrides{} } +func (m *ReturnOverrides) String() string { return proto.CompactTextString(m) } +func (*ReturnOverrides) ProtoMessage() {} +func (*ReturnOverrides) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } func (m *ReturnOverrides) GetResponseCode() int32 { if m != nil { @@ -78,12 +46,11 @@ func (m *ReturnOverrides) GetHeaders() map[string]string { func init() { proto.RegisterType((*ReturnOverrides)(nil), "coprocess.ReturnOverrides") - proto.RegisterMapType((map[string]string)(nil), "coprocess.ReturnOverrides.HeadersEntry") } -func init() { proto.RegisterFile("coprocess_return_overrides.proto", fileDescriptor_7c6abd8ea4a81548) } +func init() { proto.RegisterFile("coprocess_return_overrides.proto", fileDescriptor3) } -var fileDescriptor_7c6abd8ea4a81548 = []byte{ +var fileDescriptor3 = []byte{ // 206 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xce, 0x2f, 0x28, 0xca, 0x4f, 0x4e, 0x2d, 0x2e, 0x8e, 0x2f, 0x4a, 0x2d, 0x29, 0x2d, 0xca, 0x8b, 0xcf, 0x2f, 0x4b, diff --git a/coprocess/coprocess_session_state.pb.go b/coprocess/coprocess_session_state.pb.go index 3a07445d8f43..6149fa78cadc 100644 --- a/coprocess/coprocess_session_state.pb.go +++ b/coprocess/coprocess_session_state.pb.go @@ -3,56 +3,24 @@ package coprocess -import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - type AccessSpec struct { - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + Methods []string `protobuf:"bytes,2,rep,name=methods" json:"methods,omitempty"` } -func (m *AccessSpec) Reset() { *m = AccessSpec{} } -func (m *AccessSpec) String() string { return proto.CompactTextString(m) } -func (*AccessSpec) ProtoMessage() {} -func (*AccessSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_087f3e8bbcac7a63, []int{0} -} - -func (m *AccessSpec) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AccessSpec.Unmarshal(m, b) -} -func (m *AccessSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AccessSpec.Marshal(b, m, deterministic) -} -func (m *AccessSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_AccessSpec.Merge(m, src) -} -func (m *AccessSpec) XXX_Size() int { - return xxx_messageInfo_AccessSpec.Size(m) -} -func (m *AccessSpec) XXX_DiscardUnknown() { - xxx_messageInfo_AccessSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_AccessSpec proto.InternalMessageInfo +func (m *AccessSpec) Reset() { *m = AccessSpec{} } +func (m *AccessSpec) String() string { return proto.CompactTextString(m) } +func (*AccessSpec) ProtoMessage() {} +func (*AccessSpec) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } func (m *AccessSpec) GetUrl() string 
{ if m != nil { @@ -69,39 +37,16 @@ func (m *AccessSpec) GetMethods() []string { } type AccessDefinition struct { - ApiName string `protobuf:"bytes,1,opt,name=api_name,json=apiName,proto3" json:"api_name,omitempty"` - ApiId string `protobuf:"bytes,2,opt,name=api_id,json=apiId,proto3" json:"api_id,omitempty"` - Versions []string `protobuf:"bytes,3,rep,name=versions,proto3" json:"versions,omitempty"` - AllowedUrls []*AccessSpec `protobuf:"bytes,4,rep,name=allowed_urls,json=allowedUrls,proto3" json:"allowed_urls,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ApiName string `protobuf:"bytes,1,opt,name=api_name,json=apiName" json:"api_name,omitempty"` + ApiId string `protobuf:"bytes,2,opt,name=api_id,json=apiId" json:"api_id,omitempty"` + Versions []string `protobuf:"bytes,3,rep,name=versions" json:"versions,omitempty"` + AllowedUrls []*AccessSpec `protobuf:"bytes,4,rep,name=allowed_urls,json=allowedUrls" json:"allowed_urls,omitempty"` } -func (m *AccessDefinition) Reset() { *m = AccessDefinition{} } -func (m *AccessDefinition) String() string { return proto.CompactTextString(m) } -func (*AccessDefinition) ProtoMessage() {} -func (*AccessDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_087f3e8bbcac7a63, []int{1} -} - -func (m *AccessDefinition) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AccessDefinition.Unmarshal(m, b) -} -func (m *AccessDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AccessDefinition.Marshal(b, m, deterministic) -} -func (m *AccessDefinition) XXX_Merge(src proto.Message) { - xxx_messageInfo_AccessDefinition.Merge(m, src) -} -func (m *AccessDefinition) XXX_Size() int { - return xxx_messageInfo_AccessDefinition.Size(m) -} -func (m *AccessDefinition) XXX_DiscardUnknown() { - xxx_messageInfo_AccessDefinition.DiscardUnknown(m) -} - -var xxx_messageInfo_AccessDefinition proto.InternalMessageInfo 
+func (m *AccessDefinition) Reset() { *m = AccessDefinition{} } +func (m *AccessDefinition) String() string { return proto.CompactTextString(m) } +func (*AccessDefinition) ProtoMessage() {} +func (*AccessDefinition) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } func (m *AccessDefinition) GetApiName() string { if m != nil { @@ -132,37 +77,14 @@ func (m *AccessDefinition) GetAllowedUrls() []*AccessSpec { } type BasicAuthData struct { - Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` - Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BasicAuthData) Reset() { *m = BasicAuthData{} } -func (m *BasicAuthData) String() string { return proto.CompactTextString(m) } -func (*BasicAuthData) ProtoMessage() {} -func (*BasicAuthData) Descriptor() ([]byte, []int) { - return fileDescriptor_087f3e8bbcac7a63, []int{2} -} - -func (m *BasicAuthData) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BasicAuthData.Unmarshal(m, b) -} -func (m *BasicAuthData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BasicAuthData.Marshal(b, m, deterministic) -} -func (m *BasicAuthData) XXX_Merge(src proto.Message) { - xxx_messageInfo_BasicAuthData.Merge(m, src) -} -func (m *BasicAuthData) XXX_Size() int { - return xxx_messageInfo_BasicAuthData.Size(m) -} -func (m *BasicAuthData) XXX_DiscardUnknown() { - xxx_messageInfo_BasicAuthData.DiscardUnknown(m) + Password string `protobuf:"bytes,1,opt,name=password" json:"password,omitempty"` + Hash string `protobuf:"bytes,2,opt,name=hash" json:"hash,omitempty"` } -var xxx_messageInfo_BasicAuthData proto.InternalMessageInfo +func (m *BasicAuthData) Reset() { *m = BasicAuthData{} } +func (m *BasicAuthData) String() string { return proto.CompactTextString(m) } +func (*BasicAuthData) ProtoMessage() {} +func 
(*BasicAuthData) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } func (m *BasicAuthData) GetPassword() string { if m != nil { @@ -179,36 +101,13 @@ func (m *BasicAuthData) GetHash() string { } type JWTData struct { - Secret string `protobuf:"bytes,1,opt,name=secret,proto3" json:"secret,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Secret string `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` } -func (m *JWTData) Reset() { *m = JWTData{} } -func (m *JWTData) String() string { return proto.CompactTextString(m) } -func (*JWTData) ProtoMessage() {} -func (*JWTData) Descriptor() ([]byte, []int) { - return fileDescriptor_087f3e8bbcac7a63, []int{3} -} - -func (m *JWTData) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_JWTData.Unmarshal(m, b) -} -func (m *JWTData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_JWTData.Marshal(b, m, deterministic) -} -func (m *JWTData) XXX_Merge(src proto.Message) { - xxx_messageInfo_JWTData.Merge(m, src) -} -func (m *JWTData) XXX_Size() int { - return xxx_messageInfo_JWTData.Size(m) -} -func (m *JWTData) XXX_DiscardUnknown() { - xxx_messageInfo_JWTData.DiscardUnknown(m) -} - -var xxx_messageInfo_JWTData proto.InternalMessageInfo +func (m *JWTData) Reset() { *m = JWTData{} } +func (m *JWTData) String() string { return proto.CompactTextString(m) } +func (*JWTData) ProtoMessage() {} +func (*JWTData) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } func (m *JWTData) GetSecret() string { if m != nil { @@ -218,36 +117,13 @@ func (m *JWTData) GetSecret() string { } type Monitor struct { - TriggerLimits []float64 `protobuf:"fixed64,1,rep,packed,name=trigger_limits,json=triggerLimits,proto3" json:"trigger_limits,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Monitor) Reset() { *m = 
Monitor{} } -func (m *Monitor) String() string { return proto.CompactTextString(m) } -func (*Monitor) ProtoMessage() {} -func (*Monitor) Descriptor() ([]byte, []int) { - return fileDescriptor_087f3e8bbcac7a63, []int{4} -} - -func (m *Monitor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Monitor.Unmarshal(m, b) -} -func (m *Monitor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Monitor.Marshal(b, m, deterministic) -} -func (m *Monitor) XXX_Merge(src proto.Message) { - xxx_messageInfo_Monitor.Merge(m, src) -} -func (m *Monitor) XXX_Size() int { - return xxx_messageInfo_Monitor.Size(m) -} -func (m *Monitor) XXX_DiscardUnknown() { - xxx_messageInfo_Monitor.DiscardUnknown(m) + TriggerLimits []float64 `protobuf:"fixed64,1,rep,packed,name=trigger_limits,json=triggerLimits" json:"trigger_limits,omitempty"` } -var xxx_messageInfo_Monitor proto.InternalMessageInfo +func (m *Monitor) Reset() { *m = Monitor{} } +func (m *Monitor) String() string { return proto.CompactTextString(m) } +func (*Monitor) ProtoMessage() {} +func (*Monitor) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } func (m *Monitor) GetTriggerLimits() []float64 { if m != nil { @@ -257,65 +133,42 @@ func (m *Monitor) GetTriggerLimits() []float64 { } type SessionState struct { - LastCheck int64 `protobuf:"varint,1,opt,name=last_check,json=lastCheck,proto3" json:"last_check,omitempty"` - Allowance float64 `protobuf:"fixed64,2,opt,name=allowance,proto3" json:"allowance,omitempty"` - Rate float64 `protobuf:"fixed64,3,opt,name=rate,proto3" json:"rate,omitempty"` - Per float64 `protobuf:"fixed64,4,opt,name=per,proto3" json:"per,omitempty"` - Expires int64 `protobuf:"varint,5,opt,name=expires,proto3" json:"expires,omitempty"` - QuotaMax int64 `protobuf:"varint,6,opt,name=quota_max,json=quotaMax,proto3" json:"quota_max,omitempty"` - QuotaRenews int64 `protobuf:"varint,7,opt,name=quota_renews,json=quotaRenews,proto3" json:"quota_renews,omitempty"` - 
QuotaRemaining int64 `protobuf:"varint,8,opt,name=quota_remaining,json=quotaRemaining,proto3" json:"quota_remaining,omitempty"` - QuotaRenewalRate int64 `protobuf:"varint,9,opt,name=quota_renewal_rate,json=quotaRenewalRate,proto3" json:"quota_renewal_rate,omitempty"` - AccessRights map[string]*AccessDefinition `protobuf:"bytes,10,rep,name=access_rights,json=accessRights,proto3" json:"access_rights,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - OrgId string `protobuf:"bytes,11,opt,name=org_id,json=orgId,proto3" json:"org_id,omitempty"` - OauthClientId string `protobuf:"bytes,12,opt,name=oauth_client_id,json=oauthClientId,proto3" json:"oauth_client_id,omitempty"` - OauthKeys map[string]string `protobuf:"bytes,13,rep,name=oauth_keys,json=oauthKeys,proto3" json:"oauth_keys,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - BasicAuthData *BasicAuthData `protobuf:"bytes,14,opt,name=basic_auth_data,json=basicAuthData,proto3" json:"basic_auth_data,omitempty"` - JwtData *JWTData `protobuf:"bytes,15,opt,name=jwt_data,json=jwtData,proto3" json:"jwt_data,omitempty"` - HmacEnabled bool `protobuf:"varint,16,opt,name=hmac_enabled,json=hmacEnabled,proto3" json:"hmac_enabled,omitempty"` - HmacSecret string `protobuf:"bytes,17,opt,name=hmac_secret,json=hmacSecret,proto3" json:"hmac_secret,omitempty"` - IsInactive bool `protobuf:"varint,18,opt,name=is_inactive,json=isInactive,proto3" json:"is_inactive,omitempty"` - ApplyPolicyId string `protobuf:"bytes,19,opt,name=apply_policy_id,json=applyPolicyId,proto3" json:"apply_policy_id,omitempty"` - DataExpires int64 `protobuf:"varint,20,opt,name=data_expires,json=dataExpires,proto3" json:"data_expires,omitempty"` - Monitor *Monitor `protobuf:"bytes,21,opt,name=monitor,proto3" json:"monitor,omitempty"` - EnableDetailedRecording bool `protobuf:"varint,22,opt,name=enable_detailed_recording,json=enableDetailedRecording,proto3" 
json:"enable_detailed_recording,omitempty"` - Metadata map[string]string `protobuf:"bytes,23,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Tags []string `protobuf:"bytes,24,rep,name=tags,proto3" json:"tags,omitempty"` - Alias string `protobuf:"bytes,25,opt,name=alias,proto3" json:"alias,omitempty"` - LastUpdated string `protobuf:"bytes,26,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` - IdExtractorDeadline int64 `protobuf:"varint,27,opt,name=id_extractor_deadline,json=idExtractorDeadline,proto3" json:"id_extractor_deadline,omitempty"` - SessionLifetime int64 `protobuf:"varint,28,opt,name=session_lifetime,json=sessionLifetime,proto3" json:"session_lifetime,omitempty"` - ApplyPolicies []string `protobuf:"bytes,29,rep,name=apply_policies,json=applyPolicies,proto3" json:"apply_policies,omitempty"` - Certificate string `protobuf:"bytes,30,opt,name=certificate,proto3" json:"certificate,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SessionState) Reset() { *m = SessionState{} } -func (m *SessionState) String() string { return proto.CompactTextString(m) } -func (*SessionState) ProtoMessage() {} -func (*SessionState) Descriptor() ([]byte, []int) { - return fileDescriptor_087f3e8bbcac7a63, []int{5} -} - -func (m *SessionState) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SessionState.Unmarshal(m, b) -} -func (m *SessionState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SessionState.Marshal(b, m, deterministic) -} -func (m *SessionState) XXX_Merge(src proto.Message) { - xxx_messageInfo_SessionState.Merge(m, src) -} -func (m *SessionState) XXX_Size() int { - return xxx_messageInfo_SessionState.Size(m) -} -func (m *SessionState) XXX_DiscardUnknown() { - xxx_messageInfo_SessionState.DiscardUnknown(m) -} - -var 
xxx_messageInfo_SessionState proto.InternalMessageInfo + LastCheck int64 `protobuf:"varint,1,opt,name=last_check,json=lastCheck" json:"last_check,omitempty"` + Allowance float64 `protobuf:"fixed64,2,opt,name=allowance" json:"allowance,omitempty"` + Rate float64 `protobuf:"fixed64,3,opt,name=rate" json:"rate,omitempty"` + Per float64 `protobuf:"fixed64,4,opt,name=per" json:"per,omitempty"` + Expires int64 `protobuf:"varint,5,opt,name=expires" json:"expires,omitempty"` + QuotaMax int64 `protobuf:"varint,6,opt,name=quota_max,json=quotaMax" json:"quota_max,omitempty"` + QuotaRenews int64 `protobuf:"varint,7,opt,name=quota_renews,json=quotaRenews" json:"quota_renews,omitempty"` + QuotaRemaining int64 `protobuf:"varint,8,opt,name=quota_remaining,json=quotaRemaining" json:"quota_remaining,omitempty"` + QuotaRenewalRate int64 `protobuf:"varint,9,opt,name=quota_renewal_rate,json=quotaRenewalRate" json:"quota_renewal_rate,omitempty"` + AccessRights map[string]*AccessDefinition `protobuf:"bytes,10,rep,name=access_rights,json=accessRights" json:"access_rights,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + OrgId string `protobuf:"bytes,11,opt,name=org_id,json=orgId" json:"org_id,omitempty"` + OauthClientId string `protobuf:"bytes,12,opt,name=oauth_client_id,json=oauthClientId" json:"oauth_client_id,omitempty"` + OauthKeys map[string]string `protobuf:"bytes,13,rep,name=oauth_keys,json=oauthKeys" json:"oauth_keys,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + BasicAuthData *BasicAuthData `protobuf:"bytes,14,opt,name=basic_auth_data,json=basicAuthData" json:"basic_auth_data,omitempty"` + JwtData *JWTData `protobuf:"bytes,15,opt,name=jwt_data,json=jwtData" json:"jwt_data,omitempty"` + HmacEnabled bool `protobuf:"varint,16,opt,name=hmac_enabled,json=hmacEnabled" json:"hmac_enabled,omitempty"` + HmacSecret string `protobuf:"bytes,17,opt,name=hmac_secret,json=hmacSecret" json:"hmac_secret,omitempty"` + 
IsInactive bool `protobuf:"varint,18,opt,name=is_inactive,json=isInactive" json:"is_inactive,omitempty"` + ApplyPolicyId string `protobuf:"bytes,19,opt,name=apply_policy_id,json=applyPolicyId" json:"apply_policy_id,omitempty"` + DataExpires int64 `protobuf:"varint,20,opt,name=data_expires,json=dataExpires" json:"data_expires,omitempty"` + Monitor *Monitor `protobuf:"bytes,21,opt,name=monitor" json:"monitor,omitempty"` + EnableDetailedRecording bool `protobuf:"varint,22,opt,name=enable_detailed_recording,json=enableDetailedRecording" json:"enable_detailed_recording,omitempty"` + Metadata map[string]string `protobuf:"bytes,23,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Tags []string `protobuf:"bytes,24,rep,name=tags" json:"tags,omitempty"` + Alias string `protobuf:"bytes,25,opt,name=alias" json:"alias,omitempty"` + LastUpdated string `protobuf:"bytes,26,opt,name=last_updated,json=lastUpdated" json:"last_updated,omitempty"` + IdExtractorDeadline int64 `protobuf:"varint,27,opt,name=id_extractor_deadline,json=idExtractorDeadline" json:"id_extractor_deadline,omitempty"` + SessionLifetime int64 `protobuf:"varint,28,opt,name=session_lifetime,json=sessionLifetime" json:"session_lifetime,omitempty"` + ApplyPolicies []string `protobuf:"bytes,29,rep,name=apply_policies,json=applyPolicies" json:"apply_policies,omitempty"` + Certificate string `protobuf:"bytes,30,opt,name=certificate" json:"certificate,omitempty"` +} + +func (m *SessionState) Reset() { *m = SessionState{} } +func (m *SessionState) String() string { return proto.CompactTextString(m) } +func (*SessionState) ProtoMessage() {} +func (*SessionState) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} } func (m *SessionState) GetLastCheck() int64 { if m != nil { @@ -534,14 +387,11 @@ func init() { proto.RegisterType((*JWTData)(nil), "coprocess.JWTData") proto.RegisterType((*Monitor)(nil), "coprocess.Monitor") 
proto.RegisterType((*SessionState)(nil), "coprocess.SessionState") - proto.RegisterMapType((map[string]*AccessDefinition)(nil), "coprocess.SessionState.AccessRightsEntry") - proto.RegisterMapType((map[string]string)(nil), "coprocess.SessionState.MetadataEntry") - proto.RegisterMapType((map[string]string)(nil), "coprocess.SessionState.OauthKeysEntry") } -func init() { proto.RegisterFile("coprocess_session_state.proto", fileDescriptor_087f3e8bbcac7a63) } +func init() { proto.RegisterFile("coprocess_session_state.proto", fileDescriptor4) } -var fileDescriptor_087f3e8bbcac7a63 = []byte{ +var fileDescriptor4 = []byte{ // 933 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x4f, 0x6f, 0x1b, 0xb7, 0x13, 0x85, 0x2c, 0xdb, 0x92, 0x66, 0x25, 0x5b, 0x61, 0xec, 0x84, 0xb6, 0xe3, 0xdf, 0x4f, 0x16, diff --git a/gateway/coprocess_api.go b/coprocess_api.go similarity index 93% rename from gateway/coprocess_api.go rename to coprocess_api.go index c13dad91fac9..25b72f9775b5 100644 --- a/gateway/coprocess_api.go +++ b/coprocess_api.go @@ -1,16 +1,16 @@ // +build coprocess // +build !grpc -package gateway +package main /* #include -#include "../coprocess/api.h" +#include "coprocess/api.h" #ifdef ENABLE_PYTHON -#include "../coprocess/python/dispatcher.h" -#include "../coprocess/python/binding.h" +#include "coprocess/python/dispatcher.h" +#include "coprocess/python/binding.h" #endif */ import "C" diff --git a/gateway/coprocess_bundle.go b/coprocess_bundle.go similarity index 99% rename from gateway/coprocess_bundle.go rename to coprocess_bundle.go index 584a5f1ff39a..a6d4539568d6 100644 --- a/gateway/coprocess_bundle.go +++ b/coprocess_bundle.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "github.com/Sirupsen/logrus" diff --git a/gateway/coprocess_bundle_test.go b/coprocess_bundle_test.go similarity index 90% rename from gateway/coprocess_bundle_test.go rename to coprocess_bundle_test.go index 
3c0ef71dba85..718534a72abc 100644 --- a/gateway/coprocess_bundle_test.go +++ b/coprocess_bundle_test.go @@ -1,6 +1,6 @@ // +build !python -package gateway +package main import ( "crypto/md5" @@ -30,10 +30,10 @@ var grpcBundleWithAuthCheck = map[string]string{ } func TestBundleLoader(t *testing.T) { - bundleID := RegisterBundle("grpc_with_auth_check", grpcBundleWithAuthCheck) + bundleID := registerBundle("grpc_with_auth_check", grpcBundleWithAuthCheck) t.Run("Nonexistent bundle", func(t *testing.T) { - specs := BuildAndLoadAPI(func(spec *APISpec) { + specs := buildAndLoadAPI(func(spec *APISpec) { spec.CustomMiddlewareBundle = "nonexistent.zip" }) err := loadBundle(specs[0]) @@ -43,7 +43,7 @@ func TestBundleLoader(t *testing.T) { }) t.Run("Existing bundle with auth check", func(t *testing.T) { - specs := BuildAndLoadAPI(func(spec *APISpec) { + specs := buildAndLoadAPI(func(spec *APISpec) { spec.CustomMiddlewareBundle = bundleID }) spec := specs[0] diff --git a/gateway/coprocess_dummy.go b/coprocess_dummy.go similarity index 92% rename from gateway/coprocess_dummy.go rename to coprocess_dummy.go index f10546a8fc5e..8dd14755796d 100644 --- a/gateway/coprocess_dummy.go +++ b/coprocess_dummy.go @@ -1,6 +1,6 @@ // +build !coprocess -package gateway +package main import ( "net/http" @@ -33,9 +33,6 @@ type CoProcessMiddleware struct { HookType coprocess.HookType HookName string MiddlewareDriver apidef.MiddlewareDriver - RawBodyOnly bool - - successHandler *SuccessHandler } func (m *CoProcessMiddleware) Name() string { @@ -63,4 +60,4 @@ func CoProcessInit() error { return nil } -func DoCoprocessReload() {} +func doCoprocessReload() {} diff --git a/gateway/coprocess_events.go b/coprocess_events.go similarity index 98% rename from gateway/coprocess_events.go rename to coprocess_events.go index 82cf5777b6be..810e1d57ccb5 100644 --- a/gateway/coprocess_events.go +++ b/coprocess_events.go @@ -1,6 +1,6 @@ // +build coprocess -package gateway +package main import ( "encoding/json" 
diff --git a/gateway/coprocess_grpc.go b/coprocess_grpc.go similarity index 96% rename from gateway/coprocess_grpc.go rename to coprocess_grpc.go index 233268acbfb3..b0acb65e521d 100644 --- a/gateway/coprocess_grpc.go +++ b/coprocess_grpc.go @@ -1,7 +1,7 @@ // +build coprocess // +build grpc -package gateway +package main import ( "errors" @@ -18,6 +18,9 @@ import ( "github.com/TykTechnologies/tyk/coprocess" ) +// CoProcessName specifies the driver name. +const CoProcessName = apidef.GrpcDriver + // MessageType sets the default message type. var MessageType = coprocess.ProtobufMessage @@ -84,8 +87,6 @@ func (d *GRPCDispatcher) HandleMiddlewareCache(b *apidef.BundleManifest, basePat // NewCoProcessDispatcher wraps all the actions needed for this CP. func NewCoProcessDispatcher() (coprocess.Dispatcher, error) { - MessageType = coprocess.ProtobufMessage - CoProcessName = apidef.GrpcDriver if config.Global().CoProcessOptions.CoProcessGRPCServer == "" { return nil, errors.New("No gRPC URL is set") } diff --git a/coprocess/grpc/coprocess_grpc_test.go b/coprocess_grpc_test.go similarity index 93% rename from coprocess/grpc/coprocess_grpc_test.go rename to coprocess_grpc_test.go index 88dfd76ca411..67ab30661a09 100644 --- a/coprocess/grpc/coprocess_grpc_test.go +++ b/coprocess_grpc_test.go @@ -1,7 +1,7 @@ // +build coprocess // +build grpc -package grpc +package main import ( "bytes" @@ -10,7 +10,6 @@ import ( "mime/multipart" "net" "net/http" - "os" "strings" "testing" @@ -21,7 +20,6 @@ import ( "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" "github.com/TykTechnologies/tyk/coprocess" - "github.com/TykTechnologies/tyk/gateway" "github.com/TykTechnologies/tyk/test" "github.com/TykTechnologies/tyk/user" ) @@ -122,9 +120,9 @@ func newTestGRPCServer() (s *grpc.Server) { } func loadTestGRPCAPIs() { - gateway.BuildAndLoadAPI(func(spec *gateway.APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.APIID = "1" - spec.OrgID = gateway.MockOrgID + 
spec.OrgID = mockOrgID spec.Auth = apidef.Auth{ AuthHeaderName: "authorization", } @@ -149,9 +147,9 @@ func loadTestGRPCAPIs() { }, Driver: apidef.GrpcDriver, } - }, func(spec *gateway.APISpec) { + }, func(spec *APISpec) { spec.APIID = "2" - spec.OrgID = gateway.MockOrgID + spec.OrgID = mockOrgID spec.Auth = apidef.Auth{ AuthHeaderName: "authorization", } @@ -176,7 +174,7 @@ func loadTestGRPCAPIs() { }, Driver: apidef.GrpcDriver, } - }, func(spec *gateway.APISpec) { + }, func(spec *APISpec) { spec.APIID = "3" spec.OrgID = "default" spec.Auth = apidef.Auth{ @@ -206,7 +204,7 @@ func loadTestGRPCAPIs() { }) } -func startTykWithGRPC() (*gateway.Test, *grpc.Server) { +func startTykWithGRPC() (*tykTestServer, *grpc.Server) { // Setup the gRPC server: listener, _ := net.Listen("tcp", grpcListenAddr) grpcServer := newTestGRPCServer() @@ -217,23 +215,19 @@ func startTykWithGRPC() (*gateway.Test, *grpc.Server) { EnableCoProcess: true, CoProcessGRPCServer: grpcListenPath, } - ts := gateway.StartTest(gateway.TestConfig{CoprocessConfig: cfg}) + ts := newTykTestServer(tykTestServerConfig{coprocessConfig: cfg}) // Load test APIs: loadTestGRPCAPIs() return &ts, grpcServer } -func TestMain(m *testing.M) { - os.Exit(gateway.InitTestMain(m)) -} - func TestGRPCDispatch(t *testing.T) { ts, grpcServer := startTykWithGRPC() defer ts.Close() defer grpcServer.Stop() - keyID := gateway.CreateSession(func(s *user.SessionState) { + keyID := createSession(func(s *user.SessionState) { s.MetaData = map[string]interface{}{ "testkey": map[string]interface{}{"nestedkey": "nestedvalue"}, "testkey2": "testvalue", @@ -255,7 +249,7 @@ func TestGRPCDispatch(t *testing.T) { if err != nil { t.Fatalf("Couldn't read response body: %s", err.Error()) } - var testResponse gateway.TestHttpResponse + var testResponse testHttpResponse err = json.Unmarshal(data, &testResponse) if err != nil { t.Fatalf("Couldn't unmarshal test response JSON: %s", err.Error()) @@ -270,7 +264,7 @@ func TestGRPCDispatch(t *testing.T) 
{ }) t.Run("Pre Hook with UTF-8/non-UTF-8 request data", func(t *testing.T) { - fileData := gateway.GenerateTestBinaryData() + fileData := generateTestBinaryData() var buf bytes.Buffer multipartWriter := multipart.NewWriter(&buf) file, err := multipartWriter.CreateFormFile("file", "test.bin") @@ -316,7 +310,7 @@ func BenchmarkGRPCDispatch(b *testing.B) { defer ts.Close() defer grpcServer.Stop() - keyID := gateway.CreateSession(func(s *user.SessionState) {}) + keyID := createSession(func(s *user.SessionState) {}) headers := map[string]string{"authorization": keyID} b.Run("Pre Hook with SetHeaders", func(b *testing.B) { diff --git a/gateway/coprocess_helpers.go b/coprocess_helpers.go similarity index 99% rename from gateway/coprocess_helpers.go rename to coprocess_helpers.go index 573fb4fc3d08..f04b4c68ee6e 100644 --- a/gateway/coprocess_helpers.go +++ b/coprocess_helpers.go @@ -1,6 +1,6 @@ // +build coprocess -package gateway +package main import ( "encoding/json" diff --git a/gateway/coprocess_id_extractor.go b/coprocess_id_extractor.go similarity index 99% rename from gateway/coprocess_id_extractor.go rename to coprocess_id_extractor.go index 6633d026e39e..0212a877be88 100644 --- a/gateway/coprocess_id_extractor.go +++ b/coprocess_id_extractor.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/md5" @@ -11,7 +11,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/mitchellh/mapstructure" - xmlpath "gopkg.in/xmlpath.v2" + "gopkg.in/xmlpath.v2" "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/regexp" diff --git a/coprocess/python/coprocess_id_extractor_python_test.go b/coprocess_id_extractor_python_test.go similarity index 89% rename from coprocess/python/coprocess_id_extractor_python_test.go rename to coprocess_id_extractor_python_test.go index 9595ac1ac6af..ce2a1b779660 100644 --- a/coprocess/python/coprocess_id_extractor_python_test.go +++ b/coprocess_id_extractor_python_test.go @@ -1,7 +1,7 @@ // +build coprocess // +build 
python -package python +package main import ( "net/url" @@ -9,7 +9,6 @@ import ( "time" "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/gateway" "github.com/TykTechnologies/tyk/test" ) @@ -148,25 +147,25 @@ def MyAuthHook(request, session, metadata, spec): // Our `pythonBundleWithAuthCheck` plugin restrict more then 1 call // With ID extractor, it should run multiple times (because cache) func TestValueExtractorHeaderSource(t *testing.T) { - ts := gateway.StartTest(gateway.TestConfig{ - CoprocessConfig: config.CoProcessConfig{ + ts := newTykTestServer(tykTestServerConfig{ + coprocessConfig: config.CoProcessConfig{ EnableCoProcess: true, }, - Delay: 10 * time.Millisecond, + delay: 10 * time.Millisecond, }) defer ts.Close() - spec := gateway.BuildAPI(func(spec *gateway.APISpec) { + spec := buildAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.UseKeylessAccess = false spec.EnableCoProcessAuth = true })[0] t.Run("Header value", func(t *testing.T) { - bundleID := gateway.RegisterBundle("id_extractor_header_value", pythonIDExtractorHeaderValue) + bundleID := registerBundle("id_extractor_header_value", pythonIDExtractorHeaderValue) spec.CustomMiddlewareBundle = bundleID spec.APIID = "api1" - gateway.LoadAPI(spec) + loadAPI(spec) time.Sleep(1 * time.Second) ts.Run(t, []test.TestCase{ @@ -176,11 +175,11 @@ func TestValueExtractorHeaderSource(t *testing.T) { }...) }) t.Run("Form value", func(t *testing.T) { - bundleID := gateway.RegisterBundle("id_extractor_form_value", pythonIDExtractorFormValue) + bundleID := registerBundle("id_extractor_form_value", pythonIDExtractorFormValue) spec.CustomMiddlewareBundle = bundleID spec.APIID = "api2" - gateway.LoadAPI(spec) + loadAPI(spec) time.Sleep(1 * time.Second) formHeaders := map[string]string{"Content-Type": "application/x-www-form-urlencoded"} @@ -192,11 +191,11 @@ func TestValueExtractorHeaderSource(t *testing.T) { }...) 
}) t.Run("Header regex", func(t *testing.T) { - bundleID := gateway.RegisterBundle("id_extractor_header_regex", pythonIDExtractorHeaderRegex) + bundleID := registerBundle("id_extractor_header_regex", pythonIDExtractorHeaderRegex) spec.CustomMiddlewareBundle = bundleID spec.APIID = "api3" - gateway.LoadAPI(spec) + loadAPI(spec) time.Sleep(1 * time.Second) ts.Run(t, []test.TestCase{ diff --git a/gateway/coprocess_id_extractor_test.go b/coprocess_id_extractor_test.go similarity index 98% rename from gateway/coprocess_id_extractor_test.go rename to coprocess_id_extractor_test.go index 32e652deaead..113a3ab04ca5 100644 --- a/gateway/coprocess_id_extractor_test.go +++ b/coprocess_id_extractor_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/md5" @@ -30,16 +30,16 @@ func createSpecTestFrom(t testing.TB, def *apidef.APIDefinition) *APISpec { loader := APIDefinitionLoader{} spec := loader.MakeSpec(def, nil) tname := t.Name() - redisStore := &storage.RedisCluster{KeyPrefix: tname + "-apikey."} - healthStore := &storage.RedisCluster{KeyPrefix: tname + "-apihealth."} - orgStore := &storage.RedisCluster{KeyPrefix: tname + "-orgKey."} + redisStore := storage.RedisCluster{KeyPrefix: tname + "-apikey."} + healthStore := storage.RedisCluster{KeyPrefix: tname + "-apihealth."} + orgStore := storage.RedisCluster{KeyPrefix: tname + "-orgKey."} spec.Init(redisStore, redisStore, healthStore, orgStore) return spec } func prepareExtractor(t testing.TB, extractorSource apidef.IdExtractorSource, extractorType apidef.IdExtractorType, config map[string]interface{}) (IdExtractor, *APISpec) { def := &apidef.APIDefinition{ - OrgID: MockOrgID, + OrgID: mockOrgID, CustomMiddleware: apidef.MiddlewareSection{ IdExtractor: apidef.MiddlewareIdExtractor{ ExtractFrom: extractorSource, @@ -85,7 +85,7 @@ func prepareExtractorFormRequest(values map[string]string) *http.Request { func generateSessionID(input string) string { data := []byte(input) tokenID := fmt.Sprintf("%x", 
md5.Sum(data)) - return generateToken(MockOrgID, tokenID) + return generateToken(mockOrgID, tokenID) } func TestValueExtractor(t *testing.T) { diff --git a/gateway/coprocess_lua.go b/coprocess_lua.go similarity index 95% rename from gateway/coprocess_lua.go rename to coprocess_lua.go index b95c5ddd7abe..c2ad8433bb81 100644 --- a/gateway/coprocess_lua.go +++ b/coprocess_lua.go @@ -1,7 +1,7 @@ // +build coprocess // +build lua -package gateway +package main /* #cgo pkg-config: luajit @@ -10,9 +10,9 @@ package gateway #include #include -#include "../coprocess/api.h" +#include "coprocess/api.h" -#include "../coprocess/lua/binding.h" +#include "coprocess/lua/binding.h" #include #include @@ -78,6 +78,9 @@ import ( "github.com/TykTechnologies/tyk/coprocess" ) +// CoProcessName specifies the driver name. +const CoProcessName = apidef.LuaDriver + const ( // ModuleBasePath points to the Tyk modules path. ModuleBasePath = "coprocess/lua" @@ -85,6 +88,9 @@ const ( MiddlewareBasePath = "middleware/lua" ) +// MessageType sets the default message type. +var MessageType = coprocess.JsonMessage + // gMiddlewareCache will hold LuaDispatcher.gMiddlewareCache. var gMiddlewareCache map[string]string var gModuleCache map[string]string @@ -200,12 +206,6 @@ func (d *LuaDispatcher) DispatchEvent(eventJSON []byte) { // NewCoProcessDispatcher wraps all the actions needed for this CP. func NewCoProcessDispatcher() (coprocess.Dispatcher, error) { - // CoProcessName specifies the driver name. - CoProcessName = apidef.LuaDriver - - // MessageType sets the default message type. 
- MessageType = coprocess.JsonMessage - dispatcher := &LuaDispatcher{} dispatcher.LoadModules() dispatcher.Reload() diff --git a/gateway/coprocess_native.go b/coprocess_native.go similarity index 90% rename from gateway/coprocess_native.go rename to coprocess_native.go index d3cdd36488b6..aac014aebc47 100644 --- a/gateway/coprocess_native.go +++ b/coprocess_native.go @@ -1,18 +1,18 @@ // +build coprocess // +build !grpc -package gateway +package main /* #cgo python CFLAGS: -DENABLE_PYTHON #include #include -#include "../coprocess/api.h" +#include "coprocess/api.h" #ifdef ENABLE_PYTHON -#include "../coprocess/python/dispatcher.h" -#include "../coprocess/python/binding.h" +#include "coprocess/python/dispatcher.h" +#include "coprocess/python/binding.h" #endif */ @@ -29,8 +29,6 @@ import ( "unsafe" ) -var MessageType int - // Dispatch prepares a CoProcessMessage, sends it to the GlobalDispatcher and gets a reply. func (c *CoProcessor) Dispatch(object *coprocess.Object) (*coprocess.Object, error) { if GlobalDispatcher == nil { @@ -61,6 +59,8 @@ func (c *CoProcessor) Dispatch(object *coprocess.Object) (*coprocess.Object, err // Call the dispatcher (objectPtr is freed during this call): if err = GlobalDispatcher.Dispatch(unsafe.Pointer(objectPtr), unsafe.Pointer(newObjectPtr)); err != nil { + C.free(unsafe.Pointer(newObjectPtr.p_data)) + C.free(unsafe.Pointer(newObjectPtr)) return nil, err } newObjectBytes := C.GoBytes(newObjectPtr.p_data, newObjectPtr.length) diff --git a/gateway/coprocess_python.go b/coprocess_python.go similarity index 95% rename from gateway/coprocess_python.go rename to coprocess_python.go index 662e94805722..51138a267c4e 100644 --- a/gateway/coprocess_python.go +++ b/coprocess_python.go @@ -1,7 +1,7 @@ // +build coprocess // +build python -package gateway +package main /* #cgo pkg-config: python3 @@ -13,14 +13,14 @@ package gateway #include #include -#include "../coprocess/sds/sds.h" +#include "coprocess/sds/sds.h" -#include "../coprocess/api.h" 
+#include "coprocess/api.h" -#include "../coprocess/python/binding.h" -#include "../coprocess/python/dispatcher.h" +#include "coprocess/python/binding.h" +#include "coprocess/python/dispatcher.h" -#include "../coprocess/python/tyk/gateway_wrapper.h" +#include "coprocess/python/tyk/gateway_wrapper.h" PyGILState_STATE gilState; @@ -192,6 +192,12 @@ import ( "github.com/TykTechnologies/tyk/coprocess" ) +// CoProcessName declares the driver name. +const CoProcessName = apidef.PythonDriver + +// MessageType sets the default message type. +var MessageType = coprocess.ProtobufMessage + // PythonDispatcher implements a coprocess.Dispatcher type PythonDispatcher struct { coprocess.Dispatcher @@ -297,12 +303,6 @@ func getBundlePaths() []string { // NewCoProcessDispatcher wraps all the actions needed for this CP. func NewCoProcessDispatcher() (dispatcher coprocess.Dispatcher, err error) { - // MessageType sets the default message type. - MessageType = coprocess.ProtobufMessage - - // CoProcessName declares the driver name. 
- CoProcessName = apidef.PythonDriver - workDir := config.Global().CoProcessOptions.PythonPathPrefix dispatcherPath := filepath.Join(workDir, "coprocess", "python") diff --git a/coprocess_python_api.c b/coprocess_python_api.c new file mode 120000 index 000000000000..843480a5d814 --- /dev/null +++ b/coprocess_python_api.c @@ -0,0 +1 @@ +coprocess/python/tyk/gateway_wrapper.c \ No newline at end of file diff --git a/coprocess/python/coprocess_python_test.go b/coprocess_python_test.go similarity index 89% rename from coprocess/python/coprocess_python_test.go rename to coprocess_python_test.go index ce84dee7977f..3db06979c324 100644 --- a/coprocess/python/coprocess_python_test.go +++ b/coprocess_python_test.go @@ -1,17 +1,15 @@ // +build coprocess // +build python -package python +package main import ( "bytes" "mime/multipart" - "os" "testing" "time" "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/gateway" "github.com/TykTechnologies/tyk/test" "github.com/TykTechnologies/tyk/user" ) @@ -132,23 +130,19 @@ def MyPreHook(request, session, metadata, spec): `, } -func TestMain(m *testing.M) { - os.Exit(gateway.InitTestMain(m)) -} - func TestPythonBundles(t *testing.T) { - ts := gateway.StartTest(gateway.TestConfig{ - CoprocessConfig: config.CoProcessConfig{ + ts := newTykTestServer(tykTestServerConfig{ + coprocessConfig: config.CoProcessConfig{ EnableCoProcess: true, }}) defer ts.Close() - authCheckBundle := gateway.RegisterBundle("python_with_auth_check", pythonBundleWithAuthCheck) - postHookBundle := gateway.RegisterBundle("python_with_post_hook", pythonBundleWithPostHook) - preHookBundle := gateway.RegisterBundle("python_with_pre_hook", pythonBundleWithPreHook) + authCheckBundle := registerBundle("python_with_auth_check", pythonBundleWithAuthCheck) + postHookBundle := registerBundle("python_with_post_hook", pythonBundleWithPostHook) + preHookBundle := registerBundle("python_with_pre_hook", pythonBundleWithPreHook) t.Run("Single-file bundle with 
authentication hook", func(t *testing.T) { - gateway.BuildAndLoadAPI(func(spec *gateway.APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/test-api/" spec.UseKeylessAccess = false spec.EnableCoProcessAuth = true @@ -169,14 +163,14 @@ func TestPythonBundles(t *testing.T) { t.Run("Single-file bundle with post hook", func(t *testing.T) { - keyID := gateway.CreateSession(func(s *user.SessionState) { + keyID := createSession(func(s *user.SessionState) { s.MetaData = map[string]interface{}{ "testkey": map[string]interface{}{"nestedkey": "nestedvalue"}, "stringkey": "testvalue", } }) - gateway.BuildAndLoadAPI(func(spec *gateway.APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/test-api-2/" spec.UseKeylessAccess = false spec.EnableCoProcessAuth = false @@ -194,7 +188,7 @@ func TestPythonBundles(t *testing.T) { }) t.Run("Single-file bundle with pre hook and UTF-8/non-UTF-8 request data", func(t *testing.T) { - gateway.BuildAndLoadAPI(func(spec *gateway.APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/test-api-2/" spec.UseKeylessAccess = true spec.EnableCoProcessAuth = false @@ -204,7 +198,7 @@ func TestPythonBundles(t *testing.T) { time.Sleep(1 * time.Second) - fileData := gateway.GenerateTestBinaryData() + fileData := generateTestBinaryData() var buf bytes.Buffer multipartWriter := multipart.NewWriter(&buf) file, err := multipartWriter.CreateFormFile("file", "test.bin") diff --git a/coprocess/coprocess_test.go b/coprocess_test.go similarity index 72% rename from coprocess/coprocess_test.go rename to coprocess_test.go index 02b85ae101c6..53cdcef3f2d5 100644 --- a/coprocess/coprocess_test.go +++ b/coprocess_test.go @@ -2,14 +2,13 @@ // +build !python // +build !grpc -package coprocess_test +package main import ( "encoding/json" "net/http" "net/http/httptest" "net/url" - "os" "testing" "github.com/golang/protobuf/proto" @@ -19,25 +18,20 @@ import ( "github.com/TykTechnologies/tyk/apidef" 
"github.com/TykTechnologies/tyk/coprocess" - "github.com/TykTechnologies/tyk/gateway" - logger "github.com/TykTechnologies/tyk/log" ) const baseMiddlewarePath = "middleware/python" var ( - testDispatcher, _ = gateway.NewCoProcessDispatcher() - log = logger.Get() + CoProcessName = apidef.MiddlewareDriver("test") + MessageType = coprocess.ProtobufMessage + testDispatcher, _ = NewCoProcessDispatcher() coprocessLog = log.WithFields(logrus.Fields{ "prefix": "coprocess", }) ) -func TestMain(m *testing.M) { - os.Exit(gateway.InitTestMain(m)) -} - /* Dispatcher functions */ func TestCoProcessDispatch(t *testing.T) { @@ -52,26 +46,26 @@ func TestCoProcessDispatch(t *testing.T) { } func TestCoProcessDispatchEvent(t *testing.T) { - spec := gateway.CreateSpecTest(t, basicCoProcessDef) + spec := createSpecTest(t, basicCoProcessDef) remote, _ := url.Parse(spec.Proxy.TargetURL) - proxy := gateway.TykNewSingleHostReverseProxy(remote, spec) - baseMid := gateway.BaseMiddleware{Spec: spec, Proxy: proxy} + proxy := TykNewSingleHostReverseProxy(remote, spec) + baseMid := BaseMiddleware{spec, proxy, coprocessLog} - meta := gateway.EventKeyFailureMeta{ - EventMetaDefault: gateway.EventMetaDefault{Message: "Auth Failure"}, + meta := EventKeyFailureMeta{ + EventMetaDefault: EventMetaDefault{Message: "Auth Failure"}, Path: "/", Origin: "127.0.0.1", Key: "abc", } - baseMid.FireEvent(gateway.EventAuthFailure, meta) + baseMid.FireEvent(EventAuthFailure, meta) - wrapper := gateway.CoProcessEventWrapper{} - if err := json.Unmarshal(<-gateway.CoProcessDispatchEvent, &wrapper); err != nil { + wrapper := CoProcessEventWrapper{} + if err := json.Unmarshal(<-CoProcessDispatchEvent, &wrapper); err != nil { t.Fatal(err) } - if wrapper.Event.Type != gateway.EventAuthFailure { + if wrapper.Event.Type != EventAuthFailure { t.Fatal("Wrong event type") } @@ -83,9 +77,9 @@ func TestCoProcessDispatchEvent(t *testing.T) { func TestCoProcessReload(t *testing.T) { // Use this as the GlobalDispatcher: - 
gateway.GlobalDispatcher = testDispatcher - gateway.DoCoprocessReload() - if !testDispatcher.Reloaded { + GlobalDispatcher = testDispatcher + doCoprocessReload() + if !testDispatcher.reloaded { t.Fatal("coprocess reload wasn't run") } } @@ -118,9 +112,9 @@ func TestCoProcessGetSetData(t *testing.T) { value := "testvalue" ttl := 1000 - gateway.TestTykStoreData(key, value, ttl) + TestTykStoreData(key, value, ttl) - retrievedValue := gateway.TestTykGetData("testkey") + retrievedValue := TestTykGetData("testkey") if retrievedValue != value { t.Fatal("Couldn't retrieve key value using CP API") @@ -128,53 +122,53 @@ func TestCoProcessGetSetData(t *testing.T) { } func TestCoProcessTykTriggerEvent(t *testing.T) { - gateway.TestTykTriggerEvent("testevent", "testpayload") + TestTykTriggerEvent("testevent", "testpayload") } /* Middleware */ -func buildCoProcessChain(spec *gateway.APISpec, hookName string, hookType coprocess.HookType, driver apidef.MiddlewareDriver) http.Handler { +func buildCoProcessChain(spec *APISpec, hookName string, hookType coprocess.HookType, driver apidef.MiddlewareDriver) http.Handler { remote, _ := url.Parse(spec.Proxy.TargetURL) - proxy := gateway.TykNewSingleHostReverseProxy(remote, spec) - proxyHandler := gateway.ProxyHandler(proxy, spec) - baseMid := gateway.BaseMiddleware{Spec: spec, Proxy: proxy} // TODO - mw := gateway.CreateCoProcessMiddleware(hookName, hookType, driver, baseMid) + proxy := TykNewSingleHostReverseProxy(remote, spec) + proxyHandler := ProxyHandler(proxy, spec) + baseMid := BaseMiddleware{spec, proxy, coprocessLog} + mw := CreateCoProcessMiddleware(hookName, hookType, driver, baseMid) return alice.New(mw).Then(proxyHandler) } func TestCoProcessMiddleware(t *testing.T) { - spec := gateway.CreateSpecTest(t, basicCoProcessDef) + spec := createSpecTest(t, basicCoProcessDef) chain := buildCoProcessChain(spec, "hook_test", coprocess.HookType_Pre, apidef.MiddlewareDriver("python")) - session := gateway.CreateStandardSession() + 
session := createStandardSession() spec.SessionManager.UpdateSession("abc", session, 60, false) recorder := httptest.NewRecorder() - req := gateway.TestReq(t, "GET", "/headers", nil) + req := testReq(t, "GET", "/headers", nil) req.Header.Set("authorization", "abc") chain.ServeHTTP(recorder, req) } func TestCoProcessObjectPostProcess(t *testing.T) { - spec := gateway.CreateSpecTest(t, basicCoProcessDef) + spec := createSpecTest(t, basicCoProcessDef) chain := buildCoProcessChain(spec, "hook_test_object_postprocess", coprocess.HookType_Pre, apidef.MiddlewareDriver("python")) - session := gateway.CreateStandardSession() + session := createStandardSession() spec.SessionManager.UpdateSession("abc", session, 60, false) recorder := httptest.NewRecorder() - req := gateway.TestReq(t, "GET", "/headers", nil) + req := testReq(t, "GET", "/headers", nil) req.Header.Set("authorization", "abc") req.Header.Set("Deletethisheader", "value") chain.ServeHTTP(recorder, req) - resp := gateway.TestHttpResponse{} + resp := testHttpResponse{} if err := json.Unmarshal(recorder.Body.Bytes(), &resp); err != nil { t.Fatal(err) } @@ -188,12 +182,12 @@ func TestCoProcessObjectPostProcess(t *testing.T) { recorder = httptest.NewRecorder() uri := "/get?a=a_value&b=123&remove=3" - req = gateway.TestReq(t, "GET", uri, nil) + req = testReq(t, "GET", uri, nil) req.Header.Set("authorization", "abc") chain.ServeHTTP(recorder, req) - resp = gateway.TestHttpResponse{} + resp = testHttpResponse{} if err := json.Unmarshal(recorder.Body.Bytes(), &resp); err != nil { t.Fatal(err) } @@ -213,16 +207,16 @@ func TestCoProcessObjectPostProcess(t *testing.T) { func TestCoProcessAuth(t *testing.T) { t.Log("CP AUTH") - spec := gateway.CreateSpecTest(t, protectedCoProcessDef) + spec := createSpecTest(t, protectedCoProcessDef) chain := buildCoProcessChain(spec, "hook_test_bad_auth", coprocess.HookType_CustomKeyCheck, apidef.MiddlewareDriver("python")) - session := gateway.CreateStandardSession() + session := 
createStandardSession() spec.SessionManager.UpdateSession("abc", session, 60, false) recorder := httptest.NewRecorder() - req := gateway.TestReq(t, "GET", "/headers", nil) + req := testReq(t, "GET", "/headers", nil) req.Header.Set("authorization", "abc") chain.ServeHTTP(recorder, req) @@ -230,18 +224,18 @@ func TestCoProcessAuth(t *testing.T) { if recorder.Code != 403 { t.Fatal("Authentication should fail! But it's returning:", recorder.Code) } - <-gateway.CoProcessDispatchEvent + <-CoProcessDispatchEvent } func TestCoProcessReturnOverrides(t *testing.T) { - spec := gateway.CreateSpecTest(t, basicCoProcessDef) + spec := createSpecTest(t, basicCoProcessDef) chain := buildCoProcessChain(spec, "hook_test_return_overrides", coprocess.HookType_Pre, apidef.MiddlewareDriver("python")) - session := gateway.CreateStandardSession() + session := createStandardSession() spec.SessionManager.UpdateSession("abc", session, 60, false) recorder := httptest.NewRecorder() - req := gateway.TestReq(t, "GET", "/headers", nil) + req := testReq(t, "GET", "/headers", nil) req.Header.Set("authorization", "abc") chain.ServeHTTP(recorder, req) if recorder.Code != 200 || recorder.Body.String() != "body" { @@ -254,14 +248,14 @@ func TestCoProcessReturnOverrides(t *testing.T) { } func TestCoProcessReturnOverridesErrorMessage(t *testing.T) { - spec := gateway.CreateSpecTest(t, basicCoProcessDef) + spec := createSpecTest(t, basicCoProcessDef) chain := buildCoProcessChain(spec, "hook_test_return_overrides_error", coprocess.HookType_Pre, apidef.MiddlewareDriver("python")) - session := gateway.CreateStandardSession() + session := createStandardSession() spec.SessionManager.UpdateSession("abc", session, 60, false) recorder := httptest.NewRecorder() - req := gateway.TestReq(t, "GET", "/headers", nil) + req := testReq(t, "GET", "/headers", nil) req.Header.Set("authorization", "abc") chain.ServeHTTP(recorder, req) if recorder.Code != 401 || recorder.Body.String() != "{\n \"error\": \"custom error 
message\"\n}" { @@ -292,7 +286,7 @@ const basicCoProcessDef = `{ }, "proxy": { "listen_path": "/v1", - "target_url": "` + gateway.TestHttpGet + `" + "target_url": "` + testHttpGet + `" } }` @@ -322,6 +316,6 @@ const protectedCoProcessDef = `{ }, "proxy": { "listen_path": "/v1", - "target_url": "` + gateway.TestHttpGet + `" + "target_url": "` + testHttpGet + `" } }` diff --git a/gateway/coprocess_testutil.go b/coprocess_test_helpers.go similarity index 97% rename from gateway/coprocess_testutil.go rename to coprocess_test_helpers.go index 4e3a355b40ab..da382e7f9c4f 100644 --- a/gateway/coprocess_testutil.go +++ b/coprocess_test_helpers.go @@ -3,13 +3,13 @@ // +build !lua // +build !grpc -package gateway +package main /* #include #include -#include "../coprocess/api.h" +#include "coprocess/api.h" void applyTestHooks(); @@ -42,7 +42,7 @@ var CoProcessDispatchEvent = make(chan []byte) type TestDispatcher struct { coprocess.Dispatcher - Reloaded bool + reloaded bool } /* Basic CoProcessDispatcher functions */ @@ -59,13 +59,12 @@ func (d *TestDispatcher) DispatchEvent(eventJSON []byte) { } func (d *TestDispatcher) Reload() { - d.Reloaded = true + d.reloaded = true } /* General test helpers */ func NewCoProcessDispatcher() (dispatcher *TestDispatcher, err error) { - MessageType = coprocess.ProtobufMessage d := &TestDispatcher{} GlobalDispatcher = d EnableCoProcess = true diff --git a/ctx/ctx.go b/ctx/ctx.go deleted file mode 100644 index 5daa1f04dab8..000000000000 --- a/ctx/ctx.go +++ /dev/null @@ -1,79 +0,0 @@ -package ctx - -import ( - "context" - "net/http" - - "github.com/TykTechnologies/tyk/storage" - "github.com/TykTechnologies/tyk/user" -) - -const ( - SessionData = iota - UpdateSession - AuthToken - HashedAuthToken - VersionData - VersionDefault - OrgSessionContext - ContextData - RetainHost - TrackThisEndpoint - DoNotTrackThisEndpoint - UrlRewritePath - RequestMethod - OrigRequestURL - LoopLevel - LoopLevelLimit - ThrottleLevel - ThrottleLevelLimit - Trace - 
CheckLoopLimits -) - -func setContext(r *http.Request, ctx context.Context) { - r2 := r.WithContext(ctx) - *r = *r2 -} - -func ctxSetSession(r *http.Request, s *user.SessionState, token string, scheduleUpdate bool) { - if s == nil { - panic("setting a nil context SessionData") - } - - if token == "" { - token = GetAuthToken(r) - } - - if s.KeyHashEmpty() { - s.SetKeyHash(storage.HashKey(token)) - } - - ctx := r.Context() - ctx = context.WithValue(ctx, SessionData, s) - ctx = context.WithValue(ctx, AuthToken, token) - - if scheduleUpdate { - ctx = context.WithValue(ctx, UpdateSession, true) - } - - setContext(r, ctx) -} - -func GetAuthToken(r *http.Request) string { - if v := r.Context().Value(AuthToken); v != nil { - return v.(string) - } - return "" -} - -func GetSession(r *http.Request) *user.SessionState { - if v := r.Context().Value(SessionData); v != nil { - return v.(*user.SessionState) - } - return nil -} - -func SetSession(r *http.Request, s *user.SessionState, token string, scheduleUpdate bool) { - ctxSetSession(r, s, token, scheduleUpdate) -} diff --git a/gateway/dashboard_register.go b/dashboard_register.go similarity index 85% rename from gateway/dashboard_register.go rename to dashboard_register.go index 373bf9cb2a43..dc5676ba1616 100644 --- a/gateway/dashboard_register.go +++ b/dashboard_register.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -9,11 +9,11 @@ import ( "net/http" "time" + "github.com/Sirupsen/logrus" + "github.com/TykTechnologies/tyk/config" ) -var dashLog = log.WithField("prefix", "dashboard") - type NodeResponseOK struct { Status string Message map[string]string @@ -62,21 +62,29 @@ func reLogin() { return } - dashLog.Info("Registering node (again).") + log.WithFields(logrus.Fields{ + "prefix": "main", + }).Info("Registering node (again).") DashService.StopBeating() if err := DashService.DeRegister(); err != nil { - dashLog.Error("Could not deregister: ", err) + log.WithFields(logrus.Fields{ + "prefix": "main", + 
}).Error("Could not deregister: ", err) } time.Sleep(5 * time.Second) if err := DashService.Register(); err != nil { - dashLog.Error("Could not register: ", err) + log.WithFields(logrus.Fields{ + "prefix": "main", + }).Error("Could not register: ", err) } else { go DashService.StartBeating() } - dashLog.Info("Recovering configurations, reloading...") + log.WithFields(logrus.Fields{ + "prefix": "main", + }).Info("Recovering configurations, reloading...") reloadURLStructure(nil) } @@ -87,7 +95,9 @@ func (h *HTTPDashboardHandler) Init() error { h.KeyQuotaTriggerEndpoint = buildConnStr("/system/key/quota_trigger") if h.Secret = config.Global().NodeSecret; h.Secret == "" { - dashLog.Fatal("Node secret is not set, required for dashboard connection") + log.WithFields(logrus.Fields{ + "prefix": "main", + }).Fatal("Node secret is not set, required for dashboard connection") } return nil } @@ -144,17 +154,16 @@ func (h *HTTPDashboardHandler) NotifyDashboardOfEvent(event interface{}) error { } func (h *HTTPDashboardHandler) Register() error { - dashLog.Info("Registering gateway node with Dashboard") req := h.newRequest(h.RegistrationEndpoint) c := initialiseClient(5 * time.Second) resp, err := c.Do(req) if err != nil { - dashLog.Errorf("Request failed with error %v; retrying in 5s", err) + log.Errorf("Request failed with error %v; retrying in 5s", err) time.Sleep(time.Second * 5) return h.Register() } else if resp != nil && resp.StatusCode != 200 { - dashLog.Errorf("Response failed with code %d; retrying in 5s", resp.StatusCode) + log.Errorf("Response failed with code %d; retrying in 5s", resp.StatusCode) time.Sleep(time.Second * 5) return h.Register() } @@ -169,16 +178,19 @@ func (h *HTTPDashboardHandler) Register() error { var found bool NodeID, found = val.Message["NodeID"] if !found { - dashLog.Error("Failed to register node, retrying in 5s") + log.Error("Failed to register node, retrying in 5s") time.Sleep(time.Second * 5) return h.Register() } - dashLog.WithField("id", 
NodeID).Info("Node Registered") + log.WithFields(logrus.Fields{ + "prefix": "dashboard", + "id": NodeID, + }).Info("Node registered") // Set the nonce ServiceNonce = val.Nonce - dashLog.Debug("Registration Finished: Nonce Set: ", ServiceNonce) + log.Debug("Registration Finished: Nonce Set: ", ServiceNonce) return nil } @@ -191,12 +203,12 @@ func (h *HTTPDashboardHandler) StartBeating() error { for !h.heartBeatStopSentinel { if err := h.sendHeartBeat(req, client); err != nil { - dashLog.Warning(err) + log.Warning(err) } time.Sleep(time.Second * 2) } - dashLog.Info("Stopped Heartbeat") + log.Info("Stopped Heartbeat") h.heartBeatStopSentinel = false return nil } @@ -264,7 +276,7 @@ func (h *HTTPDashboardHandler) DeRegister() error { // Set the nonce ServiceNonce = val.Nonce - dashLog.Info("De-registered.") + log.Info("De-registered.") return nil } diff --git a/gateway/distributed_rate_limiter.go b/distributed_rate_limiter.go similarity index 99% rename from gateway/distributed_rate_limiter.go rename to distributed_rate_limiter.go index d0e22c25eaff..913529bcdb63 100644 --- a/gateway/distributed_rate_limiter.go +++ b/distributed_rate_limiter.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/json" diff --git a/dnscache/manager.go b/dnscache/manager.go deleted file mode 100644 index 9510cb76fc58..000000000000 --- a/dnscache/manager.go +++ /dev/null @@ -1,162 +0,0 @@ -package dnscache - -import ( - "context" - "fmt" - "math/rand" - "net" - "time" - - "github.com/TykTechnologies/tyk/config" - - "github.com/Sirupsen/logrus" - - "github.com/TykTechnologies/tyk/log" -) - -var ( - logger = log.Get().WithField("prefix", "dnscache") -) - -type DialContextFunc func(ctx context.Context, network, address string) (net.Conn, error) - -// IDnsCacheManager is an interface for abstracting interaction with dns cache. 
Implemented by DnsCacheManager -type IDnsCacheManager interface { - InitDNSCaching(ttl, checkInterval time.Duration) - WrapDialer(dialer *net.Dialer) DialContextFunc - SetCacheStorage(cache IDnsCacheStorage) - CacheStorage() IDnsCacheStorage - IsCacheEnabled() bool - DisposeCache() -} - -// IDnsCacheStorage is an interface for working with cached storage of dns record. -// Wrapped by IDnsCacheManager/DnsCacheManager. Implemented by DnsCacheStorage -type IDnsCacheStorage interface { - FetchItem(key string) ([]string, error) - Get(key string) (DnsCacheItem, bool) - Set(key string, addrs []string) - Delete(key string) - Clear() -} - -// DnsCacheManager is responsible for in-memory dns query records cache. -// It allows to init dns caching and to hook into net/http dns resolution chain in order to cache query response ip records. -type DnsCacheManager struct { - cacheStorage IDnsCacheStorage - strategy config.IPsHandleStrategy - rand *rand.Rand -} - -// NewDnsCacheManager returns new empty/non-initialized DnsCacheManager -func NewDnsCacheManager(multipleIPsHandleStrategy config.IPsHandleStrategy) *DnsCacheManager { - manager := &DnsCacheManager{nil, multipleIPsHandleStrategy, nil} - return manager -} - -func (m *DnsCacheManager) SetCacheStorage(cache IDnsCacheStorage) { - m.cacheStorage = cache -} - -func (m *DnsCacheManager) CacheStorage() IDnsCacheStorage { - return m.cacheStorage -} - -func (m *DnsCacheManager) IsCacheEnabled() bool { - return m.cacheStorage != nil -} - -// WrapDialer returns wrapped version of net.Dialer#DialContext func with hooked up caching of dns queries. 
-// -// Actual dns server call occures in net.Resolver#LookupIPAddr method, -// linked to net.Dialer instance by net.Dialer#Resolver field -func (m *DnsCacheManager) WrapDialer(dialer *net.Dialer) DialContextFunc { - return func(ctx context.Context, network, address string) (net.Conn, error) { - return m.doCachedDial(dialer, ctx, network, address) - } -} - -func (m *DnsCacheManager) doCachedDial(d *net.Dialer, ctx context.Context, network, address string) (net.Conn, error) { - safeDial := func(addr string, itemKey string) (net.Conn, error) { - conn, err := d.DialContext(ctx, network, addr) - if err != nil && itemKey != "" { - m.cacheStorage.Delete(itemKey) - } - return conn, err - } - - if !m.IsCacheEnabled() { - return safeDial(address, "") - } - - host, port, err := net.SplitHostPort(address) - if err != nil { - return nil, err - } - - if ip := net.ParseIP(host); ip != nil { - return safeDial(address, "") - } - - ips, err := m.cacheStorage.FetchItem(host) - if err != nil { - logger.WithError(err).WithFields(logrus.Fields{ - "network": network, - "address": address, - }).Errorf("doCachedDial cachedStorage.FetchItem error. ips=%v", ips) - - return safeDial(address, "") - } - - if m.strategy == config.NoCacheStrategy { - if len(ips) > 1 { - m.cacheStorage.Delete(host) - return safeDial(ips[0]+":"+port, "") - } - } - - if m.strategy == config.RandomStrategy { - if len(ips) > 1 { - ip, _ := m.getRandomIp(ips) - return safeDial(ip+":"+port, host) - } - return safeDial(ips[0]+":"+port, host) - } - - return safeDial(ips[0]+":"+port, host) -} - -func (m *DnsCacheManager) getRandomIp(ips []string) (string, error) { - if m.strategy != config.RandomStrategy { - return "", fmt.Errorf( - "getRandomIp can be called only with %v strategy. 
strategy=%v", - config.RandomStrategy, m.strategy) - } - - if m.rand == nil { - source := rand.NewSource(time.Now().Unix()) - m.rand = rand.New(source) - } - - ip := ips[m.rand.Intn(len(ips))] - - return ip, nil -} - -// InitDNSCaching initializes manager's cache storage if it wasn't initialized before with provided ttl, checkinterval values -// Initialized cache storage enables caching of previously hoooked net.Dialer DialContext calls -// -// Otherwise leave storage as is. -func (m *DnsCacheManager) InitDNSCaching(ttl, checkInterval time.Duration) { - if !m.IsCacheEnabled() { - logger.Infof("Initializing dns cache with ttl=%s, duration=%s", ttl, checkInterval) - storage := NewDnsCacheStorage(ttl, checkInterval) - m.SetCacheStorage(IDnsCacheStorage(storage)) - } -} - -// DisposeCache clear all entries from cache and disposes/disables caching of dns queries -func (m *DnsCacheManager) DisposeCache() { - m.cacheStorage.Clear() - m.cacheStorage = nil -} diff --git a/dnscache/manager_test.go b/dnscache/manager_test.go deleted file mode 100644 index a45cbad64553..000000000000 --- a/dnscache/manager_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package dnscache - -import ( - "context" - "net" - "strings" - "testing" - "time" - - "github.com/TykTechnologies/tyk/config" -) - -func TestWrapDialerDialContextFunc(t *testing.T) { - tearDownTestStorageFetchItem := setupTestStorageFetchItem(&configTestStorageFetchItem{t, etcHostsMap, etcHostsErrorMap}) - defer tearDownTestStorageFetchItem() - - expectedHost := "orig-host.com" - expectedSingleIpHost := "single.orig-host.com" - hostWithPort := expectedHost + ":8078" - singleIpHostWithPort := expectedSingleIpHost + ":8078" - dialerContext, cancel := context.WithCancel(context.TODO()) - cancel() //Manually disable connection establishment - - cases := []struct { - name string - - address string - multiIPsStrategy config.IPsHandleStrategy - initStorage bool - - shouldCallFetchItem bool - shouldCallDelete bool - expectedHostname string - 
expectedError string - }{ - { - "PickFirstStrategy(1 ip): Should parse address, call storage.FetchItem, cache ip, call storage.Delete on DialContext error", - singleIpHostWithPort, config.PickFirstStrategy, true, - true, true, expectedSingleIpHost, "operation was canceled", - }, - { - "PickFirstStrategy(>1 ip): Should parse address, call storage.FetchItem, cache all ips, call storage.Delete on DialContext error", - hostWithPort, config.PickFirstStrategy, true, - true, true, expectedHost, "operation was canceled", - }, - { - "NoCacheStrategy(1 ip): Should parse address, call storage.FetchItem, cache ip, call storage.Delete on DialContext error", - singleIpHostWithPort, config.NoCacheStrategy, true, - true, true, expectedSingleIpHost, "operation was canceled", - }, - { - "NoCacheStrategy(>1 ip): Should parse address, call storage.FetchItem, remove ips caching", - hostWithPort, config.NoCacheStrategy, true, - true, true, expectedHost, "operation was canceled", - }, - { - "RandomStrategy(>1 ip): Should parse address, call storage.FetchItem, cache all ips, connect to random ip, call storage.Delete on DialContext error", - hostWithPort, config.RandomStrategy, true, - true, true, expectedHost, "operation was canceled", - }, - { - "Shouldn't call FetchItem when caching is disabled(storage == nil)", - hostWithPort, config.NoCacheStrategy, false, - false, false, "", "", - }, - { - "Shouldn't cache ipv4 address", - "192.0.2.10:80", config.NoCacheStrategy, true, - false, false, "", "operation was canceled", - }, - { - "Should faifast on address without port(accept only address with port)", - expectedHost, config.NoCacheStrategy, true, - false, false, "", "missing port in address", - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - var fetchItemCall, deleteCall struct { - called bool - key string - } - - storage := &MockStorage{func(key string) ([]string, error) { - fetchItemCall.called = true - fetchItemCall.key = key - return etcHostsMap[key+"."], 
nil - }, func(key string) (DnsCacheItem, bool) { - if _, ok := etcHostsMap[key]; ok { - return DnsCacheItem{}, true - } - - return DnsCacheItem{}, false - }, func(key string, addrs []string) {}, - func(key string) { - deleteCall.called = true - deleteCall.key = key - }, func() {}} - - dnsManager := NewDnsCacheManager(tc.multiIPsStrategy) - if tc.initStorage { - dnsManager.SetCacheStorage(storage) - } - - _, err := dnsManager.WrapDialer(&net.Dialer{ - Timeout: 1 * time.Second, - KeepAlive: 0, - })(dialerContext, "tcp", tc.address) - - if tc.expectedError != "" { - if err != nil && !strings.Contains(err.Error(), tc.expectedError) { - t.Fatalf("wanted error '%s', got '%s'", tc.expectedError, err.Error()) - } - } - - if tc.shouldCallFetchItem != fetchItemCall.called { - t.Fatalf("wanted fetchItemCall.called to be %v, got %v", tc.shouldCallFetchItem, fetchItemCall.called) - } - - if tc.shouldCallFetchItem { - if fetchItemCall.key != tc.expectedHostname { - t.Fatalf("wanted fetchItemCall.key to be %v, got %v", tc.expectedHostname, fetchItemCall.key) - } - } - - if tc.shouldCallDelete != deleteCall.called { - t.Fatalf("wanted deleteCall.called to be %v, got %v", tc.shouldCallDelete, deleteCall.called) - } - if tc.shouldCallDelete { - if deleteCall.key != tc.expectedHostname { - t.Fatalf("wanted deleteCall.key to be %v, got %v", tc.expectedHostname, deleteCall.key) - } - } - - if tc.initStorage { - dnsManager.DisposeCache() - } - }) - } -} diff --git a/dnscache/mock_storage.go b/dnscache/mock_storage.go deleted file mode 100644 index 90c037cde54e..000000000000 --- a/dnscache/mock_storage.go +++ /dev/null @@ -1,29 +0,0 @@ -package dnscache - -type MockStorage struct { - MockFetchItem func(key string) ([]string, error) - MockGet func(key string) (DnsCacheItem, bool) - MockSet func(key string, addrs []string) - MockDelete func(key string) - MockClear func() -} - -func (ms *MockStorage) FetchItem(key string) ([]string, error) { - return ms.MockFetchItem(key) -} - -func (ms 
*MockStorage) Get(key string) (DnsCacheItem, bool) { - return ms.MockGet(key) -} - -func (ms *MockStorage) Set(key string, addrs []string) { - ms.MockSet(key, addrs) -} - -func (ms *MockStorage) Delete(key string) { - ms.MockDelete(key) -} - -func (ms *MockStorage) Clear() { - ms.MockClear() -} diff --git a/dnscache/storage.go b/dnscache/storage.go deleted file mode 100644 index ac8053490c87..000000000000 --- a/dnscache/storage.go +++ /dev/null @@ -1,93 +0,0 @@ -package dnscache - -import ( - "net" - "time" - - "fmt" - - "github.com/Sirupsen/logrus" - cache "github.com/pmylund/go-cache" -) - -// DnsCacheItem represents single record in cache -type DnsCacheItem struct { - Addrs []string -} - -// DnsCacheStorage is an in-memory cache of auto-purged dns query ip responses -type DnsCacheStorage struct { - cache *cache.Cache -} - -func NewDnsCacheStorage(expiration, checkInterval time.Duration) *DnsCacheStorage { - storage := DnsCacheStorage{cache.New(expiration, checkInterval)} - return &storage -} - -// Items returns map of non expired dns cache items -func (dc *DnsCacheStorage) Items(includeExpired bool) map[string]DnsCacheItem { - var allItems = dc.cache.Items() - - nonExpiredItems := map[string]DnsCacheItem{} - - for k, v := range allItems { - if !includeExpired && v.Expired() { - continue - } - nonExpiredItems[k] = v.Object.(DnsCacheItem) - } - - return nonExpiredItems -} - -// Get returns non expired item from cache -func (dc *DnsCacheStorage) Get(key string) (DnsCacheItem, bool) { - item, found := dc.cache.Get(key) - if !found { - return DnsCacheItem{}, false - } - return item.(DnsCacheItem), found -} - -func (dc *DnsCacheStorage) Delete(key string) { - dc.cache.Delete(key) -} - -// FetchItem returns list of ips from cache or resolves them and add to cache -func (dc *DnsCacheStorage) FetchItem(hostName string) ([]string, error) { - if hostName == "" { - return nil, fmt.Errorf("hostName can't be empty. 
hostName=%v", hostName) - } - - item, ok := dc.Get(hostName) - if ok { - logger.WithFields(logrus.Fields{ - "hostName": hostName, - "addrs": item.Addrs, - }).Debug("Dns record was populated from cache") - return item.Addrs, nil - } - - addrs, err := dc.resolveDNSRecord(hostName) - if err != nil { - return nil, err - } - - dc.Set(hostName, addrs) - return addrs, nil -} - -func (dc *DnsCacheStorage) Set(key string, addrs []string) { - logger.Debugf("Adding dns record to cache: key=%q, addrs=%q", key, addrs) - dc.cache.Set(key, DnsCacheItem{addrs}, cache.DefaultExpiration) -} - -// Clear deletes all records from cache -func (dc *DnsCacheStorage) Clear() { - dc.cache.Flush() -} - -func (dc *DnsCacheStorage) resolveDNSRecord(host string) ([]string, error) { - return net.LookupHost(host) -} diff --git a/dnscache/storage_test.go b/dnscache/storage_test.go deleted file mode 100644 index 74dca0673dca..000000000000 --- a/dnscache/storage_test.go +++ /dev/null @@ -1,288 +0,0 @@ -package dnscache - -import ( - "net" - "reflect" - "testing" - "time" - - "github.com/miekg/dns" - - "github.com/TykTechnologies/tyk/test" -) - -var ( - expiration = 10 - checkInterval = 5 -) - -const ( - host = "orig-host.com." - singleRecordHost = "single.orig-host.com." - host2 = "orig-host2.com." - host3 = "some.orig-host3.com" - host4 = "some.orig-host4.com" - hostErrorable = "unknown.orig-host.com." - wsHost = "ws.orig-host.com." 
-) - -var ( - etcHostsMap = map[string][]string{ - singleRecordHost: {"10.0.2.10"}, - host: {"127.0.0.1", "127.0.0.2", "127.0.0.3"}, - host2: {"10.0.2.0", "10.0.2.1", "10.0.2.2"}, - host3: {"10.0.2.15", "10.0.2.16"}, - host4: {"10.0.2.11", "10.0.2.10"}, - wsHost: {"127.0.0.1", "127.0.0.1"}, - } - - etcHostsErrorMap = map[string]int{ - hostErrorable: dns.RcodeServerFailure, - } -) - -type configTestStorageFetchItem struct { - *testing.T - etcHostsMap map[string][]string - etcHostsErrorsMap map[string]int -} - -func setupTestStorageFetchItem(cfg *configTestStorageFetchItem) func() { - handle, err := test.InitDNSMock(cfg.etcHostsMap, cfg.etcHostsErrorsMap) - if err != nil { - cfg.T.Error(err.Error()) - } - - return func() { - if err := handle.ShutdownDnsMock(); err != nil { - cfg.T.Error(err.Error()) - } - } -} - -func TestStorageFetchItem(t *testing.T) { - dnsCache := NewDnsCacheStorage(time.Duration(expiration)*time.Second, time.Duration(checkInterval)*time.Second) - - tearDownTestStorageFetchItem := setupTestStorageFetchItem(&configTestStorageFetchItem{t, etcHostsMap, etcHostsErrorMap}) - defer func() { - tearDownTestStorageFetchItem() - dnsCache.Clear() - dnsCache = nil - }() - - cases := []struct { - name string - - Host string - ExpectedIPs []string - - expectedErrorType reflect.Type - shouldExistInCache bool - shouldBeAddedToCache bool - }{ - { - "Should cache first dns record first fetch", - host, etcHostsMap[host], - nil, false, true, - }, - { - "Should cache second dns record first fetch", - host2, etcHostsMap[host2], - nil, false, true, - }, - { - "Should populate from cache first dns record second fetch", - host, etcHostsMap[host], - nil, true, false, - }, - { - "Should populate from cache first dns record third fetch", - host, etcHostsMap[host], - nil, true, false, - }, - { - "Should populate from cache second dns record second fetch", - host2, etcHostsMap[host2], - nil, true, false, - }, - { - "Shouldn't cache dns record fetch in case error", - 
hostErrorable, nil, - reflect.TypeOf(&net.DNSError{}), false, false, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - got, err := dnsCache.FetchItem(tc.Host) - - if tc.expectedErrorType != nil { - if err == nil || tc.expectedErrorType != reflect.TypeOf(err) { - t.Fatalf("wanted FetchItem error type %v, got %v. Error=%#v", tc.expectedErrorType, reflect.TypeOf(err), err) - } - - if _, ok := dnsCache.Get(tc.Host); got != nil || ok { - t.Fatalf("wanted FetchItem error to omit cache. got %#v, ok=%t", got, ok) - } - return - } - - if err != nil || !reflect.DeepEqual(got, tc.ExpectedIPs) { - t.Fatalf("wanted ips %q, got %q. Error: %v", tc.ExpectedIPs, got, err) - } - - if tc.shouldExistInCache || tc.shouldBeAddedToCache { - record, ok := dnsCache.Get(tc.Host) - - if !ok { - t.Fatalf("Host addresses weren't found in cache; host %q", tc.Host) - } - - if !test.IsDnsRecordsAddrsEqualsTo(record.Addrs, tc.ExpectedIPs) { - t.Fatalf("wanted cached ips %v, got record %v", tc.ExpectedIPs, record) - } - } else { - if got, ok := dnsCache.Get(tc.Host); !test.IsDnsRecordsAddrsEqualsTo(got.Addrs, nil) || ok { - t.Fatalf("wanted FetchItem to omit write to cache. 
got %#v, ok=%t", got, ok) - } - } - }) - } -} - -func TestStorageRecordExpiration(t *testing.T) { - var ( - expiration = 2000 - checkInterval = 1500 - ) - - type testRecord struct { - dns string - addrs []string - addDelay time.Duration - } - - cases := []struct { - name string - - records []testRecord - sleepBeforeCleanup time.Duration - notExpiredAfterDelay []testRecord - checkInterval int - }{ - { - "Shouldn't remove dns record when ttl/expiration < 1", - []testRecord{ - {dns: host, addrs: etcHostsMap[host]}, - }, - time.Duration(checkInterval+10) * time.Millisecond, - []testRecord{ - {dns: host, addrs: etcHostsMap[host]}, - }, - checkInterval, - }, - { - "Should remove single dns record after expiration", - []testRecord{ - {dns: host, addrs: etcHostsMap[host]}, - }, - time.Duration(expiration+10) * time.Millisecond, - []testRecord{}, - checkInterval, - }, - { - "Should leave as expired dns records if check_interval=-1", - []testRecord{ - {dns: host, addrs: etcHostsMap[host]}, - {dns: host2, addrs: etcHostsMap[host2]}, - {dns: wsHost, addrs: etcHostsMap[wsHost]}, - }, - time.Duration(checkInterval+10) * time.Millisecond, - []testRecord{ - {dns: host, addrs: etcHostsMap[host]}, - {dns: host2, addrs: etcHostsMap[host2]}, - {dns: wsHost, addrs: etcHostsMap[wsHost]}, - }, - -1, - }, - { - "Should remove all(>1) dns records after expiration", - []testRecord{ - {dns: host2, addrs: etcHostsMap[host]}, - {dns: host2, addrs: etcHostsMap[host2]}, - {dns: host2, addrs: etcHostsMap[wsHost]}, - }, - time.Duration(expiration+10) * time.Millisecond, - []testRecord{}, - checkInterval, - }, - { - "Should remove only expired record after expiration", - []testRecord{ - {dns: host, addrs: etcHostsMap[host]}, - {dns: host2, addrs: etcHostsMap[host2], addDelay: 500 * time.Millisecond}, - {dns: wsHost, addrs: etcHostsMap[wsHost]}, - }, - time.Duration(expiration-400) * time.Millisecond, - []testRecord{ - {dns: host2, addrs: etcHostsMap[host2]}, - {dns: wsHost, addrs: 
etcHostsMap[wsHost]}, - }, - checkInterval, - }, - { - "Should remove only expired records after expiration", - []testRecord{ - {dns: host, addrs: etcHostsMap[host]}, - {dns: host2, addrs: etcHostsMap[host2], addDelay: 250 * time.Millisecond}, - {dns: host3, addrs: etcHostsMap[host3], addDelay: 500 * time.Millisecond}, - {dns: host4, addrs: etcHostsMap[host4], addDelay: 100 * time.Millisecond}, - {dns: wsHost, addrs: etcHostsMap[wsHost]}, - }, - time.Duration(expiration-350) * time.Millisecond, - []testRecord{ - {dns: host3, addrs: etcHostsMap[host3]}, - {dns: host4, addrs: etcHostsMap[host4]}, - {dns: wsHost, addrs: etcHostsMap[wsHost]}, - }, - checkInterval, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - dnsCache := NewDnsCacheStorage(time.Duration(expiration)*time.Millisecond, time.Duration(tc.checkInterval)*time.Millisecond) - - for _, r := range tc.records { - if r.addDelay > 0 { - time.Sleep(r.addDelay) - } - dnsCache.Set(r.dns, r.addrs) - } - - if tc.sleepBeforeCleanup > 0 { - time.Sleep(tc.sleepBeforeCleanup) - } - if lenNonExpired, lenCurrent := len(tc.notExpiredAfterDelay), len(dnsCache.Items(tc.checkInterval == -1)); lenNonExpired != lenCurrent { - t.Fatalf("wanted len(nonExpiredItems) %d, got %d. items=%+v", lenNonExpired, lenCurrent, dnsCache.Items(tc.checkInterval == -1)) - } - - if tc.checkInterval == -1 { - for _, r := range tc.records { - if item, ok := dnsCache.Items(true)[r.dns]; !ok || !test.IsDnsRecordsAddrsEqualsTo(item.Addrs, r.addrs) { - t.Fatalf("wanted expired cached ips %v, got item %#v. items=%+v, ok=%t", r.addrs, item, dnsCache.Items(true), ok) - } - } - } else { - for _, r := range tc.notExpiredAfterDelay { - if item, ok := dnsCache.Get(r.dns); !ok || !test.IsDnsRecordsAddrsEqualsTo(item.Addrs, r.addrs) { - t.Fatalf("wanted cached ips %v, got item %#v. 
items=%+v, ok=%t", r.addrs, item, dnsCache.Items(false), ok) - } - } - } - - dnsCache.Clear() - dnsCache = nil - }) - } -} diff --git a/gateway/event_handler_webhooks.go b/event_handler_webhooks.go similarity index 98% rename from gateway/event_handler_webhooks.go rename to event_handler_webhooks.go index c56b6a41d326..9ae89091e0de 100644 --- a/gateway/event_handler_webhooks.go +++ b/event_handler_webhooks.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -70,7 +70,7 @@ func (w *WebHookHandler) Init(handlerConf interface{}) error { return err } - w.store = &storage.RedisCluster{KeyPrefix: "webhook.cache."} + w.store = storage.RedisCluster{KeyPrefix: "webhook.cache."} w.store.Connect() // Pre-load template on init diff --git a/gateway/event_handler_webhooks_test.go b/event_handler_webhooks_test.go similarity index 94% rename from gateway/event_handler_webhooks_test.go rename to event_handler_webhooks_test.go index 81035fc0e681..491bb9512d46 100644 --- a/gateway/event_handler_webhooks_test.go +++ b/event_handler_webhooks_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "path/filepath" @@ -10,10 +10,10 @@ import ( func createGetHandler() *WebHookHandler { eventHandlerConf := config.WebHookHandlerConf{ - TargetPath: TestHttpGet, + TargetPath: testHttpGet, Method: "GET", EventTimeout: 10, - TemplatePath: "../templates/default_webhook.json", + TemplatePath: "./templates/default_webhook.json", HeaderList: map[string]string{"x-tyk-test": "TEST"}, } ev := &WebHookHandler{} @@ -28,7 +28,7 @@ func TestNewValid(t *testing.T) { err := h.Init(map[string]interface{}{ "method": "POST", "target_path": testHttpPost, - "template_path": "../templates/default_webhook.json", + "template_path": "./templates/default_webhook.json", "header_map": map[string]string{"X-Tyk-Test-Header": "Tyk v1.BANANA"}, "event_timeout": 10, }) @@ -42,7 +42,7 @@ func TestNewInvalid(t *testing.T) { err := h.Init(map[string]interface{}{ "method": 123, "target_path": 
testHttpPost, - "template_path": "../templates/default_webhook.json", + "template_path": "./templates/default_webhook.json", "header_map": map[string]string{"X-Tyk-Test-Header": "Tyk v1.BANANA"}, "event_timeout": 10, }) @@ -187,10 +187,10 @@ func TestNewCustomTemplate(t *testing.T) { }{ {"UseDefault", false, "", false}, {"FallbackToDefault", false, "missing_webhook.json", false}, - {"UseCustom", false, "templates/breaker_webhook.json", false}, + {"UseCustom", false, "./templates/breaker_webhook.json", false}, {"MissingDefault", true, "", true}, {"MissingDefaultFallback", true, "missing_webhook.json", true}, - {"MissingDefaultNotNeeded", true, "../templates/breaker_webhook.json", false}, + {"MissingDefaultNotNeeded", true, "./templates/breaker_webhook.json", false}, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -236,7 +236,7 @@ func TestWebhookContentTypeHeader(t *testing.T) { {"InvalidTemplatePath", "randomPath", nil, "application/json"}, {"InvalidTemplatePath/CustomHeaders", "randomPath", map[string]string{"Content-Type": "application/xml"}, "application/xml"}, {"CustomTemplate", filepath.Join(templatePath, "transform_test.tmpl"), nil, ""}, - {"CustomTemplate/CustomHeaders", filepath.Join(templatePath, "breaker_webhook.json"), map[string]string{"Content-Type": "application/xml"}, "application/xml"}, + {"CustomTemplate/CustomHeaders", filepath.Join(templatePath, "breaker_webhook.json"), map[string]string{"Content-Type": "application/json"}, "application/json"}, } for _, ts := range tests { diff --git a/gateway/event_system.go b/event_system.go similarity index 99% rename from gateway/event_system.go rename to event_system.go index 6a74e4e36a06..7e73ea9026b5 100644 --- a/gateway/event_system.go +++ b/event_system.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -8,7 +8,7 @@ import ( "time" "github.com/Sirupsen/logrus" - circuit "github.com/rubyist/circuitbreaker" + "github.com/rubyist/circuitbreaker" 
"github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" diff --git a/gateway/event_system_test.go b/event_system_test.go similarity index 99% rename from gateway/event_system_test.go rename to event_system_test.go index 07f362a00d27..259e047a45e3 100644 --- a/gateway/event_system_test.go +++ b/event_system_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" diff --git a/gateway/auth_manager_test.go b/gateway/auth_manager_test.go deleted file mode 100644 index 6ab5b09988f4..000000000000 --- a/gateway/auth_manager_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package gateway - -import ( - "net/http" - "testing" - - "github.com/TykTechnologies/tyk/storage" - - "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/test" - "github.com/TykTechnologies/tyk/user" -) - -func TestAuthenticationAfterDeleteKey(t *testing.T) { - assert := func(hashKeys bool) { - globalConf := config.Global() - globalConf.HashKeys = hashKeys - config.SetGlobal(globalConf) - - ts := StartTest() - defer ts.Close() - - api := BuildAndLoadAPI(func(spec *APISpec) { - spec.UseKeylessAccess = false - spec.Proxy.ListenPath = "/" - })[0] - - key := CreateSession(func(s *user.SessionState) { - s.AccessRights = map[string]user.AccessDefinition{api.APIID: { - APIID: api.APIID, - }} - }) - deletePath := "/tyk/keys/" + key - authHeader := map[string]string{ - "authorization": key, - } - - ts.Run(t, []test.TestCase{ - {Path: "/get", Headers: authHeader, Code: http.StatusOK}, - {Method: http.MethodDelete, Path: deletePath, AdminAuth: true, Code: http.StatusOK, BodyMatch: `"action":"deleted"`}, - {Path: "/get", Headers: authHeader, Code: http.StatusForbidden}, - }...) 
- } - - t.Run("HashKeys=false", func(t *testing.T) { - assert(false) - }) - - t.Run("HashKeys=true", func(t *testing.T) { - assert(true) - }) -} - -func TestAuthenticationAfterUpdateKey(t *testing.T) { - assert := func(hashKeys bool) { - globalConf := config.Global() - globalConf.HashKeys = hashKeys - config.SetGlobal(globalConf) - - ts := StartTest() - defer ts.Close() - - api := BuildAndLoadAPI(func(spec *APISpec) { - spec.UseKeylessAccess = false - spec.Proxy.ListenPath = "/" - })[0] - - key := generateToken("", "") - - session := CreateStandardSession() - session.AccessRights = map[string]user.AccessDefinition{api.APIID: { - APIID: api.APIID, - }} - - FallbackKeySesionManager.UpdateSession(storage.HashKey(key), session, 0, config.Global().HashKeys) - - authHeader := map[string]string{ - "authorization": key, - } - - ts.Run(t, []test.TestCase{ - {Path: "/get", Headers: authHeader, Code: http.StatusOK}, - }...) - - session.AccessRights = map[string]user.AccessDefinition{"dummy": { - APIID: "dummy", - }} - - FallbackKeySesionManager.UpdateSession(storage.HashKey(key), session, 0, config.Global().HashKeys) - - ts.Run(t, []test.TestCase{ - {Path: "/get", Headers: authHeader, Code: http.StatusForbidden}, - }...) 
- - } - - t.Run("HashKeys=false", func(t *testing.T) { - assert(false) - }) - - t.Run("HashKeys=true", func(t *testing.T) { - assert(true) - }) -} diff --git a/gateway/coprocess_python_api.c b/gateway/coprocess_python_api.c deleted file mode 100644 index e0edbd93efd5..000000000000 --- a/gateway/coprocess_python_api.c +++ /dev/null @@ -1,78 +0,0 @@ -// +build coprocess -// +build python - -#include -#include "../coprocess/api.h" - - -static PyObject *store_data(PyObject *self, PyObject *args) { - char *key, *value; - int ttl; - - if (!PyArg_ParseTuple(args, "ssi", &key, &value, &ttl)) - return NULL; - - TykStoreData(key, value, ttl); - - Py_RETURN_NONE; -} - -static PyObject *get_data(PyObject *self, PyObject *args) { - char *key, *value; - PyObject *ret; - - if (!PyArg_ParseTuple(args, "s", &key)) - return NULL; - - value = TykGetData(key); - // TykGetData doesn't currently handle storage errors so let's at least safeguard against null pointer - if (value == NULL) { - PyErr_SetString(PyExc_ValueError, "Null pointer from TykGetData"); - return NULL; - } - ret = Py_BuildValue("s", value); - // CGO mallocs it in TykGetData and Py_BuildValue just copies strings, hence it's our responsibility to free it now - free(value); - - return ret; -} - -static PyObject *trigger_event(PyObject *self, PyObject *args) { - char *name, *payload; - - if (!PyArg_ParseTuple(args, "ss", &name, &payload)) - return NULL; - - TykTriggerEvent(name, payload); - - Py_RETURN_NONE; -} - -static PyObject *coprocess_log(PyObject *self, PyObject *args) { - char *message, *level; - - if (!PyArg_ParseTuple(args, "ss", &message, &level)) - return NULL; - - CoProcessLog(message, level); - - Py_RETURN_NONE; -} - - -static PyMethodDef module_methods[] = { - {"store_data", store_data, METH_VARARGS, "Stores the data in gateway storage by given key and TTL"}, - {"get_data", get_data, METH_VARARGS, "Retrieves the data from gateway storage by given key"}, - {"trigger_event", trigger_event, METH_VARARGS, 
"Triggers a named gateway event with given payload"}, - {"log", coprocess_log, METH_VARARGS, "Logs a message with given level"}, - {NULL, NULL, 0, NULL} /* Sentinel */ -}; - -static PyModuleDef module = { - PyModuleDef_HEAD_INIT, "gateway_wrapper", NULL, -1, module_methods, - NULL, NULL, NULL, NULL -}; - -PyMODINIT_FUNC PyInit_gateway_wrapper(void) { - return PyModule_Create(&module); -} diff --git a/gateway/mw_go_plugin.go b/gateway/mw_go_plugin.go deleted file mode 100644 index 855d8b62b8c9..000000000000 --- a/gateway/mw_go_plugin.go +++ /dev/null @@ -1,164 +0,0 @@ -package gateway - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "time" - - "github.com/Sirupsen/logrus" - - "github.com/TykTechnologies/tyk/goplugin" -) - -// customResponseWriter is a wrapper around standard http.ResponseWriter -// plus it tracks if response was sent and what status code was sent -type customResponseWriter struct { - http.ResponseWriter - responseSent bool - statusCodeSent int - copyData bool - data []byte - dataLength int64 -} - -func (w *customResponseWriter) Write(b []byte) (int, error) { - w.responseSent = true - if w.statusCodeSent == 0 { - w.statusCodeSent = http.StatusOK // no WriteHeader was called so it will be set to StatusOK in actual ResponseWriter - } - - // send actual data - num, err := w.ResponseWriter.Write(b) - - // copy data sent - if w.copyData { - if w.data == nil { - w.data = make([]byte, num) - copy(w.data, b[:num]) - } else { - w.data = append(w.data, b[:num]...) 
- } - } - - // count how many bytes we sent - w.dataLength += int64(num) - - return num, err -} - -func (w *customResponseWriter) WriteHeader(statusCode int) { - w.responseSent = true - w.statusCodeSent = statusCode - w.ResponseWriter.WriteHeader(statusCode) -} - -func (w *customResponseWriter) getHttpResponse(r *http.Request) *http.Response { - // craft response on the fly for analytics - httpResponse := &http.Response{ - Status: http.StatusText(w.statusCodeSent), - StatusCode: w.statusCodeSent, - Header: w.ResponseWriter.Header(), // TODO: worth to think about trailer headers - Proto: r.Proto, - ProtoMajor: r.ProtoMajor, - ProtoMinor: r.ProtoMinor, - Request: r, - ContentLength: w.dataLength, - } - if w.copyData { - httpResponse.Body = ioutil.NopCloser(bytes.NewReader(w.data)) - } - - return httpResponse -} - -// GoPluginMiddleware is a generic middleware that will execute Go-plugin code before continuing -type GoPluginMiddleware struct { - BaseMiddleware - Path string // path to .so file - SymbolName string // function symbol to look up - handler http.HandlerFunc - logger *logrus.Entry - successHandler *SuccessHandler // to record analytics -} - -func (m *GoPluginMiddleware) Name() string { - return "GoPluginMiddleware: " + m.Path + ":" + m.SymbolName -} - -func (m *GoPluginMiddleware) EnabledForSpec() bool { - m.logger = log.WithFields(logrus.Fields{ - "mwPath": m.Path, - "mwSymbolName": m.SymbolName, - }) - - if m.handler != nil { - m.logger.Info("Go-plugin middleware is already initialized") - return true - } - - // try to load plugin - var err error - if m.handler, err = goplugin.GetHandler(m.Path, m.SymbolName); err != nil { - m.logger.WithError(err).Error("Could not load Go-plugin") - return false - } - - // to record 2XX hits in analytics - m.successHandler = &SuccessHandler{BaseMiddleware: m.BaseMiddleware} - - return true -} - -func (m *GoPluginMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Request, conf interface{}) (err error, respCode int) 
{ - // make sure tyk recover in case Go-plugin function panics - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("%v", e) - respCode = http.StatusInternalServerError - m.logger.WithError(err).Error("Recovered from panic while running Go-plugin middleware func") - } - }() - - // prepare data to call Go-plugin function - - // make sure request's body can be re-read again - nopCloseRequestBody(r) - - // wrap ResponseWriter to check if response was sent - rw := &customResponseWriter{ - ResponseWriter: w, - copyData: recordDetail(r, m.Spec.GlobalConfig), - } - - // call Go-plugin function - t1 := time.Now() - m.handler(rw, r) - t2 := time.Now() - - // calculate latency - ms := float64(t2.UnixNano()-t1.UnixNano()) * 0.000001 - m.logger.WithField("ms", ms).Debug("Go-plugin request processing took") - - // check if response was sent - if rw.responseSent { - // check if response code was an error one - if rw.statusCodeSent >= http.StatusBadRequest { - // base middleware will report this error to analytics if needed - respCode = rw.statusCodeSent - err = fmt.Errorf("plugin function sent error response code: %d", rw.statusCodeSent) - m.logger.WithError(err).Error("Failed to process request with Go-plugin middleware func") - } else { - // record 2XX to analytics - m.successHandler.RecordHit(r, int64(ms), rw.statusCodeSent, rw.getHttpResponse(r)) - - // no need to continue passing this request down to reverse proxy - respCode = mwStatusRespond - } - } else { - respCode = http.StatusOK - } - - return -} diff --git a/gateway/redis_analytics_purger.go b/gateway/redis_analytics_purger.go deleted file mode 100644 index a7e3a1a208da..000000000000 --- a/gateway/redis_analytics_purger.go +++ /dev/null @@ -1,38 +0,0 @@ -package gateway - -import ( - "time" - - "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/storage" -) - -// Purger is an interface that will define how the in-memory store will be purged -// of analytics data to prevent it 
growing too large -type Purger interface { - PurgeCache() - PurgeLoop(<-chan time.Time) -} - -type RedisPurger struct { - Store storage.Handler -} - -func (r RedisPurger) PurgeLoop(ticker <-chan time.Time) { - for { - <-ticker - r.PurgeCache() - } -} - -func (r *RedisPurger) PurgeCache() { - expireAfter := config.Global().AnalyticsConfig.StorageExpirationTime - if expireAfter == 0 { - expireAfter = 60 // 1 minute - } - - exp, _ := r.Store.GetExp(analyticsKeyName) - if exp <= 0 { - r.Store.SetExp(analyticsKeyName, int64(expireAfter)) - } -} diff --git a/gateway/sds.c b/gateway/sds.c deleted file mode 100644 index 587e794a4468..000000000000 --- a/gateway/sds.c +++ /dev/null @@ -1,1277 +0,0 @@ -// +build coprocess -// +build !grpc - -/* SDSLib 2.0 -- A C dynamic strings library - * - * Copyright (c) 2006-2015, Salvatore Sanfilippo - * Copyright (c) 2015, Oran Agra - * Copyright (c) 2015, Redis Labs, Inc - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include "../coprocess/sds/sds.h" -#include "../coprocess/sds/sdsalloc.h" - -static inline int sdsHdrSize(char type) { - switch(type&SDS_TYPE_MASK) { - case SDS_TYPE_5: - return sizeof(struct sdshdr5); - case SDS_TYPE_8: - return sizeof(struct sdshdr8); - case SDS_TYPE_16: - return sizeof(struct sdshdr16); - case SDS_TYPE_32: - return sizeof(struct sdshdr32); - case SDS_TYPE_64: - return sizeof(struct sdshdr64); - } - return 0; -} - -static inline char sdsReqType(size_t string_size) { - if (string_size < 32) - return SDS_TYPE_5; - if (string_size < 0xff) - return SDS_TYPE_8; - if (string_size < 0xffff) - return SDS_TYPE_16; - if (string_size < 0xffffffff) - return SDS_TYPE_32; - return SDS_TYPE_64; -} - -/* Create a new sds string with the content specified by the 'init' pointer - * and 'initlen'. - * If NULL is used for 'init' the string is initialized with zero bytes. - * - * The string is always null-termined (all the sds strings are, always) so - * even if you create an sds string with: - * - * mystring = sdsnewlen("abc",3); - * - * You can print the string with printf() as there is an implicit \0 at the - * end of the string. However the string is binary safe and can contain - * \0 characters in the middle, as the length is stored in the sds header. 
*/ -sds sdsnewlen(const void *init, size_t initlen) { - void *sh; - sds s; - char type = sdsReqType(initlen); - /* Empty strings are usually created in order to append. Use type 8 - * since type 5 is not good at this. */ - if (type == SDS_TYPE_5 && initlen == 0) type = SDS_TYPE_8; - int hdrlen = sdsHdrSize(type); - unsigned char *fp; /* flags pointer. */ - - sh = s_malloc(hdrlen+initlen+1); - if (!init) - memset(sh, 0, hdrlen+initlen+1); - if (sh == NULL) return NULL; - s = (char*)sh+hdrlen; - fp = ((unsigned char*)s)-1; - switch(type) { - case SDS_TYPE_5: { - *fp = type | (initlen << SDS_TYPE_BITS); - break; - } - case SDS_TYPE_8: { - SDS_HDR_VAR(8,s); - sh->len = initlen; - sh->alloc = initlen; - *fp = type; - break; - } - case SDS_TYPE_16: { - SDS_HDR_VAR(16,s); - sh->len = initlen; - sh->alloc = initlen; - *fp = type; - break; - } - case SDS_TYPE_32: { - SDS_HDR_VAR(32,s); - sh->len = initlen; - sh->alloc = initlen; - *fp = type; - break; - } - case SDS_TYPE_64: { - SDS_HDR_VAR(64,s); - sh->len = initlen; - sh->alloc = initlen; - *fp = type; - break; - } - } - if (initlen && init) - memcpy(s, init, initlen); - s[initlen] = '\0'; - return s; -} - -/* Create an empty (zero length) sds string. Even in this case the string - * always has an implicit null term. */ -sds sdsempty(void) { - return sdsnewlen("",0); -} - -/* Create a new sds string starting from a null terminated C string. */ -sds sdsnew(const char *init) { - size_t initlen = (init == NULL) ? 0 : strlen(init); - return sdsnewlen(init, initlen); -} - -/* Duplicate an sds string. */ -sds sdsdup(const sds s) { - return sdsnewlen(s, sdslen(s)); -} - -/* Free an sds string. No operation is performed if 's' is NULL. */ -void sdsfree(sds s) { - if (s == NULL) return; - s_free((char*)s-sdsHdrSize(s[-1])); -} - -/* Set the sds string length to the length as obtained with strlen(), so - * considering as content only up to the first null term character. 
- * - * This function is useful when the sds string is hacked manually in some - * way, like in the following example: - * - * s = sdsnew("foobar"); - * s[2] = '\0'; - * sdsupdatelen(s); - * printf("%d\n", sdslen(s)); - * - * The output will be "2", but if we comment out the call to sdsupdatelen() - * the output will be "6" as the string was modified but the logical length - * remains 6 bytes. */ -void sdsupdatelen(sds s) { - int reallen = strlen(s); - sdssetlen(s, reallen); -} - -/* Modify an sds string in-place to make it empty (zero length). - * However all the existing buffer is not discarded but set as free space - * so that next append operations will not require allocations up to the - * number of bytes previously available. */ -void sdsclear(sds s) { - sdssetlen(s, 0); - s[0] = '\0'; -} - -/* Enlarge the free space at the end of the sds string so that the caller - * is sure that after calling this function can overwrite up to addlen - * bytes after the end of the string, plus one more byte for nul term. - * - * Note: this does not change the *length* of the sds string as returned - * by sdslen(), but only the free buffer space we have. */ -sds sdsMakeRoomFor(sds s, size_t addlen) { - void *sh, *newsh; - size_t avail = sdsavail(s); - size_t len, newlen; - char type, oldtype = s[-1] & SDS_TYPE_MASK; - int hdrlen; - - /* Return ASAP if there is enough space left. */ - if (avail >= addlen) return s; - - len = sdslen(s); - sh = (char*)s-sdsHdrSize(oldtype); - newlen = (len+addlen); - if (newlen < SDS_MAX_PREALLOC) - newlen *= 2; - else - newlen += SDS_MAX_PREALLOC; - - type = sdsReqType(newlen); - - /* Don't use type 5: the user is appending to the string and type 5 is - * not able to remember empty space, so sdsMakeRoomFor() must be called - * at every appending operation. 
*/ - if (type == SDS_TYPE_5) type = SDS_TYPE_8; - - hdrlen = sdsHdrSize(type); - if (oldtype==type) { - newsh = s_realloc(sh, hdrlen+newlen+1); - if (newsh == NULL) return NULL; - s = (char*)newsh+hdrlen; - } else { - /* Since the header size changes, need to move the string forward, - * and can't use realloc */ - newsh = s_malloc(hdrlen+newlen+1); - if (newsh == NULL) return NULL; - memcpy((char*)newsh+hdrlen, s, len+1); - s_free(sh); - s = (char*)newsh+hdrlen; - s[-1] = type; - sdssetlen(s, len); - } - sdssetalloc(s, newlen); - return s; -} - -/* Reallocate the sds string so that it has no free space at the end. The - * contained string remains not altered, but next concatenation operations - * will require a reallocation. - * - * After the call, the passed sds string is no longer valid and all the - * references must be substituted with the new pointer returned by the call. */ -sds sdsRemoveFreeSpace(sds s) { - void *sh, *newsh; - char type, oldtype = s[-1] & SDS_TYPE_MASK; - int hdrlen; - size_t len = sdslen(s); - sh = (char*)s-sdsHdrSize(oldtype); - - type = sdsReqType(len); - hdrlen = sdsHdrSize(type); - if (oldtype==type) { - newsh = s_realloc(sh, hdrlen+len+1); - if (newsh == NULL) return NULL; - s = (char*)newsh+hdrlen; - } else { - newsh = s_malloc(hdrlen+len+1); - if (newsh == NULL) return NULL; - memcpy((char*)newsh+hdrlen, s, len+1); - s_free(sh); - s = (char*)newsh+hdrlen; - s[-1] = type; - sdssetlen(s, len); - } - sdssetalloc(s, len); - return s; -} - -/* Return the total size of the allocation of the specified sds string, - * including: - * 1) The sds header before the pointer. - * 2) The string. - * 3) The free buffer at the end if any. - * 4) The implicit null term. - */ -size_t sdsAllocSize(sds s) { - size_t alloc = sdsalloc(s); - return sdsHdrSize(s[-1])+alloc+1; -} - -/* Return the pointer of the actual SDS allocation (normally SDS strings - * are referenced by the start of the string buffer). 
*/ -void *sdsAllocPtr(sds s) { - return (void*) (s-sdsHdrSize(s[-1])); -} - -/* Increment the sds length and decrements the left free space at the - * end of the string according to 'incr'. Also set the null term - * in the new end of the string. - * - * This function is used in order to fix the string length after the - * user calls sdsMakeRoomFor(), writes something after the end of - * the current string, and finally needs to set the new length. - * - * Note: it is possible to use a negative increment in order to - * right-trim the string. - * - * Usage example: - * - * Using sdsIncrLen() and sdsMakeRoomFor() it is possible to mount the - * following schema, to cat bytes coming from the kernel to the end of an - * sds string without copying into an intermediate buffer: - * - * oldlen = sdslen(s); - * s = sdsMakeRoomFor(s, BUFFER_SIZE); - * nread = read(fd, s+oldlen, BUFFER_SIZE); - * ... check for nread <= 0 and handle it ... - * sdsIncrLen(s, nread); - */ -void sdsIncrLen(sds s, int incr) { - unsigned char flags = s[-1]; - size_t len; - switch(flags&SDS_TYPE_MASK) { - case SDS_TYPE_5: { - unsigned char *fp = ((unsigned char*)s)-1; - unsigned char oldlen = SDS_TYPE_5_LEN(flags); - assert((incr > 0 && oldlen+incr < 32) || (incr < 0 && oldlen >= (unsigned int)(-incr))); - *fp = SDS_TYPE_5 | ((oldlen+incr) << SDS_TYPE_BITS); - len = oldlen+incr; - break; - } - case SDS_TYPE_8: { - SDS_HDR_VAR(8,s); - assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); - len = (sh->len += incr); - break; - } - case SDS_TYPE_16: { - SDS_HDR_VAR(16,s); - assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); - len = (sh->len += incr); - break; - } - case SDS_TYPE_32: { - SDS_HDR_VAR(32,s); - assert((incr >= 0 && sh->alloc-sh->len >= (unsigned int)incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); - len = (sh->len += incr); - break; - } - case SDS_TYPE_64: { - SDS_HDR_VAR(64,s); - 
assert((incr >= 0 && sh->alloc-sh->len >= (uint64_t)incr) || (incr < 0 && sh->len >= (uint64_t)(-incr))); - len = (sh->len += incr); - break; - } - default: len = 0; /* Just to avoid compilation warnings. */ - } - s[len] = '\0'; -} - -/* Grow the sds to have the specified length. Bytes that were not part of - * the original length of the sds will be set to zero. - * - * if the specified length is smaller than the current length, no operation - * is performed. */ -sds sdsgrowzero(sds s, size_t len) { - size_t curlen = sdslen(s); - - if (len <= curlen) return s; - s = sdsMakeRoomFor(s,len-curlen); - if (s == NULL) return NULL; - - /* Make sure added region doesn't contain garbage */ - memset(s+curlen,0,(len-curlen+1)); /* also set trailing \0 byte */ - sdssetlen(s, len); - return s; -} - -/* Append the specified binary-safe string pointed by 't' of 'len' bytes to the - * end of the specified sds string 's'. - * - * After the call, the passed sds string is no longer valid and all the - * references must be substituted with the new pointer returned by the call. */ -sds sdscatlen(sds s, const void *t, size_t len) { - size_t curlen = sdslen(s); - - s = sdsMakeRoomFor(s,len); - if (s == NULL) return NULL; - memcpy(s+curlen, t, len); - sdssetlen(s, curlen+len); - s[curlen+len] = '\0'; - return s; -} - -/* Append the specified null termianted C string to the sds string 's'. - * - * After the call, the passed sds string is no longer valid and all the - * references must be substituted with the new pointer returned by the call. */ -sds sdscat(sds s, const char *t) { - return sdscatlen(s, t, strlen(t)); -} - -/* Append the specified sds 't' to the existing sds 's'. - * - * After the call, the modified sds string is no longer valid and all the - * references must be substituted with the new pointer returned by the call. 
*/ -sds sdscatsds(sds s, const sds t) { - return sdscatlen(s, t, sdslen(t)); -} - -/* Destructively modify the sds string 's' to hold the specified binary - * safe string pointed by 't' of length 'len' bytes. */ -sds sdscpylen(sds s, const char *t, size_t len) { - if (sdsalloc(s) < len) { - s = sdsMakeRoomFor(s,len-sdslen(s)); - if (s == NULL) return NULL; - } - memcpy(s, t, len); - s[len] = '\0'; - sdssetlen(s, len); - return s; -} - -/* Like sdscpylen() but 't' must be a null-termined string so that the length - * of the string is obtained with strlen(). */ -sds sdscpy(sds s, const char *t) { - return sdscpylen(s, t, strlen(t)); -} - -/* Helper for sdscatlonglong() doing the actual number -> string - * conversion. 's' must point to a string with room for at least - * SDS_LLSTR_SIZE bytes. - * - * The function returns the length of the null-terminated string - * representation stored at 's'. */ -#define SDS_LLSTR_SIZE 21 -int sdsll2str(char *s, long long value) { - char *p, aux; - unsigned long long v; - size_t l; - - /* Generate the string representation, this method produces - * an reversed string. */ - v = (value < 0) ? -value : value; - p = s; - do { - *p++ = '0'+(v%10); - v /= 10; - } while(v); - if (value < 0) *p++ = '-'; - - /* Compute length and add null term. */ - l = p-s; - *p = '\0'; - - /* Reverse the string. */ - p--; - while(s < p) { - aux = *s; - *s = *p; - *p = aux; - s++; - p--; - } - return l; -} - -/* Identical sdsll2str(), but for unsigned long long type. */ -int sdsull2str(char *s, unsigned long long v) { - char *p, aux; - size_t l; - - /* Generate the string representation, this method produces - * an reversed string. */ - p = s; - do { - *p++ = '0'+(v%10); - v /= 10; - } while(v); - - /* Compute length and add null term. */ - l = p-s; - *p = '\0'; - - /* Reverse the string. */ - p--; - while(s < p) { - aux = *s; - *s = *p; - *p = aux; - s++; - p--; - } - return l; -} - -/* Create an sds string from a long long value. 
It is much faster than: - * - * sdscatprintf(sdsempty(),"%lld\n", value); - */ -sds sdsfromlonglong(long long value) { - char buf[SDS_LLSTR_SIZE]; - int len = sdsll2str(buf,value); - - return sdsnewlen(buf,len); -} - -/* Like sdscatprintf() but gets va_list instead of being variadic. */ -sds sdscatvprintf(sds s, const char *fmt, va_list ap) { - va_list cpy; - char staticbuf[1024], *buf = staticbuf, *t; - size_t buflen = strlen(fmt)*2; - - /* We try to start using a static buffer for speed. - * If not possible we revert to heap allocation. */ - if (buflen > sizeof(staticbuf)) { - buf = s_malloc(buflen); - if (buf == NULL) return NULL; - } else { - buflen = sizeof(staticbuf); - } - - /* Try with buffers two times bigger every time we fail to - * fit the string in the current buffer size. */ - while(1) { - buf[buflen-2] = '\0'; - va_copy(cpy,ap); - vsnprintf(buf, buflen, fmt, cpy); - va_end(cpy); - if (buf[buflen-2] != '\0') { - if (buf != staticbuf) s_free(buf); - buflen *= 2; - buf = s_malloc(buflen); - if (buf == NULL) return NULL; - continue; - } - break; - } - - /* Finally concat the obtained string to the SDS string and return it. */ - t = sdscat(s, buf); - if (buf != staticbuf) s_free(buf); - return t; -} - -/* Append to the sds string 's' a string obtained using printf-alike format - * specifier. - * - * After the call, the modified sds string is no longer valid and all the - * references must be substituted with the new pointer returned by the call. - * - * Example: - * - * s = sdsnew("Sum is: "); - * s = sdscatprintf(s,"%d+%d = %d",a,b,a+b). - * - * Often you need to create a string from scratch with the printf-alike - * format. When this is the need, just use sdsempty() as the target string: - * - * s = sdscatprintf(sdsempty(), "... your format ...", args); - */ -sds sdscatprintf(sds s, const char *fmt, ...) 
{ - va_list ap; - char *t; - va_start(ap, fmt); - t = sdscatvprintf(s,fmt,ap); - va_end(ap); - return t; -} - -/* This function is similar to sdscatprintf, but much faster as it does - * not rely on sprintf() family functions implemented by the libc that - * are often very slow. Moreover directly handling the sds string as - * new data is concatenated provides a performance improvement. - * - * However this function only handles an incompatible subset of printf-alike - * format specifiers: - * - * %s - C String - * %S - SDS string - * %i - signed int - * %I - 64 bit signed integer (long long, int64_t) - * %u - unsigned int - * %U - 64 bit unsigned integer (unsigned long long, uint64_t) - * %% - Verbatim "%" character. - */ -sds sdscatfmt(sds s, char const *fmt, ...) { - size_t initlen = sdslen(s); - const char *f = fmt; - int i; - va_list ap; - - va_start(ap,fmt); - f = fmt; /* Next format specifier byte to process. */ - i = initlen; /* Position of the next byte to write to dest str. */ - while(*f) { - char next, *str; - size_t l; - long long num; - unsigned long long unum; - - /* Make sure there is always space for at least 1 char. */ - if (sdsavail(s)==0) { - s = sdsMakeRoomFor(s,1); - } - - switch(*f) { - case '%': - next = *(f+1); - f++; - switch(next) { - case 's': - case 'S': - str = va_arg(ap,char*); - l = (next == 's') ? 
strlen(str) : sdslen(str); - if (sdsavail(s) < l) { - s = sdsMakeRoomFor(s,l); - } - memcpy(s+i,str,l); - sdsinclen(s,l); - i += l; - break; - case 'i': - case 'I': - if (next == 'i') - num = va_arg(ap,int); - else - num = va_arg(ap,long long); - { - char buf[SDS_LLSTR_SIZE]; - l = sdsll2str(buf,num); - if (sdsavail(s) < l) { - s = sdsMakeRoomFor(s,l); - } - memcpy(s+i,buf,l); - sdsinclen(s,l); - i += l; - } - break; - case 'u': - case 'U': - if (next == 'u') - unum = va_arg(ap,unsigned int); - else - unum = va_arg(ap,unsigned long long); - { - char buf[SDS_LLSTR_SIZE]; - l = sdsull2str(buf,unum); - if (sdsavail(s) < l) { - s = sdsMakeRoomFor(s,l); - } - memcpy(s+i,buf,l); - sdsinclen(s,l); - i += l; - } - break; - default: /* Handle %% and generally %. */ - s[i++] = next; - sdsinclen(s,1); - break; - } - break; - default: - s[i++] = *f; - sdsinclen(s,1); - break; - } - f++; - } - va_end(ap); - - /* Add null-term */ - s[i] = '\0'; - return s; -} - -/* Remove the part of the string from left and from right composed just of - * contiguous characters found in 'cset', that is a null terminted C string. - * - * After the call, the modified sds string is no longer valid and all the - * references must be substituted with the new pointer returned by the call. - * - * Example: - * - * s = sdsnew("AA...AA.a.aa.aHelloWorld :::"); - * s = sdstrim(s,"Aa. :"); - * printf("%s\n", s); - * - * Output will be just "Hello World". - */ -sds sdstrim(sds s, const char *cset) { - char *start, *end, *sp, *ep; - size_t len; - - sp = start = s; - ep = end = s+sdslen(s)-1; - while(sp <= end && strchr(cset, *sp)) sp++; - while(ep > sp && strchr(cset, *ep)) ep--; - len = (sp > ep) ? 0 : ((ep-sp)+1); - if (s != sp) memmove(s, sp, len); - s[len] = '\0'; - sdssetlen(s,len); - return s; -} - -/* Turn the string into a smaller (or equal) string containing only the - * substring specified by the 'start' and 'end' indexes. 
- * - * start and end can be negative, where -1 means the last character of the - * string, -2 the penultimate character, and so forth. - * - * The interval is inclusive, so the start and end characters will be part - * of the resulting string. - * - * The string is modified in-place. - * - * Example: - * - * s = sdsnew("Hello World"); - * sdsrange(s,1,-1); => "ello World" - */ -void sdsrange(sds s, int start, int end) { - size_t newlen, len = sdslen(s); - - if (len == 0) return; - if (start < 0) { - start = len+start; - if (start < 0) start = 0; - } - if (end < 0) { - end = len+end; - if (end < 0) end = 0; - } - newlen = (start > end) ? 0 : (end-start)+1; - if (newlen != 0) { - if (start >= (signed)len) { - newlen = 0; - } else if (end >= (signed)len) { - end = len-1; - newlen = (start > end) ? 0 : (end-start)+1; - } - } else { - start = 0; - } - if (start && newlen) memmove(s, s+start, newlen); - s[newlen] = 0; - sdssetlen(s,newlen); -} - -/* Apply tolower() to every character of the sds string 's'. */ -void sdstolower(sds s) { - int len = sdslen(s), j; - - for (j = 0; j < len; j++) s[j] = tolower(s[j]); -} - -/* Apply toupper() to every character of the sds string 's'. */ -void sdstoupper(sds s) { - int len = sdslen(s), j; - - for (j = 0; j < len; j++) s[j] = toupper(s[j]); -} - -/* Compare two sds strings s1 and s2 with memcmp(). - * - * Return value: - * - * positive if s1 > s2. - * negative if s1 < s2. - * 0 if s1 and s2 are exactly the same binary string. - * - * If two strings share exactly the same prefix, but one of the two has - * additional characters, the longer string is considered to be greater than - * the smaller one. */ -int sdscmp(const sds s1, const sds s2) { - size_t l1, l2, minlen; - int cmp; - - l1 = sdslen(s1); - l2 = sdslen(s2); - minlen = (l1 < l2) ? l1 : l2; - cmp = memcmp(s1,s2,minlen); - if (cmp == 0) return l1-l2; - return cmp; -} - -/* Split 's' with separator in 'sep'. An array - * of sds strings is returned. 
*count will be set - * by reference to the number of tokens returned. - * - * On out of memory, zero length string, zero length - * separator, NULL is returned. - * - * Note that 'sep' is able to split a string using - * a multi-character separator. For example - * sdssplit("foo_-_bar","_-_"); will return two - * elements "foo" and "bar". - * - * This version of the function is binary-safe but - * requires length arguments. sdssplit() is just the - * same function but for zero-terminated strings. - */ -sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count) { - int elements = 0, slots = 5, start = 0, j; - sds *tokens; - - if (seplen < 1 || len < 0) return NULL; - - tokens = s_malloc(sizeof(sds)*slots); - if (tokens == NULL) return NULL; - - if (len == 0) { - *count = 0; - return tokens; - } - for (j = 0; j < (len-(seplen-1)); j++) { - /* make sure there is room for the next element and the final one */ - if (slots < elements+2) { - sds *newtokens; - - slots *= 2; - newtokens = s_realloc(tokens,sizeof(sds)*slots); - if (newtokens == NULL) goto cleanup; - tokens = newtokens; - } - /* search the separator */ - if ((seplen == 1 && *(s+j) == sep[0]) || (memcmp(s+j,sep,seplen) == 0)) { - tokens[elements] = sdsnewlen(s+start,j-start); - if (tokens[elements] == NULL) goto cleanup; - elements++; - start = j+seplen; - j = j+seplen-1; /* skip the separator */ - } - } - /* Add the final element. We are sure there is room in the tokens array. */ - tokens[elements] = sdsnewlen(s+start,len-start); - if (tokens[elements] == NULL) goto cleanup; - elements++; - *count = elements; - return tokens; - -cleanup: - { - int i; - for (i = 0; i < elements; i++) sdsfree(tokens[i]); - s_free(tokens); - *count = 0; - return NULL; - } -} - -/* Free the result returned by sdssplitlen(), or do nothing if 'tokens' is NULL. 
*/ -void sdsfreesplitres(sds *tokens, int count) { - if (!tokens) return; - while(count--) - sdsfree(tokens[count]); - s_free(tokens); -} - -/* Append to the sds string "s" an escaped string representation where - * all the non-printable characters (tested with isprint()) are turned into - * escapes in the form "\n\r\a...." or "\x". - * - * After the call, the modified sds string is no longer valid and all the - * references must be substituted with the new pointer returned by the call. */ -sds sdscatrepr(sds s, const char *p, size_t len) { - s = sdscatlen(s,"\"",1); - while(len--) { - switch(*p) { - case '\\': - case '"': - s = sdscatprintf(s,"\\%c",*p); - break; - case '\n': s = sdscatlen(s,"\\n",2); break; - case '\r': s = sdscatlen(s,"\\r",2); break; - case '\t': s = sdscatlen(s,"\\t",2); break; - case '\a': s = sdscatlen(s,"\\a",2); break; - case '\b': s = sdscatlen(s,"\\b",2); break; - default: - if (isprint(*p)) - s = sdscatprintf(s,"%c",*p); - else - s = sdscatprintf(s,"\\x%02x",(unsigned char)*p); - break; - } - p++; - } - return sdscatlen(s,"\"",1); -} - -/* Helper function for sdssplitargs() that returns non zero if 'c' - * is a valid hex digit. 
*/ -int is_hex_digit(char c) { - return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || - (c >= 'A' && c <= 'F'); -} - -/* Helper function for sdssplitargs() that converts a hex digit into an - * integer from 0 to 15 */ -int hex_digit_to_int(char c) { - switch(c) { - case '0': return 0; - case '1': return 1; - case '2': return 2; - case '3': return 3; - case '4': return 4; - case '5': return 5; - case '6': return 6; - case '7': return 7; - case '8': return 8; - case '9': return 9; - case 'a': case 'A': return 10; - case 'b': case 'B': return 11; - case 'c': case 'C': return 12; - case 'd': case 'D': return 13; - case 'e': case 'E': return 14; - case 'f': case 'F': return 15; - default: return 0; - } -} - -/* Split a line into arguments, where every argument can be in the - * following programming-language REPL-alike form: - * - * foo bar "newline are supported\n" and "\xff\x00otherstuff" - * - * The number of arguments is stored into *argc, and an array - * of sds is returned. - * - * The caller should free the resulting array of sds strings with - * sdsfreesplitres(). - * - * Note that sdscatrepr() is able to convert back a string into - * a quoted string in the same format sdssplitargs() is able to parse. 
- * - * The function returns the allocated tokens on success, even when the - * input string is empty, or NULL if the input contains unbalanced - * quotes or closed quotes followed by non space characters - * as in: "foo"bar or "foo' - */ -sds *sdssplitargs(const char *line, int *argc) { - const char *p = line; - char *current = NULL; - char **vector = NULL; - - *argc = 0; - while(1) { - /* skip blanks */ - while(*p && isspace(*p)) p++; - if (*p) { - /* get a token */ - int inq=0; /* set to 1 if we are in "quotes" */ - int insq=0; /* set to 1 if we are in 'single quotes' */ - int done=0; - - if (current == NULL) current = sdsempty(); - while(!done) { - if (inq) { - if (*p == '\\' && *(p+1) == 'x' && - is_hex_digit(*(p+2)) && - is_hex_digit(*(p+3))) - { - unsigned char byte; - - byte = (hex_digit_to_int(*(p+2))*16)+ - hex_digit_to_int(*(p+3)); - current = sdscatlen(current,(char*)&byte,1); - p += 3; - } else if (*p == '\\' && *(p+1)) { - char c; - - p++; - switch(*p) { - case 'n': c = '\n'; break; - case 'r': c = '\r'; break; - case 't': c = '\t'; break; - case 'b': c = '\b'; break; - case 'a': c = '\a'; break; - default: c = *p; break; - } - current = sdscatlen(current,&c,1); - } else if (*p == '"') { - /* closing quote must be followed by a space or - * nothing at all. */ - if (*(p+1) && !isspace(*(p+1))) goto err; - done=1; - } else if (!*p) { - /* unterminated quotes */ - goto err; - } else { - current = sdscatlen(current,p,1); - } - } else if (insq) { - if (*p == '\\' && *(p+1) == '\'') { - p++; - current = sdscatlen(current,"'",1); - } else if (*p == '\'') { - /* closing quote must be followed by a space or - * nothing at all. 
*/ - if (*(p+1) && !isspace(*(p+1))) goto err; - done=1; - } else if (!*p) { - /* unterminated quotes */ - goto err; - } else { - current = sdscatlen(current,p,1); - } - } else { - switch(*p) { - case ' ': - case '\n': - case '\r': - case '\t': - case '\0': - done=1; - break; - case '"': - inq=1; - break; - case '\'': - insq=1; - break; - default: - current = sdscatlen(current,p,1); - break; - } - } - if (*p) p++; - } - /* add the token to the vector */ - vector = s_realloc(vector,((*argc)+1)*sizeof(char*)); - vector[*argc] = current; - (*argc)++; - current = NULL; - } else { - /* Even on empty input string return something not NULL. */ - if (vector == NULL) vector = s_malloc(sizeof(void*)); - return vector; - } - } - -err: - while((*argc)--) - sdsfree(vector[*argc]); - s_free(vector); - if (current) sdsfree(current); - *argc = 0; - return NULL; -} - -/* Modify the string substituting all the occurrences of the set of - * characters specified in the 'from' string to the corresponding character - * in the 'to' array. - * - * For instance: sdsmapchars(mystring, "ho", "01", 2) - * will have the effect of turning the string "hello" into "0ell1". - * - * The function returns the sds string pointer, that is always the same - * as the input pointer since no resize is needed. */ -sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen) { - size_t j, i, l = sdslen(s); - - for (j = 0; j < l; j++) { - for (i = 0; i < setlen; i++) { - if (s[j] == from[i]) { - s[j] = to[i]; - break; - } - } - } - return s; -} - -/* Join an array of C strings using the specified separator (also a C string). - * Returns the result as an sds string. */ -sds sdsjoin(char **argv, int argc, char *sep) { - sds join = sdsempty(); - int j; - - for (j = 0; j < argc; j++) { - join = sdscat(join, argv[j]); - if (j != argc-1) join = sdscat(join,sep); - } - return join; -} - -/* Like sdsjoin, but joins an array of SDS strings. 
*/ -sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen) { - sds join = sdsempty(); - int j; - - for (j = 0; j < argc; j++) { - join = sdscatsds(join, argv[j]); - if (j != argc-1) join = sdscatlen(join,sep,seplen); - } - return join; -} - -/* Wrappers to the allocators used by SDS. Note that SDS will actually - * just use the macros defined into sdsalloc.h in order to avoid to pay - * the overhead of function calls. Here we define these wrappers only for - * the programs SDS is linked to, if they want to touch the SDS internals - * even if they use a different allocator. */ -void *sds_malloc(size_t size) { return s_malloc(size); } -void *sds_realloc(void *ptr, size_t size) { return s_realloc(ptr,size); } -void sds_free(void *ptr) { s_free(ptr); } - -#if defined(SDS_TEST_MAIN) -#include -#include "testhelp.h" -#include "limits.h" - -#define UNUSED(x) (void)(x) -int sdsTest(void) { - { - sds x = sdsnew("foo"), y; - - test_cond("Create a string and obtain the length", - sdslen(x) == 3 && memcmp(x,"foo\0",4) == 0) - - sdsfree(x); - x = sdsnewlen("foo",2); - test_cond("Create a string with specified length", - sdslen(x) == 2 && memcmp(x,"fo\0",3) == 0) - - x = sdscat(x,"bar"); - test_cond("Strings concatenation", - sdslen(x) == 5 && memcmp(x,"fobar\0",6) == 0); - - x = sdscpy(x,"a"); - test_cond("sdscpy() against an originally longer string", - sdslen(x) == 1 && memcmp(x,"a\0",2) == 0) - - x = sdscpy(x,"xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk"); - test_cond("sdscpy() against an originally shorter string", - sdslen(x) == 33 && - memcmp(x,"xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk\0",33) == 0) - - sdsfree(x); - x = sdscatprintf(sdsempty(),"%d",123); - test_cond("sdscatprintf() seems working in the base case", - sdslen(x) == 3 && memcmp(x,"123\0",4) == 0) - - sdsfree(x); - x = sdsnew("--"); - x = sdscatfmt(x, "Hello %s World %I,%I--", "Hi!", LLONG_MIN,LLONG_MAX); - test_cond("sdscatfmt() seems working in the base case", - sdslen(x) == 60 && - memcmp(x,"--Hello Hi! 
World -9223372036854775808," - "9223372036854775807--",60) == 0) - printf("[%s]\n",x); - - sdsfree(x); - x = sdsnew("--"); - x = sdscatfmt(x, "%u,%U--", UINT_MAX, ULLONG_MAX); - test_cond("sdscatfmt() seems working with unsigned numbers", - sdslen(x) == 35 && - memcmp(x,"--4294967295,18446744073709551615--",35) == 0) - - sdsfree(x); - x = sdsnew(" x "); - sdstrim(x," x"); - test_cond("sdstrim() works when all chars match", - sdslen(x) == 0) - - sdsfree(x); - x = sdsnew(" x "); - sdstrim(x," "); - test_cond("sdstrim() works when a single char remains", - sdslen(x) == 1 && x[0] == 'x') - - sdsfree(x); - x = sdsnew("xxciaoyyy"); - sdstrim(x,"xy"); - test_cond("sdstrim() correctly trims characters", - sdslen(x) == 4 && memcmp(x,"ciao\0",5) == 0) - - y = sdsdup(x); - sdsrange(y,1,1); - test_cond("sdsrange(...,1,1)", - sdslen(y) == 1 && memcmp(y,"i\0",2) == 0) - - sdsfree(y); - y = sdsdup(x); - sdsrange(y,1,-1); - test_cond("sdsrange(...,1,-1)", - sdslen(y) == 3 && memcmp(y,"iao\0",4) == 0) - - sdsfree(y); - y = sdsdup(x); - sdsrange(y,-2,-1); - test_cond("sdsrange(...,-2,-1)", - sdslen(y) == 2 && memcmp(y,"ao\0",3) == 0) - - sdsfree(y); - y = sdsdup(x); - sdsrange(y,2,1); - test_cond("sdsrange(...,2,1)", - sdslen(y) == 0 && memcmp(y,"\0",1) == 0) - - sdsfree(y); - y = sdsdup(x); - sdsrange(y,1,100); - test_cond("sdsrange(...,1,100)", - sdslen(y) == 3 && memcmp(y,"iao\0",4) == 0) - - sdsfree(y); - y = sdsdup(x); - sdsrange(y,100,100); - test_cond("sdsrange(...,100,100)", - sdslen(y) == 0 && memcmp(y,"\0",1) == 0) - - sdsfree(y); - sdsfree(x); - x = sdsnew("foo"); - y = sdsnew("foa"); - test_cond("sdscmp(foo,foa)", sdscmp(x,y) > 0) - - sdsfree(y); - sdsfree(x); - x = sdsnew("bar"); - y = sdsnew("bar"); - test_cond("sdscmp(bar,bar)", sdscmp(x,y) == 0) - - sdsfree(y); - sdsfree(x); - x = sdsnew("aar"); - y = sdsnew("bar"); - test_cond("sdscmp(bar,bar)", sdscmp(x,y) < 0) - - sdsfree(y); - sdsfree(x); - x = sdsnewlen("\a\n\0foo\r",7); - y = sdscatrepr(sdsempty(),x,sdslen(x)); 
- test_cond("sdscatrepr(...data...)", - memcmp(y,"\"\\a\\n\\x00foo\\r\"",15) == 0) - - { - unsigned int oldfree; - char *p; - int step = 10, j, i; - - sdsfree(x); - sdsfree(y); - x = sdsnew("0"); - test_cond("sdsnew() free/len buffers", sdslen(x) == 1 && sdsavail(x) == 0); - - /* Run the test a few times in order to hit the first two - * SDS header types. */ - for (i = 0; i < 10; i++) { - int oldlen = sdslen(x); - x = sdsMakeRoomFor(x,step); - int type = x[-1]&SDS_TYPE_MASK; - - test_cond("sdsMakeRoomFor() len", sdslen(x) == oldlen); - if (type != SDS_TYPE_5) { - test_cond("sdsMakeRoomFor() free", sdsavail(x) >= step); - oldfree = sdsavail(x); - } - p = x+oldlen; - for (j = 0; j < step; j++) { - p[j] = 'A'+j; - } - sdsIncrLen(x,step); - } - test_cond("sdsMakeRoomFor() content", - memcmp("0ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ",x,101) == 0); - test_cond("sdsMakeRoomFor() final length",sdslen(x)==101); - - sdsfree(x); - } - } - test_report() - return 0; -} -#endif - -#ifdef SDS_TEST_MAIN -int main(void) { - return sdsTest(); -} -#endif diff --git a/gateway/server.go b/gateway/server.go deleted file mode 100644 index 1c86f879cba4..000000000000 --- a/gateway/server.go +++ /dev/null @@ -1,1411 +0,0 @@ -package gateway - -import ( - "crypto/tls" - "fmt" - "html/template" - "io/ioutil" - stdlog "log" - "log/syslog" - "net" - "net/http" - pprof_http "net/http/pprof" - "os" - "path/filepath" - "runtime" - "runtime/pprof" - "strconv" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" - logstashHook "github.com/bshuster-repo/logrus-logstash-hook" - "github.com/evalphobia/logrus_sentry" - "github.com/facebookgo/pidfile" - graylogHook "github.com/gemnasium/logrus-graylog-hook" - "github.com/gorilla/mux" - "github.com/justinas/alice" - "github.com/lonelycode/osin" - newrelic "github.com/newrelic/go-agent" - "github.com/rs/cors" - uuid 
"github.com/satori/go.uuid" - "golang.org/x/net/http2" - "rsc.io/letsencrypt" - - "github.com/TykTechnologies/goagain" - gas "github.com/TykTechnologies/goautosocket" - "github.com/TykTechnologies/gorpc" - "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/certs" - "github.com/TykTechnologies/tyk/checkup" - "github.com/TykTechnologies/tyk/cli" - "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/dnscache" - logger "github.com/TykTechnologies/tyk/log" - "github.com/TykTechnologies/tyk/regexp" - "github.com/TykTechnologies/tyk/rpc" - "github.com/TykTechnologies/tyk/storage" - "github.com/TykTechnologies/tyk/trace" - "github.com/TykTechnologies/tyk/user" -) - -var ( - log = logger.Get() - mainLog = log.WithField("prefix", "main") - pubSubLog = log.WithField("prefix", "pub-sub") - rawLog = logger.GetRaw() - templates *template.Template - analytics RedisAnalyticsHandler - GlobalEventsJSVM JSVM - memProfFile *os.File - MainNotifier RedisNotifier - DefaultOrgStore DefaultSessionManager - DefaultQuotaStore DefaultSessionManager - FallbackKeySesionManager = SessionHandler(&DefaultSessionManager{}) - MonitoringHandler config.TykEventHandler - RPCListener RPCStorageHandler - DashService DashboardServiceSender - CertificateManager *certs.CertificateManager - NewRelicApplication newrelic.Application - - apisMu sync.RWMutex - apiSpecs []*APISpec - apisByID = map[string]*APISpec{} - - keyGen DefaultKeyGenerator - - policiesMu sync.RWMutex - policiesByID = map[string]user.Policy{} - - mainRouter *mux.Router - controlRouter *mux.Router - LE_MANAGER letsencrypt.Manager - LE_FIRSTRUN bool - - NodeID string - - runningTests = false - - // confPaths is the series of paths to try to use as config files. The - // first one to exist will be used. If none exists, a default config - // will be written to the first path in the list. - // - // When --conf=foo is used, this will be replaced by []string{"foo"}. 
- confPaths = []string{ - "tyk.conf", - // TODO: add ~/.config/tyk/tyk.conf here? - "/etc/tyk/tyk.conf", - } - - dnsCacheManager dnscache.IDnsCacheManager -) - -const ( - defReadTimeout = 120 * time.Second - defWriteTimeout = 120 * time.Second - appName = "tyk-gateway" -) - -func getApiSpec(apiID string) *APISpec { - apisMu.RLock() - spec := apisByID[apiID] - apisMu.RUnlock() - return spec -} - -func apisByIDLen() int { - apisMu.RLock() - defer apisMu.RUnlock() - return len(apisByID) -} - -var redisPurgeOnce sync.Once -var rpcPurgeOnce sync.Once -var purgeTicker = time.Tick(time.Second) -var rpcPurgeTicker = time.Tick(10 * time.Second) - -// Create all globals and init connection handlers -func setupGlobals() { - - reloadMu.Lock() - defer reloadMu.Unlock() - - dnsCacheManager = dnscache.NewDnsCacheManager(config.Global().DnsCache.MultipleIPsHandleStrategy) - if config.Global().DnsCache.Enabled { - dnsCacheManager.InitDNSCaching( - time.Duration(config.Global().DnsCache.TTL)*time.Second, - time.Duration(config.Global().DnsCache.CheckInterval)*time.Second) - } - - mainRouter = mux.NewRouter() - controlRouter = mux.NewRouter() - - if config.Global().EnableAnalytics && config.Global().Storage.Type != "redis" { - mainLog.Fatal("Analytics requires Redis Storage backend, please enable Redis in the tyk.conf file.") - } - - // Initialise our Host Checker - healthCheckStore := storage.RedisCluster{KeyPrefix: "host-checker:"} - InitHostCheckManager(&healthCheckStore) - - redisStore := storage.RedisCluster{KeyPrefix: "apikey-", HashKeys: config.Global().HashKeys} - FallbackKeySesionManager.Init(&redisStore) - - if config.Global().EnableAnalytics && analytics.Store == nil { - globalConf := config.Global() - globalConf.LoadIgnoredIPs() - config.SetGlobal(globalConf) - mainLog.Debug("Setting up analytics DB connection") - - analyticsStore := storage.RedisCluster{KeyPrefix: "analytics-"} - analytics.Store = &analyticsStore - analytics.Init(globalConf) - - redisPurgeOnce.Do(func() 
{ - store := storage.RedisCluster{KeyPrefix: "analytics-"} - redisPurger := RedisPurger{Store: &store} - go redisPurger.PurgeLoop(purgeTicker) - }) - - if config.Global().AnalyticsConfig.Type == "rpc" { - mainLog.Debug("Using RPC cache purge") - - rpcPurgeOnce.Do(func() { - store := storage.RedisCluster{KeyPrefix: "analytics-"} - purger := rpc.Purger{ - Store: &store, - } - purger.Connect() - go purger.PurgeLoop(rpcPurgeTicker) - }) - } - } - - // Load all the files that have the "error" prefix. - templatesDir := filepath.Join(config.Global().TemplatePath, "error*") - templates = template.Must(template.ParseGlob(templatesDir)) - - if config.Global().CoProcessOptions.EnableCoProcess { - if err := CoProcessInit(); err != nil { - log.WithField("prefix", "coprocess").Error(err) - } - } - - // Get the notifier ready - mainLog.Debug("Notifier will not work in hybrid mode") - mainNotifierStore := &storage.RedisCluster{} - mainNotifierStore.Connect() - MainNotifier = RedisNotifier{mainNotifierStore, RedisPubSubChannel} - - if config.Global().Monitor.EnableTriggerMonitors { - h := &WebHookHandler{} - if err := h.Init(config.Global().Monitor.Config); err != nil { - mainLog.Error("Failed to initialise monitor! 
", err) - } else { - MonitoringHandler = h - } - } - - if globalConfig := config.Global(); globalConfig.AnalyticsConfig.NormaliseUrls.Enabled { - mainLog.Info("Setting up analytics normaliser") - globalConfig.AnalyticsConfig.NormaliseUrls.CompiledPatternSet = initNormalisationPatterns() - config.SetGlobal(globalConfig) - } - - certificateSecret := config.Global().Secret - if config.Global().Security.PrivateCertificateEncodingSecret != "" { - certificateSecret = config.Global().Security.PrivateCertificateEncodingSecret - } - - CertificateManager = certs.NewCertificateManager(getGlobalStorageHandler("cert-", false), certificateSecret, log) - - if config.Global().NewRelic.AppName != "" { - NewRelicApplication = SetupNewRelic() - } -} - -func buildConnStr(resource string) string { - - if config.Global().DBAppConfOptions.ConnectionString == "" && config.Global().DisableDashboardZeroConf { - mainLog.Fatal("Connection string is empty, failing.") - } - - if !config.Global().DisableDashboardZeroConf && config.Global().DBAppConfOptions.ConnectionString == "" { - mainLog.Info("Waiting for zeroconf signal...") - for config.Global().DBAppConfOptions.ConnectionString == "" { - time.Sleep(1 * time.Second) - } - } - - return config.Global().DBAppConfOptions.ConnectionString + resource -} - -func syncAPISpecs() (int, error) { - loader := APIDefinitionLoader{} - - apisMu.Lock() - defer apisMu.Unlock() - - if config.Global().UseDBAppConfigs { - connStr := buildConnStr("/system/apis") - tmpSpecs, err := loader.FromDashboardService(connStr, config.Global().NodeSecret) - if err != nil { - log.Error("failed to load API specs: ", err) - return 0, err - } - - apiSpecs = tmpSpecs - - mainLog.Debug("Downloading API Configurations from Dashboard Service") - } else if config.Global().SlaveOptions.UseRPC { - mainLog.Debug("Using RPC Configuration") - - var err error - apiSpecs, err = loader.FromRPC(config.Global().SlaveOptions.RPCKey) - if err != nil { - return 0, err - } - } else { - apiSpecs 
= loader.FromDir(config.Global().AppPath) - } - - mainLog.Printf("Detected %v APIs", len(apiSpecs)) - - if config.Global().AuthOverride.ForceAuthProvider { - for i := range apiSpecs { - apiSpecs[i].AuthProvider = config.Global().AuthOverride.AuthProvider - } - } - - if config.Global().AuthOverride.ForceSessionProvider { - for i := range apiSpecs { - apiSpecs[i].SessionProvider = config.Global().AuthOverride.SessionProvider - } - } - - return len(apiSpecs), nil -} - -func syncPolicies() (count int, err error) { - var pols map[string]user.Policy - - mainLog.Info("Loading policies") - - switch config.Global().Policies.PolicySource { - case "service": - if config.Global().Policies.PolicyConnectionString == "" { - mainLog.Fatal("No connection string or node ID present. Failing.") - } - connStr := config.Global().Policies.PolicyConnectionString - connStr = connStr + "/system/policies" - - mainLog.Info("Using Policies from Dashboard Service") - - pols = LoadPoliciesFromDashboard(connStr, config.Global().NodeSecret, config.Global().Policies.AllowExplicitPolicyID) - case "rpc": - mainLog.Debug("Using Policies from RPC") - pols, err = LoadPoliciesFromRPC(config.Global().SlaveOptions.RPCKey) - default: - // this is the only case now where we need a policy record name - if config.Global().Policies.PolicyRecordName == "" { - mainLog.Debug("No policy record name defined, skipping...") - return 0, nil - } - pols = LoadPoliciesFromFile(config.Global().Policies.PolicyRecordName) - } - mainLog.Infof("Policies found (%d total):", len(pols)) - for id := range pols { - mainLog.Infof(" - %s", id) - } - - policiesMu.Lock() - defer policiesMu.Unlock() - if len(pols) > 0 { - policiesByID = pols - } - - return len(pols), err -} - -// stripSlashes removes any trailing slashes from the request's URL -// path. 
-func stripSlashes(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - path := r.URL.Path - if trim := strings.TrimRight(path, "/"); trim != path { - r2 := *r - r2.URL.Path = trim - r = &r2 - } - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} - -func controlAPICheckClientCertificate(certLevel string, next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if config.Global().Security.ControlAPIUseMutualTLS { - if err := CertificateManager.ValidateRequestCertificate(config.Global().Security.Certificates.ControlAPI, r); err != nil { - doJSONWrite(w, http.StatusForbidden, apiError(err.Error())) - return - } - } - - next.ServeHTTP(w, r) - }) -} - -// Set up default Tyk control API endpoints - these are global, so need to be added first -func loadAPIEndpoints(muxer *mux.Router) { - hostname := config.Global().HostName - if config.Global().ControlAPIHostname != "" { - hostname = config.Global().ControlAPIHostname - } - - r := mux.NewRouter() - muxer.PathPrefix("/tyk/").Handler(http.StripPrefix("/tyk", - stripSlashes(checkIsAPIOwner(controlAPICheckClientCertificate("/gateway/client", InstrumentationMW(r)))), - )) - - if hostname != "" { - muxer = muxer.Host(hostname).Subrouter() - mainLog.Info("Control API hostname set: ", hostname) - } - - if *cli.HTTPProfile || config.Global().HTTPProfile { - muxer.HandleFunc("/debug/pprof/profile", pprof_http.Profile) - muxer.HandleFunc("/debug/pprof/{_:.*}", pprof_http.Index) - } - - r.MethodNotAllowedHandler = MethodNotAllowedHandler{} - - mainLog.Info("Initialising Tyk REST API Endpoints") - - // set up main API handlers - r.HandleFunc("/reload/group", groupResetHandler).Methods("GET") - r.HandleFunc("/reload", resetHandler(nil)).Methods("GET") - - if !isRPCMode() { - r.HandleFunc("/org/keys", orgHandler).Methods("GET") - r.HandleFunc("/org/keys/{keyName:[^/]*}", orgHandler).Methods("POST", "PUT", "GET", "DELETE") - 
r.HandleFunc("/keys/policy/{keyName}", policyUpdateHandler).Methods("POST") - r.HandleFunc("/keys/create", createKeyHandler).Methods("POST") - r.HandleFunc("/apis", apiHandler).Methods("GET", "POST", "PUT", "DELETE") - r.HandleFunc("/apis/{apiID}", apiHandler).Methods("GET", "POST", "PUT", "DELETE") - r.HandleFunc("/health", healthCheckhandler).Methods("GET") - r.HandleFunc("/oauth/clients/create", createOauthClient).Methods("POST") - r.HandleFunc("/oauth/clients/{apiID}/{keyName:[^/]*}", oAuthClientHandler).Methods("PUT") - r.HandleFunc("/oauth/refresh/{keyName}", invalidateOauthRefresh).Methods("DELETE") - r.HandleFunc("/cache/{apiID}", invalidateCacheHandler).Methods("DELETE") - } else { - mainLog.Info("Node is slaved, REST API minimised") - } - - r.HandleFunc("/debug", traceHandler).Methods("POST") - - r.HandleFunc("/keys", keyHandler).Methods("POST", "PUT", "GET", "DELETE") - r.HandleFunc("/keys/{keyName:[^/]*}", keyHandler).Methods("POST", "PUT", "GET", "DELETE") - r.HandleFunc("/certs", certHandler).Methods("POST", "GET") - r.HandleFunc("/certs/{certID:[^/]*}", certHandler).Methods("POST", "GET", "DELETE") - r.HandleFunc("/oauth/clients/{apiID}", oAuthClientHandler).Methods("GET", "DELETE") - r.HandleFunc("/oauth/clients/{apiID}/{keyName:[^/]*}", oAuthClientHandler).Methods("GET", "DELETE") - r.HandleFunc("/oauth/clients/{apiID}/{keyName}/tokens", oAuthClientTokensHandler).Methods("GET") - - mainLog.Debug("Loaded API Endpoints") -} - -// checkIsAPIOwner will ensure that the accessor of the tyk API has the -// correct security credentials - this is a shared secret between the -// client and the owner and is set in the tyk.conf file. This should -// never be made public! 
-func checkIsAPIOwner(next http.Handler) http.Handler { - secret := config.Global().Secret - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - tykAuthKey := r.Header.Get("X-Tyk-Authorization") - if tykAuthKey != secret { - // Error - mainLog.Warning("Attempted administrative access with invalid or missing key!") - - doJSONWrite(w, http.StatusForbidden, apiError("Forbidden")) - return - } - next.ServeHTTP(w, r) - }) -} - -func generateOAuthPrefix(apiID string) string { - return "oauth-data." + apiID + "." -} - -// Create API-specific OAuth handlers and respective auth servers -func addOAuthHandlers(spec *APISpec, muxer *mux.Router) *OAuthManager { - apiAuthorizePath := spec.Proxy.ListenPath + "tyk/oauth/authorize-client{_:/?}" - clientAuthPath := spec.Proxy.ListenPath + "oauth/authorize{_:/?}" - clientAccessPath := spec.Proxy.ListenPath + "oauth/token{_:/?}" - - serverConfig := osin.NewServerConfig() - serverConfig.ErrorStatusCode = http.StatusForbidden - serverConfig.AllowedAccessTypes = spec.Oauth2Meta.AllowedAccessTypes - serverConfig.AllowedAuthorizeTypes = spec.Oauth2Meta.AllowedAuthorizeTypes - serverConfig.RedirectUriSeparator = config.Global().OauthRedirectUriSeparator - - prefix := generateOAuthPrefix(spec.APIID) - storageManager := getGlobalStorageHandler(prefix, false) - storageManager.Connect() - osinStorage := &RedisOsinStorageInterface{storageManager, spec.SessionManager} //TODO: Needs storage manager from APISpec - - osinServer := TykOsinNewServer(serverConfig, osinStorage) - - oauthManager := OAuthManager{spec, osinServer} - oauthHandlers := OAuthHandlers{oauthManager} - - muxer.Handle(apiAuthorizePath, checkIsAPIOwner(allowMethods(oauthHandlers.HandleGenerateAuthCodeData, "POST"))) - muxer.HandleFunc(clientAuthPath, allowMethods(oauthHandlers.HandleAuthorizePassthrough, "GET", "POST")) - muxer.HandleFunc(clientAccessPath, allowMethods(oauthHandlers.HandleAccessRequest, "GET", "POST")) - - return &oauthManager -} - -func 
addBatchEndpoint(spec *APISpec, muxer *mux.Router) { - mainLog.Debug("Batch requests enabled for API") - apiBatchPath := spec.Proxy.ListenPath + "tyk/batch/" - batchHandler := BatchRequestHandler{API: spec} - muxer.HandleFunc(apiBatchPath, batchHandler.HandleBatchRequest) -} - -func loadCustomMiddleware(spec *APISpec) ([]string, apidef.MiddlewareDefinition, []apidef.MiddlewareDefinition, []apidef.MiddlewareDefinition, []apidef.MiddlewareDefinition, apidef.MiddlewareDriver) { - mwPaths := []string{} - var mwAuthCheckFunc apidef.MiddlewareDefinition - mwPreFuncs := []apidef.MiddlewareDefinition{} - mwPostFuncs := []apidef.MiddlewareDefinition{} - mwPostKeyAuthFuncs := []apidef.MiddlewareDefinition{} - mwDriver := apidef.OttoDriver - - // Set AuthCheck hook - if spec.CustomMiddleware.AuthCheck.Name != "" { - mwAuthCheckFunc = spec.CustomMiddleware.AuthCheck - if spec.CustomMiddleware.AuthCheck.Path != "" { - // Feed a JS file to Otto - mwPaths = append(mwPaths, spec.CustomMiddleware.AuthCheck.Path) - } - } - - // Load from the configuration - for _, mwObj := range spec.CustomMiddleware.Pre { - mwPaths = append(mwPaths, mwObj.Path) - mwPreFuncs = append(mwPreFuncs, mwObj) - mainLog.Debug("Loading custom PRE-PROCESSOR middleware: ", mwObj.Name) - } - for _, mwObj := range spec.CustomMiddleware.Post { - mwPaths = append(mwPaths, mwObj.Path) - mwPostFuncs = append(mwPostFuncs, mwObj) - mainLog.Debug("Loading custom POST-PROCESSOR middleware: ", mwObj.Name) - } - - // Load from folders - for _, folder := range [...]struct { - name string - single *apidef.MiddlewareDefinition - slice *[]apidef.MiddlewareDefinition - }{ - {name: "pre", slice: &mwPreFuncs}, - {name: "auth", single: &mwAuthCheckFunc}, - {name: "post_auth", slice: &mwPostKeyAuthFuncs}, - {name: "post", slice: &mwPostFuncs}, - } { - globPath := filepath.Join(config.Global().MiddlewarePath, spec.APIID, folder.name, "*.js") - paths, _ := filepath.Glob(globPath) - for _, path := range paths { - 
mainLog.Debug("Loading file middleware from ", path) - - mwDef := apidef.MiddlewareDefinition{ - Name: strings.Split(filepath.Base(path), ".")[0], - Path: path, - } - mainLog.Debug("-- Middleware name ", mwDef.Name) - mwDef.RequireSession = strings.HasSuffix(mwDef.Name, "_with_session") - if mwDef.RequireSession { - switch folder.name { - case "post_auth", "post": - mainLog.Debug("-- Middleware requires session") - default: - mainLog.Warning("Middleware requires session, but isn't post-auth: ", mwDef.Name) - } - } - mwPaths = append(mwPaths, path) - if folder.single != nil { - *folder.single = mwDef - } else { - *folder.slice = append(*folder.slice, mwDef) - } - } - } - - // Set middleware driver, defaults to OttoDriver - if spec.CustomMiddleware.Driver != "" { - mwDriver = spec.CustomMiddleware.Driver - } - - // Load PostAuthCheck hooks - for _, mwObj := range spec.CustomMiddleware.PostKeyAuth { - if mwObj.Path != "" { - // Otto files are specified here - mwPaths = append(mwPaths, mwObj.Path) - } - mwPostKeyAuthFuncs = append(mwPostKeyAuthFuncs, mwObj) - } - - return mwPaths, mwAuthCheckFunc, mwPreFuncs, mwPostFuncs, mwPostKeyAuthFuncs, mwDriver -} - -func createResponseMiddlewareChain(spec *APISpec) { - // Create the response processors - - responseChain := make([]TykResponseHandler, len(spec.ResponseProcessors)) - for i, processorDetail := range spec.ResponseProcessors { - processor := responseProcessorByName(processorDetail.Name) - if processor == nil { - mainLog.Error("No such processor: ", processorDetail.Name) - return - } - if err := processor.Init(processorDetail.Options, spec); err != nil { - mainLog.Debug("Failed to init processor: ", err) - } - mainLog.Debug("Loading Response processor: ", processorDetail.Name) - responseChain[i] = processor - } - spec.ResponseChain = responseChain -} - -func handleCORS(chain *[]alice.Constructor, spec *APISpec) { - - if spec.CORS.Enable { - mainLog.Debug("CORS ENABLED") - c := cors.New(cors.Options{ - AllowedOrigins: 
spec.CORS.AllowedOrigins, - AllowedMethods: spec.CORS.AllowedMethods, - AllowedHeaders: spec.CORS.AllowedHeaders, - ExposedHeaders: spec.CORS.ExposedHeaders, - AllowCredentials: spec.CORS.AllowCredentials, - MaxAge: spec.CORS.MaxAge, - OptionsPassthrough: spec.CORS.OptionsPassthrough, - Debug: spec.CORS.Debug, - }) - - *chain = append(*chain, c.Handler) - } -} - -func isRPCMode() bool { - return config.Global().AuthOverride.ForceAuthProvider && - config.Global().AuthOverride.AuthProvider.StorageEngine == RPCStorageEngine -} - -func rpcReloadLoop(rpcKey string) { - for { - RPCListener.CheckForReload(rpcKey) - } -} - -var reloadMu sync.Mutex - -func doReload() { - reloadMu.Lock() - defer reloadMu.Unlock() - - // Initialize/reset the JSVM - if config.Global().EnableJSVM { - GlobalEventsJSVM.Init(nil, logrus.NewEntry(log)) - } - - // Load the API Policies - if _, err := syncPolicies(); err != nil { - mainLog.Error("Error during syncing policies:", err.Error()) - return - } - - // load the specs - if count, err := syncAPISpecs(); err != nil { - mainLog.Error("Error during syncing apis:", err.Error()) - return - } else { - // skip re-loading only if dashboard service reported 0 APIs - // and current registry had 0 APIs - if count == 0 && apisByIDLen() == 0 { - mainLog.Warning("No API Definitions found, not reloading") - return - } - } - - // We have updated specs, lets load those... - mainLog.Info("Preparing new router") - newRouter := mux.NewRouter() - if config.Global().HttpServerOptions.OverrideDefaults { - newRouter.SkipClean(config.Global().HttpServerOptions.SkipURLCleaning) - } - - if config.Global().ControlAPIPort == 0 { - loadAPIEndpoints(newRouter) - } - - loadGlobalApps(newRouter) - - mainLog.Info("API reload complete") - - mainRouter = newRouter -} - -// startReloadChan and reloadDoneChan are used by the two reload loops -// running in separate goroutines to talk. 
reloadQueueLoop will use -// startReloadChan to signal to reloadLoop to start a reload, and -// reloadLoop will use reloadDoneChan to signal back that it's done with -// the reload. Buffered simply to not make the goroutines block each -// other. -var startReloadChan = make(chan struct{}, 1) -var reloadDoneChan = make(chan struct{}, 1) - -func reloadLoop(tick <-chan time.Time) { - <-tick - for range startReloadChan { - mainLog.Info("reload: initiating") - doReload() - mainLog.Info("reload: complete") - - mainLog.Info("Initiating coprocess reload") - DoCoprocessReload() - - reloadDoneChan <- struct{}{} - <-tick - } -} - -// reloadQueue is used by reloadURLStructure to queue a reload. It's not -// buffered, as reloadQueueLoop should pick these up immediately. -var reloadQueue = make(chan func()) - -func reloadQueueLoop() { - reloading := false - var fns []func() - for { - select { - case <-reloadDoneChan: - for _, fn := range fns { - fn() - } - fns = fns[:0] - reloading = false - case fn := <-reloadQueue: - if fn != nil { - fns = append(fns, fn) - } - if !reloading { - mainLog.Info("Reload queued") - startReloadChan <- struct{}{} - reloading = true - } else { - mainLog.Info("Reload already queued") - } - } - } -} - -// reloadURLStructure will queue an API reload. The reload will -// eventually create a new muxer, reload all the app configs for an -// instance and then replace the DefaultServeMux with the new one. This -// enables a reconfiguration to take place without stopping any requests -// from being handled. -// -// done will be called when the reload is finished. Note that if a -// reload is already queued, another won't be queued, but done will -// still be called when said queued reload is finished. 
-func reloadURLStructure(done func()) { - reloadQueue <- done -} - -func setupLogger() { - if config.Global().UseSentry { - mainLog.Debug("Enabling Sentry support") - hook, err := logrus_sentry.NewSentryHook(config.Global().SentryCode, []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - }) - - hook.Timeout = 0 - - if err == nil { - log.Hooks.Add(hook) - rawLog.Hooks.Add(hook) - } - mainLog.Debug("Sentry hook active") - } - - if config.Global().UseSyslog { - mainLog.Debug("Enabling Syslog support") - hook, err := logrus_syslog.NewSyslogHook(config.Global().SyslogTransport, - config.Global().SyslogNetworkAddr, - syslog.LOG_INFO, "") - - if err == nil { - log.Hooks.Add(hook) - rawLog.Hooks.Add(hook) - } - mainLog.Debug("Syslog hook active") - } - - if config.Global().UseGraylog { - mainLog.Debug("Enabling Graylog support") - hook := graylogHook.NewGraylogHook(config.Global().GraylogNetworkAddr, - map[string]interface{}{"tyk-module": "gateway"}) - - log.Hooks.Add(hook) - rawLog.Hooks.Add(hook) - - mainLog.Debug("Graylog hook active") - } - - if config.Global().UseLogstash { - mainLog.Debug("Enabling Logstash support") - - var hook *logstashHook.Hook - var err error - var conn net.Conn - if config.Global().LogstashTransport == "udp" { - mainLog.Debug("Connecting to Logstash with udp") - hook, err = logstashHook.NewHook(config.Global().LogstashTransport, - config.Global().LogstashNetworkAddr, - appName) - } else { - mainLog.Debugf("Connecting to Logstash with %s", config.Global().LogstashTransport) - conn, err = gas.Dial(config.Global().LogstashTransport, config.Global().LogstashNetworkAddr) - if err == nil { - hook, err = logstashHook.NewHookWithConn(conn, appName) - } - } - - if err != nil { - log.Errorf("Error making connection for logstash: %v", err) - } else { - log.Hooks.Add(hook) - rawLog.Hooks.Add(hook) - mainLog.Debug("Logstash hook active") - } - } - - if config.Global().UseRedisLog { - hook := newRedisHook() - log.Hooks.Add(hook) - 
rawLog.Hooks.Add(hook) - - mainLog.Debug("Redis log hook active") - } -} - -func initialiseSystem() error { - if runningTests && os.Getenv("TYK_LOGLEVEL") == "" { - // `go test` without TYK_LOGLEVEL set defaults to no log - // output - log.Level = logrus.ErrorLevel - log.Out = ioutil.Discard - gorpc.SetErrorLogger(func(string, ...interface{}) {}) - stdlog.SetOutput(ioutil.Discard) - } else if *cli.DebugMode { - log.Level = logrus.DebugLevel - mainLog.Debug("Enabling debug-level output") - } - - if *cli.Conf != "" { - mainLog.Debugf("Using %s for configuration", *cli.Conf) - confPaths = []string{*cli.Conf} - } else { - mainLog.Debug("No configuration file defined, will try to use default (tyk.conf)") - } - - mainLog.Infof("Tyk API Gateway %s", VERSION) - - if !runningTests { - globalConf := config.Config{} - if err := config.Load(confPaths, &globalConf); err != nil { - return err - } - if globalConf.PIDFileLocation == "" { - globalConf.PIDFileLocation = "/var/run/tyk/tyk-gateway.pid" - } - // It's necessary to set global conf before and after calling afterConfSetup as global conf - // is being used by dependencies of the even handler init and then conf is modified again. - config.SetGlobal(globalConf) - afterConfSetup(&globalConf) - config.SetGlobal(globalConf) - } - - if os.Getenv("TYK_LOGLEVEL") == "" && !*cli.DebugMode { - level := strings.ToLower(config.Global().LogLevel) - switch level { - case "", "info": - // default, do nothing - case "error": - log.Level = logrus.ErrorLevel - case "warn": - log.Level = logrus.WarnLevel - case "debug": - log.Level = logrus.DebugLevel - default: - mainLog.Fatalf("Invalid log level %q specified in config, must be error, warn, debug or info. 
", level) - } - } - - if config.Global().Storage.Type != "redis" { - mainLog.Fatal("Redis connection details not set, please ensure that the storage type is set to Redis and that the connection parameters are correct.") - } - - // suply rpc client globals to join it main loging and instrumentation sub systems - rpc.Log = log - rpc.Instrument = instrument - - setupGlobals() - - globalConf := config.Global() - - if *cli.Port != "" { - portNum, err := strconv.Atoi(*cli.Port) - if err != nil { - mainLog.Error("Port specified in flags must be a number: ", err) - } else { - globalConf.ListenPort = portNum - config.SetGlobal(globalConf) - } - } - - // Enable all the loggers - setupLogger() - - mainLog.Info("PIDFile location set to: ", config.Global().PIDFileLocation) - - pidfile.SetPidfilePath(config.Global().PIDFileLocation) - if err := pidfile.Write(); err != nil { - mainLog.Error("Failed to write PIDFile: ", err) - } - - if globalConf.UseDBAppConfigs && globalConf.Policies.PolicySource != config.DefaultDashPolicySource { - globalConf.Policies.PolicySource = config.DefaultDashPolicySource - globalConf.Policies.PolicyConnectionString = globalConf.DBAppConfOptions.ConnectionString - if globalConf.Policies.PolicyRecordName == "" { - globalConf.Policies.PolicyRecordName = config.DefaultDashPolicyRecordName - } - } - - getHostDetails() - setupInstrumentation() - - if config.Global().HttpServerOptions.UseLE_SSL { - go StartPeriodicStateBackup(&LE_MANAGER) - } - - return nil -} - -// afterConfSetup takes care of non-sensical config values (such as zero -// timeouts) and sets up a few globals that depend on the config. 
-func afterConfSetup(conf *config.Config) { - if conf.SlaveOptions.CallTimeout == 0 { - conf.SlaveOptions.CallTimeout = 30 - } - - if conf.SlaveOptions.PingTimeout == 0 { - conf.SlaveOptions.PingTimeout = 60 - } - - rpc.GlobalRPCPingTimeout = time.Second * time.Duration(conf.SlaveOptions.PingTimeout) - rpc.GlobalRPCCallTimeout = time.Second * time.Duration(conf.SlaveOptions.CallTimeout) - initGenericEventHandlers(conf) - regexp.ResetCache(time.Second*time.Duration(conf.RegexpCacheExpire), !conf.DisableRegexpCache) - - if conf.HealthCheckEndpointName == "" { - conf.HealthCheckEndpointName = "hello" - } -} - -var hostDetails struct { - Hostname string - PID int -} - -func getHostDetails() { - var err error - if hostDetails.PID, err = pidfile.Read(); err != nil { - mainLog.Error("Failed ot get host pid: ", err) - } - if hostDetails.Hostname, err = os.Hostname(); err != nil { - mainLog.Error("Failed ot get hostname: ", err) - } -} - -func getGlobalStorageHandler(keyPrefix string, hashKeys bool) storage.Handler { - if config.Global().SlaveOptions.UseRPC { - return &RPCStorageHandler{ - KeyPrefix: keyPrefix, - HashKeys: hashKeys, - } - } - return &storage.RedisCluster{KeyPrefix: keyPrefix, HashKeys: hashKeys} -} - -func Start() { - cli.Init(VERSION, confPaths) - cli.Parse() - // Stop gateway process if not running in "start" mode: - if !cli.DefaultMode { - os.Exit(0) - } - - NodeID = "solo-" + uuid.NewV4().String() - - if err := initialiseSystem(); err != nil { - mainLog.Fatalf("Error initialising system: %v", err) - } - - var controlListener net.Listener - - onFork := func() { - mainLog.Warning("PREPARING TO FORK") - - if controlListener != nil { - if err := controlListener.Close(); err != nil { - mainLog.Error("Control listen handler exit: ", err) - } - mainLog.Info("Control listen closed") - } - - if config.Global().UseDBAppConfigs { - mainLog.Info("Stopping heartbeat") - DashService.StopBeating() - mainLog.Info("Waiting to de-register") - time.Sleep(10 * time.Second) 
- - os.Setenv("TYK_SERVICE_NONCE", ServiceNonce) - os.Setenv("TYK_SERVICE_NODEID", NodeID) - } - } - - listener, goAgainErr := goagain.Listener(onFork) - - if controlAPIPort := config.Global().ControlAPIPort; controlAPIPort > 0 { - var err error - if controlListener, err = generateListener(controlAPIPort); err != nil { - mainLog.Fatalf("Error starting control API listener: %s", err) - } else { - mainLog.Info("Starting control API listener: ", controlListener, err, controlAPIPort) - } - } else { - mainLog.Warn("The control_api_port should be changed for production") - } - - checkup.Run(config.Global()) - if tr := config.Global().Tracer; tr.Enabled { - trace.SetupTracing(tr.Name, tr.Options) - trace.SetLogger(mainLog) - defer trace.Close() - } - start() - - // Wait while Redis connection pools are ready before start serving traffic - if !storage.IsConnected() { - mainLog.Fatal("Redis connection pools are not ready. Exiting...") - } - mainLog.Info("Redis connection pools are ready") - - if *cli.MemProfile { - mainLog.Debug("Memory profiling active") - var err error - if memProfFile, err = os.Create("tyk.mprof"); err != nil { - panic(err) - } - defer memProfFile.Close() - } - if *cli.CPUProfile { - mainLog.Info("Cpu profiling active") - cpuProfFile, err := os.Create("tyk.prof") - if err != nil { - panic(err) - } - pprof.StartCPUProfile(cpuProfFile) - defer pprof.StopCPUProfile() - } - if *cli.BlockProfile { - mainLog.Info("Block profiling active") - runtime.SetBlockProfileRate(1) - } - if *cli.MutexProfile { - mainLog.Info("Mutex profiling active") - runtime.SetMutexProfileFraction(1) - } - - if goAgainErr != nil { - var err error - if listener, err = generateListener(config.Global().ListenPort); err != nil { - mainLog.Fatalf("Error starting listener: %s", err) - } - - listen(listener, controlListener, goAgainErr) - } else { - listen(listener, controlListener, nil) - - // Kill the parent, now that the child has started successfully. 
- mainLog.Debug("KILLING PARENT PROCESS") - if err := goagain.Kill(); err != nil { - mainLog.Fatalln(err) - } - } - - // Block the main goroutine awaiting signals. - if _, err := goagain.Wait(listener); err != nil { - mainLog.Fatalln(err) - } - - // Do whatever's necessary to ensure a graceful exit - // In this case, we'll simply stop listening and wait one second. - if err := listener.Close(); err != nil { - mainLog.Error("Listen handler exit: ", err) - } - - mainLog.Info("Stop signal received.") - - // stop analytics workers - if config.Global().EnableAnalytics && analytics.Store == nil { - analytics.Stop() - } - - // if using async session writes stop workers - if config.Global().UseAsyncSessionWrite { - DefaultOrgStore.Stop() - for i := range apiSpecs { - apiSpecs[i].StopSessionManagerPool() - } - - } - - // write pprof profiles - writeProfiles() - - if config.Global().UseDBAppConfigs { - mainLog.Info("Stopping heartbeat...") - DashService.StopBeating() - time.Sleep(2 * time.Second) - DashService.DeRegister() - } - - mainLog.Info("Terminating.") - - time.Sleep(time.Second) -} - -func writeProfiles() { - if *cli.BlockProfile { - f, err := os.Create("tyk.blockprof") - if err != nil { - panic(err) - } - if err = pprof.Lookup("block").WriteTo(f, 0); err != nil { - panic(err) - } - f.Close() - } - if *cli.MutexProfile { - f, err := os.Create("tyk.mutexprof") - if err != nil { - panic(err) - } - if err = pprof.Lookup("mutex").WriteTo(f, 0); err != nil { - panic(err) - } - f.Close() - } -} - -func start() { - // Set up a default org manager so we can traverse non-live paths - if !config.Global().SupressDefaultOrgStore { - mainLog.Debug("Initialising default org store") - DefaultOrgStore.Init(getGlobalStorageHandler("orgkey.", false)) - //DefaultQuotaStore.Init(getGlobalStorageHandler(CloudHandler, "orgkey.", false)) - DefaultQuotaStore.Init(getGlobalStorageHandler("orgkey.", false)) - } - - if config.Global().ControlAPIPort == 0 { - loadAPIEndpoints(mainRouter) - } - 
- // Start listening for reload messages - if !config.Global().SuppressRedisSignalReload { - go startPubSubLoop() - } - - if slaveOptions := config.Global().SlaveOptions; slaveOptions.UseRPC { - mainLog.Debug("Starting RPC reload listener") - RPCListener = RPCStorageHandler{ - KeyPrefix: "rpc.listener.", - SuppressRegister: true, - } - - RPCListener.Connect() - go rpcReloadLoop(slaveOptions.RPCKey) - go RPCListener.StartRPCKeepaliveWatcher() - go RPCListener.StartRPCLoopCheck(slaveOptions.RPCKey) - } - - // 1s is the minimum amount of time between hot reloads. The - // interval counts from the start of one reload to the next. - go reloadLoop(time.Tick(time.Second)) - go reloadQueueLoop() -} - -func generateListener(listenPort int) (net.Listener, error) { - listenAddress := config.Global().ListenAddress - - targetPort := listenAddress + ":" + strconv.Itoa(listenPort) - - if httpServerOptions := config.Global().HttpServerOptions; httpServerOptions.UseSSL { - mainLog.Info("--> Using SSL (https)") - - tlsConfig := tls.Config{ - GetCertificate: dummyGetCertificate, - ServerName: httpServerOptions.ServerName, - MinVersion: httpServerOptions.MinVersion, - ClientAuth: tls.NoClientCert, - InsecureSkipVerify: httpServerOptions.SSLInsecureSkipVerify, - CipherSuites: getCipherAliases(httpServerOptions.Ciphers), - } - - if httpServerOptions.EnableHttp2 { - tlsConfig.NextProtos = append(tlsConfig.NextProtos, http2.NextProtoTLS) - } - - tlsConfig.GetConfigForClient = getTLSConfigForClient(&tlsConfig, listenPort) - - return tls.Listen("tcp", targetPort, &tlsConfig) - } else if config.Global().HttpServerOptions.UseLE_SSL { - - mainLog.Info("--> Using SSL LE (https)") - - GetLEState(&LE_MANAGER) - - conf := tls.Config{ - GetCertificate: LE_MANAGER.GetCertificate, - } - conf.GetConfigForClient = getTLSConfigForClient(&conf, listenPort) - - return tls.Listen("tcp", targetPort, &conf) - } else { - mainLog.WithField("port", targetPort).Info("--> Standard listener (http)") - return 
net.Listen("tcp", targetPort) - } -} - -func dashboardServiceInit() { - if DashService == nil { - DashService = &HTTPDashboardHandler{} - DashService.Init() - } -} - -func handleDashboardRegistration() { - if !config.Global().UseDBAppConfigs { - return - } - - dashboardServiceInit() - - // connStr := buildConnStr("/register/node") - if err := DashService.Register(); err != nil { - dashLog.Fatal("Registration failed: ", err) - } - - go DashService.StartBeating() -} - -var drlOnce sync.Once - -func startDRL() { - switch { - case config.Global().ManagementNode: - return - case config.Global().EnableSentinelRateLimiter, config.Global().EnableRedisRollingLimiter: - return - } - mainLog.Info("Initialising distributed rate limiter") - setupDRL() - startRateLimitNotifications() -} - -// mainHandler's only purpose is to allow mainRouter to be dynamically replaced -type mainHandler struct{} - -func (_ mainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - reloadMu.Lock() - AddNewRelicInstrumentation(NewRelicApplication, mainRouter) - reloadMu.Unlock() - - // make request body to be nopCloser and re-readable before serve it through chain of middlewares - nopCloseRequestBody(r) - mainRouter.ServeHTTP(w, r) -} - -func listen(listener, controlListener net.Listener, err error) { - - readTimeout := defReadTimeout - writeTimeout := defWriteTimeout - - targetPort := config.Global().ListenAddress + ":" + strconv.Itoa(config.Global().ListenPort) - if config.Global().HttpServerOptions.ReadTimeout > 0 { - readTimeout = time.Duration(config.Global().HttpServerOptions.ReadTimeout) * time.Second - } - - if config.Global().HttpServerOptions.WriteTimeout > 0 { - writeTimeout = time.Duration(config.Global().HttpServerOptions.WriteTimeout) * time.Second - } - - if config.Global().ControlAPIPort > 0 { - loadAPIEndpoints(controlRouter) - } - - // Error not empty if handle reload when SIGUSR2 is received - if err != nil { - // Listen on a TCP or a UNIX domain socket (TCP here). 
- mainLog.Info("Setting up Server") - - // handle dashboard registration and nonces if available - handleDashboardRegistration() - - // Use a custom server so we can control tves - if config.Global().HttpServerOptions.OverrideDefaults { - mainRouter.SkipClean(config.Global().HttpServerOptions.SkipURLCleaning) - - mainLog.Infof("Custom gateway started (%s)", VERSION) - - mainLog.Warning("HTTP Server Overrides detected, this could destabilise long-running http-requests") - - s := &http.Server{ - Addr: targetPort, - ReadTimeout: readTimeout, - WriteTimeout: writeTimeout, - Handler: mainHandler{}, - } - - if config.Global().CloseConnections { - s.SetKeepAlivesEnabled(false) - } - - // Accept connections in a new goroutine. - go s.Serve(listener) - - if controlListener != nil { - cs := &http.Server{ - ReadTimeout: readTimeout, - WriteTimeout: writeTimeout, - Handler: controlRouter, - } - go cs.Serve(controlListener) - } - } else { - mainLog.Printf("Gateway started") - - s := &http.Server{Handler: mainHandler{}} - if config.Global().CloseConnections { - s.SetKeepAlivesEnabled(false) - } - - go s.Serve(listener) - - if controlListener != nil { - go http.Serve(controlListener, controlRouter) - } - } - } else { - // handle dashboard registration and nonces if available - nonce := os.Getenv("TYK_SERVICE_NONCE") - nodeID := os.Getenv("TYK_SERVICE_NODEID") - if nonce == "" || nodeID == "" { - mainLog.Warning("No nonce found, re-registering") - handleDashboardRegistration() - - } else { - NodeID = nodeID - ServiceNonce = nonce - mainLog.Info("State recovered") - - os.Setenv("TYK_SERVICE_NONCE", "") - os.Setenv("TYK_SERVICE_NODEID", "") - } - - if config.Global().UseDBAppConfigs { - dashboardServiceInit() - go DashService.StartBeating() - } - - if config.Global().HttpServerOptions.OverrideDefaults { - mainRouter.SkipClean(config.Global().HttpServerOptions.SkipURLCleaning) - - mainLog.Warning("HTTP Server Overrides detected, this could destabilise long-running http-requests") - s 
:= &http.Server{ - Addr: ":" + targetPort, - ReadTimeout: readTimeout, - WriteTimeout: writeTimeout, - Handler: mainHandler{}, - } - - if config.Global().CloseConnections { - s.SetKeepAlivesEnabled(false) - } - - mainLog.Info("Custom gateway started") - go s.Serve(listener) - - if controlListener != nil { - cs := &http.Server{ - ReadTimeout: readTimeout, - WriteTimeout: writeTimeout, - Handler: controlRouter, - } - go cs.Serve(controlListener) - } - } else { - mainLog.Printf("Gateway resumed (%s)", VERSION) - - s := &http.Server{Handler: mainHandler{}} - if config.Global().CloseConnections { - s.SetKeepAlivesEnabled(false) - } - - go s.Serve(listener) - - if controlListener != nil { - mainLog.Info("Control API listener started: ", controlListener, controlRouter) - - go http.Serve(controlListener, controlRouter) - } - } - - mainLog.Info("Resuming on", listener.Addr()) - } - - // at this point NodeID is ready to use by DRL - drlOnce.Do(startDRL) - - address := config.Global().ListenAddress - if config.Global().ListenAddress == "" { - address = "(open interface)" - } - mainLog.Info("--> Listening on address: ", address) - mainLog.Info("--> Listening on port: ", config.Global().ListenPort) - mainLog.Info("--> PID: ", hostDetails.PID) - - mainRouter.HandleFunc("/"+config.Global().HealthCheckEndpointName, func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "Hello Tiki") - }) - - if !rpc.IsEmergencyMode() { - doReload() - } -} diff --git a/gateway/swagger.go b/gateway/swagger.go deleted file mode 100644 index b21936eb71f5..000000000000 --- a/gateway/swagger.go +++ /dev/null @@ -1,25 +0,0 @@ -package gateway - -import ( - "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/user" -) - -// parameterBodies -// swagger:response parameterBodies -type swaggerParameterBodies struct { - // in: body - APIStatusMessage apiStatusMessage - // in: body - APIModifyKeySuccess apiModifyKeySuccess - // in: body - NewClientRequest NewClientRequest - // in: 
body - APIDefinition apidef.APIDefinition - // in: body - SessionState user.SessionState - // in:body - APIAllKeys apiAllKeys - // in: body - OAuthClientToken OAuthClientToken -} diff --git a/gateway/version.go b/gateway/version.go deleted file mode 100644 index 06ce1c955d9b..000000000000 --- a/gateway/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package gateway - -const VERSION = "v2.7.0" diff --git a/gateway/gateway_test.go b/gateway_test.go similarity index 81% rename from gateway/gateway_test.go rename to gateway_test.go index 92aded8414e0..3d44a06db8a8 100644 --- a/gateway/gateway_test.go +++ b/gateway_test.go @@ -1,24 +1,29 @@ -package gateway +package main import ( + "bytes" "encoding/json" "fmt" + "io" "io/ioutil" + "math/rand" "net" "net/http" "net/http/httptest" "net/url" "os" + "path/filepath" "runtime" - + "strconv" "strings" "sync" "testing" "time" "github.com/garyburd/redigo/redis" + "github.com/gorilla/mux" "github.com/gorilla/websocket" - msgpack "gopkg.in/vmihailenco/msgpack.v2" + "gopkg.in/vmihailenco/msgpack.v2" "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/cli" @@ -28,10 +33,141 @@ import ( "github.com/TykTechnologies/tyk/user" ) +func init() { + runningTests = true +} + +var ( + // to register to, but never used + discardMuxer = mux.NewRouter() + + // to simulate time ticks for tests that do reloads + reloadTick = make(chan time.Time) + + // Used to store the test bundles: + testMiddlewarePath, _ = ioutil.TempDir("", "tyk-middleware-path") +) + const defaultListenPort = 8080 +const mockOrgID = "507f1f77bcf86cd799439011" + +var defaultTestConfig config.Config +var testServerRouter *mux.Router + +func resetTestConfig() { + config.SetGlobal(defaultTestConfig) +} + +// simulate reloads in the background, i.e. writes to +// global variables that should not be accessed in a +// racy way like the policies and api specs maps. 
+func reloadSimulation() { + for { + policiesMu.Lock() + policiesByID["_"] = user.Policy{} + delete(policiesByID, "_") + policiesMu.Unlock() + apisMu.Lock() + old := apiSpecs + apiSpecs = append(apiSpecs, nil) + apiSpecs = old + apisByID["_"] = nil + delete(apisByID, "_") + apisMu.Unlock() + time.Sleep(5 * time.Millisecond) + } +} func TestMain(m *testing.M) { - os.Exit(InitTestMain(m)) + testServerRouter = testHttpHandler() + testServer := &http.Server{ + Addr: testHttpListen, + Handler: testServerRouter, + ReadTimeout: 1 * time.Second, + WriteTimeout: 1 * time.Second, + MaxHeaderBytes: 1 << 20, + } + go func() { + panic(testServer.ListenAndServe()) + }() + globalConf := config.Global() + if err := config.WriteDefault("", &globalConf); err != nil { + panic(err) + } + globalConf.Storage.Database = rand.Intn(15) + var err error + globalConf.AppPath, err = ioutil.TempDir("", "tyk-test-") + if err != nil { + panic(err) + } + globalConf.EnableAnalytics = true + globalConf.AnalyticsConfig.EnableGeoIP = true + globalConf.AnalyticsConfig.GeoIPDBLocation = filepath.Join("testdata", "MaxMind-DB-test-ipv4-24.mmdb") + globalConf.EnableJSVM = true + globalConf.Monitor.EnableTriggerMonitors = true + globalConf.AnalyticsConfig.NormaliseUrls.Enabled = true + + globalConf.AllowInsecureConfigs = true + // Enable coprocess and bundle downloader: + globalConf.CoProcessOptions.EnableCoProcess = true + globalConf.EnableBundleDownloader = true + globalConf.BundleBaseURL = testHttpBundles + globalConf.MiddlewarePath = testMiddlewarePath + + purgeTicker = make(chan time.Time) + rpcPurgeTicker = make(chan time.Time) + + // force ipv4 for now, to work around the docker bug affecting + // Go 1.8 and ealier + globalConf.ListenAddress = "127.0.0.1" + + initDNSMock() + + CoProcessInit() + + afterConfSetup(&globalConf) + + defaultTestConfig = globalConf + + config.SetGlobal(globalConf) + + if err := emptyRedis(); err != nil { + panic(err) + } + + cli.Init(VERSION, confPaths) + + 
initialiseSystem() + // Small part of start() + loadAPIEndpoints(mainRouter) + if analytics.GeoIPDB == nil { + panic("GeoIPDB was not initialized") + } + + go startPubSubLoop() + go reloadLoop(reloadTick) + go reloadQueueLoop() + go reloadSimulation() + + exitCode := m.Run() + + os.RemoveAll(config.Global().AppPath) + os.Exit(exitCode) +} + +func emptyRedis() error { + addr := config.Global().Storage.Host + ":" + strconv.Itoa(config.Global().Storage.Port) + c, err := redis.Dial("tcp", addr) + if err != nil { + return fmt.Errorf("could not connect to redis: %v", err) + } + defer c.Close() + dbName := strconv.Itoa(config.Global().Storage.Database) + if _, err := c.Do("SELECT", dbName); err != nil { + return err + } + _, err = c.Do("FLUSHDB") + return err } func createNonThrottledSession() *user.SessionState { @@ -48,41 +184,82 @@ func createNonThrottledSession() *user.SessionState { return session } -func TestAA(t *testing.T) { - ts := StartTest() +func createStandardSession() *user.SessionState { + session := new(user.SessionState) + session.Rate = 10000 + session.Allowance = session.Rate + session.LastCheck = time.Now().Unix() + session.Per = 60 + session.Expires = -1 + session.QuotaRenewalRate = 300 // 5 minutes + session.QuotaRenews = time.Now().Unix() + 20 + session.QuotaRemaining = 10 + session.QuotaMax = -1 + return session +} - ts.Start() - defer ts.Close() +type tykErrorResponse struct { + Error string +} - BuildAndLoadAPI(func(spec *APISpec) { - spec.Proxy.ListenPath = "/" +// ProxyHandler Proxies requests through to their final destination, if they make it through the middleware chain. +func ProxyHandler(p *ReverseProxy, apiSpec *APISpec) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + baseMid := BaseMiddleware{Spec: apiSpec, Proxy: p} + handler := SuccessHandler{baseMid} + // Skip all other execution + handler.ServeHTTP(w, r) }) - - ts.Run(t, []test.TestCase{ - {Code: 200}, - }...) 
- } -type tykErrorResponse struct { - Error string +func createSpecTest(t testing.TB, def string) *APISpec { + spec := createDefinitionFromString(def) + tname := t.Name() + redisStore := storage.RedisCluster{KeyPrefix: tname + "-apikey."} + healthStore := storage.RedisCluster{KeyPrefix: tname + "-apihealth."} + orgStore := storage.RedisCluster{KeyPrefix: tname + "-orgKey."} + spec.Init(redisStore, redisStore, healthStore, orgStore) + return spec } func testKey(testName string, name string) string { return fmt.Sprintf("%s-%s", testName, name) } +func testReqBody(t testing.TB, body interface{}) io.Reader { + switch x := body.(type) { + case []byte: + return bytes.NewReader(x) + case string: + return strings.NewReader(x) + case io.Reader: + return x + case nil: + return nil + default: // JSON objects (structs) + bs, err := json.Marshal(x) + if err != nil { + t.Fatal(err) + } + return bytes.NewReader(bs) + } +} + +func testReq(t testing.TB, method, urlStr string, body interface{}) *http.Request { + return httptest.NewRequest(method, urlStr, testReqBody(t, body)) +} + func TestParambasedAuth(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Auth.UseParam = true spec.UseKeylessAccess = false spec.Proxy.ListenPath = "/" }) - key := CreateSession(func(s *user.SessionState) { + key := createSession(func(s *user.SessionState) { s.AccessRights = map[string]user.AccessDefinition{"test": { APIID: "test", Versions: []string{"v1"}, }} @@ -106,12 +283,12 @@ func TestParambasedAuth(t *testing.T) { } func TestStripPathWithURLRewrite(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - defer ResetTestConfig() + defer resetTestConfig() t.Run("rewrite URL containing listen path", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { version := spec.VersionData.Versions["v1"] 
json.Unmarshal([]byte(`{ "use_extended_paths": true, @@ -137,16 +314,16 @@ func TestStripPathWithURLRewrite(t *testing.T) { } func TestSkipTargetPassEscapingOff(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - defer ResetTestConfig() + defer resetTestConfig() t.Run("With escaping, default", func(t *testing.T) { globalConf := config.Global() globalConf.HttpServerOptions.SkipTargetPathEscaping = false config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -161,7 +338,7 @@ func TestSkipTargetPassEscapingOff(t *testing.T) { globalConf.HttpServerOptions.SkipTargetPathEscaping = true config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -176,7 +353,7 @@ func TestSkipTargetPassEscapingOff(t *testing.T) { globalConf.HttpServerOptions.SkipTargetPathEscaping = false config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.StripListenPath = false spec.Proxy.ListenPath = "/listen_me" spec.Proxy.TargetURL = testHttpAny + "/sent_to_me" @@ -193,7 +370,7 @@ func TestSkipTargetPassEscapingOff(t *testing.T) { globalConf.HttpServerOptions.SkipTargetPathEscaping = true config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.StripListenPath = false spec.Proxy.ListenPath = "/listen_me" spec.Proxy.TargetURL = testHttpAny + "/sent_to_me" @@ -210,7 +387,7 @@ func TestSkipTargetPassEscapingOff(t *testing.T) { globalConf.HttpServerOptions.SkipTargetPathEscaping = false config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.StripListenPath = true spec.Proxy.ListenPath = "/listen_me" spec.Proxy.TargetURL = testHttpAny + "/sent_to_me" @@ -227,7 +404,7 @@ func TestSkipTargetPassEscapingOff(t 
*testing.T) { globalConf.HttpServerOptions.SkipTargetPathEscaping = true config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.StripListenPath = true spec.Proxy.ListenPath = "/listen_me" spec.Proxy.TargetURL = testHttpAny + "/sent_to_me" @@ -245,7 +422,7 @@ func TestSkipTargetPassEscapingOffWithSkipURLCleaningTrue(t *testing.T) { globalConf.HttpServerOptions.OverrideDefaults = true globalConf.HttpServerOptions.SkipURLCleaning = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() // here we expect that test gateway will be sending to test upstream requests with not cleaned URI // so test upstream shouldn't reply with 301 and process them as well @@ -254,7 +431,7 @@ func TestSkipTargetPassEscapingOffWithSkipURLCleaningTrue(t *testing.T) { testServerRouter.SkipClean(true) defer testServerRouter.SkipClean(prevSkipClean) - ts := StartTest() + ts := newTykTestServer() defer ts.Close() t.Run("With escaping, default", func(t *testing.T) { @@ -262,7 +439,7 @@ func TestSkipTargetPassEscapingOffWithSkipURLCleaningTrue(t *testing.T) { globalConf.HttpServerOptions.SkipTargetPathEscaping = false config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -276,7 +453,7 @@ func TestSkipTargetPassEscapingOffWithSkipURLCleaningTrue(t *testing.T) { globalConf.HttpServerOptions.SkipTargetPathEscaping = true config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -290,7 +467,7 @@ func TestSkipTargetPassEscapingOffWithSkipURLCleaningTrue(t *testing.T) { globalConf.HttpServerOptions.SkipTargetPathEscaping = false config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.StripListenPath = false spec.Proxy.ListenPath = "/listen_me" spec.Proxy.TargetURL = 
testHttpAny + "/sent_to_me" @@ -308,7 +485,7 @@ func TestSkipTargetPassEscapingOffWithSkipURLCleaningTrue(t *testing.T) { globalConf.HttpServerOptions.SkipTargetPathEscaping = true config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.StripListenPath = false spec.Proxy.ListenPath = "/listen_me" spec.Proxy.TargetURL = testHttpAny + "/sent_to_me" @@ -326,7 +503,7 @@ func TestSkipTargetPassEscapingOffWithSkipURLCleaningTrue(t *testing.T) { globalConf.HttpServerOptions.SkipTargetPathEscaping = false config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.StripListenPath = true spec.Proxy.ListenPath = "/listen_me" spec.Proxy.TargetURL = testHttpAny + "/sent_to_me" @@ -344,7 +521,7 @@ func TestSkipTargetPassEscapingOffWithSkipURLCleaningTrue(t *testing.T) { globalConf.HttpServerOptions.SkipTargetPathEscaping = true config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.StripListenPath = true spec.Proxy.ListenPath = "/listen_me" spec.Proxy.TargetURL = testHttpAny + "/sent_to_me" @@ -360,7 +537,7 @@ func TestSkipTargetPassEscapingOffWithSkipURLCleaningTrue(t *testing.T) { } func TestQuota(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() var keyID string @@ -383,7 +560,7 @@ func TestQuota(t *testing.T) { })) defer webhook.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.Proxy.ListenPath = "/" @@ -421,7 +598,7 @@ func TestQuota(t *testing.T) { }) // Create session with Quota = 2 - keyID = CreateSession(func(s *user.SessionState) { + keyID = createSession(func(s *user.SessionState) { s.QuotaMax = 2 }) @@ -443,12 +620,12 @@ func TestQuota(t *testing.T) { } func TestAnalytics(t *testing.T) { - ts := StartTest(TestConfig{ - Delay: 20 * time.Millisecond, + ts := 
newTykTestServer(tykTestServerConfig{ + delay: 20 * time.Millisecond, }) defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.Proxy.ListenPath = "/" }) @@ -480,7 +657,7 @@ func TestAnalytics(t *testing.T) { }) t.Run("Log success", func(t *testing.T) { - key := CreateSession() + key := createSession() authHeaders := map[string]string{ "authorization": key, @@ -506,17 +683,17 @@ func TestAnalytics(t *testing.T) { }) t.Run("Detailed analytics", func(t *testing.T) { - defer ResetTestConfig() + defer resetTestConfig() globalConf := config.Global() globalConf.AnalyticsConfig.EnableDetailedRecording = true config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.Proxy.ListenPath = "/" }) - key := CreateSession() + key := createSession() authHeaders := map[string]string{ "authorization": key, @@ -550,12 +727,12 @@ func TestAnalytics(t *testing.T) { }) t.Run("Detailed analytics with cache", func(t *testing.T) { - defer ResetTestConfig() + defer resetTestConfig() globalConf := config.Global() globalConf.AnalyticsConfig.EnableDetailedRecording = true config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.Proxy.ListenPath = "/" spec.CacheOptions = apidef.CacheOptions{ @@ -565,7 +742,7 @@ func TestAnalytics(t *testing.T) { } }) - key := CreateSession() + key := createSession() authHeaders := map[string]string{ "authorization": key, @@ -581,7 +758,7 @@ func TestAnalytics(t *testing.T) { results := analytics.Store.GetAndDeleteSet(analyticsKeyName) if len(results) != 2 { - t.Fatal("Should return 1 record: ", len(results)) + t.Error("Should return 1 record: ", len(results)) } // Take second cached request @@ -603,12 +780,12 @@ func TestAnalytics(t *testing.T) { func TestListener(t *testing.T) { // Trick to get spec JSON, without 
loading API - // Specs will be reseted when we do `StartTest` - specs := BuildAndLoadAPI() + // Specs will be reseted when we do `newTykTestServer` + specs := buildAndLoadAPI() specJSON, _ := json.Marshal(specs[0].APIDefinition) listJSON := fmt.Sprintf("[%s]", string(specJSON)) - ts := StartTest() + ts := newTykTestServer() defer ts.Close() tests := []test.TestCase{ @@ -639,7 +816,7 @@ func TestListener(t *testing.T) { // have all needed reload ticks ready go func() { for i := 0; i < 4*4; i++ { - ReloadTick <- time.Time{} + reloadTick <- time.Time{} } }() @@ -648,7 +825,7 @@ func TestListener(t *testing.T) { // Admin api located on separate port func TestControlListener(t *testing.T) { - ts := StartTest(TestConfig{ + ts := newTykTestServer(tykTestServerConfig{ sepatateControlAPI: true, }) defer ts.Close() @@ -672,7 +849,7 @@ func TestHttpPprof(t *testing.T) { old := cli.HTTPProfile defer func() { cli.HTTPProfile = old }() - ts := StartTest(TestConfig{ + ts := newTykTestServer(tykTestServerConfig{ sepatateControlAPI: true, }) @@ -694,7 +871,7 @@ func TestHttpPprof(t *testing.T) { } func TestManagementNodeRedisEvents(t *testing.T) { - defer ResetTestConfig() + defer resetTestConfig() globalConf := config.Global() globalConf.ManagementNode = false config.SetGlobal(globalConf) @@ -716,10 +893,10 @@ func TestManagementNodeRedisEvents(t *testing.T) { } func TestListenPathTykPrefix(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/tyk-foo/" }) @@ -730,22 +907,22 @@ func TestListenPathTykPrefix(t *testing.T) { } func TestReloadGoroutineLeakWithAsyncWrites(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() globalConf := config.Global() globalConf.UseAsyncSessionWrite = true globalConf.EnableJSVM = false config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - specs := 
BuildAndLoadAPI(func(spec *APISpec) { + specs := buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) before := runtime.NumGoroutine() - LoadAPI(specs...) // just doing doReload() doesn't load anything as BuildAndLoadAPI cleans up folder with API specs + loadAPI(specs...) // just doing doReload() doesn't load anything as buildAndLoadAPI cleans up folder with API specs time.Sleep(100 * time.Millisecond) @@ -757,17 +934,17 @@ func TestReloadGoroutineLeakWithAsyncWrites(t *testing.T) { } func TestReloadGoroutineLeakWithCircuitBreaker(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() globalConf := config.Global() globalConf.EnableJSVM = false config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - specs := BuildAndLoadAPI(func(spec *APISpec) { + specs := buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" - UpdateAPIVersion(spec, "v1", func(version *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(version *apidef.VersionInfo) { version.ExtendedPaths = apidef.ExtendedPathsSet{ CircuitBreaker: []apidef.CircuitBreakerMeta{ { @@ -784,7 +961,7 @@ func TestReloadGoroutineLeakWithCircuitBreaker(t *testing.T) { before := runtime.NumGoroutine() - LoadAPI(specs...) // just doing doReload() doesn't load anything as BuildAndLoadAPI cleans up folder with API specs + loadAPI(specs...) 
// just doing doReload() doesn't load anything as buildAndLoadAPI cleans up folder with API specs time.Sleep(100 * time.Millisecond) @@ -796,10 +973,10 @@ func TestReloadGoroutineLeakWithCircuitBreaker(t *testing.T) { } func TestProxyUserAgent(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -820,9 +997,9 @@ func TestSkipUrlCleaning(t *testing.T) { globalConf.HttpServerOptions.OverrideDefaults = true globalConf.HttpServerOptions.SkipURLCleaning = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -830,7 +1007,7 @@ func TestSkipUrlCleaning(t *testing.T) { })) defer s.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = s.URL }) @@ -841,10 +1018,10 @@ func TestSkipUrlCleaning(t *testing.T) { } func TestMultiTargetProxy(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.VersionData.NotVersioned = false spec.VersionData.Versions = map[string]apidef.VersionInfo{ "vdef": {Name: "vdef"}, @@ -875,9 +1052,9 @@ func TestCustomDomain(t *testing.T) { globalConf := config.Global() globalConf.EnableCustomDomains = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - BuildAndLoadAPI( + buildAndLoadAPI( func(spec *APISpec) { spec.Domain = "localhost" }, @@ -888,7 +1065,7 @@ func TestCustomDomain(t *testing.T) { }) t.Run("Without custom domain support", func(t *testing.T) { - BuildAndLoadAPI( + buildAndLoadAPI( func(spec *APISpec) { spec.Domain = "localhost" }, @@ -900,7 +1077,7 @@ func TestCustomDomain(t *testing.T) { } 
func TestHelloHealthcheck(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() t.Run("Without APIs", func(t *testing.T) { @@ -910,7 +1087,7 @@ func TestHelloHealthcheck(t *testing.T) { }) t.Run("With APIs", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/sample" }) @@ -922,12 +1099,12 @@ func TestHelloHealthcheck(t *testing.T) { } func TestCacheAllSafeRequests(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() cache := storage.RedisCluster{KeyPrefix: "cache-"} defer cache.DeleteScanMatch("*") - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.CacheOptions = apidef.CacheOptions{ CacheTimeout: 120, EnableCache: true, @@ -947,45 +1124,8 @@ func TestCacheAllSafeRequests(t *testing.T) { }...) } -func TestCachePostRequest(t *testing.T) { - ts := StartTest() - defer ts.Close() - cache := storage.RedisCluster{KeyPrefix: "cache-"} - defer cache.DeleteScanMatch("*") - - BuildAndLoadAPI(func(spec *APISpec) { - spec.CacheOptions = apidef.CacheOptions{ - CacheTimeout: 120, - EnableCache: true, - CacheAllSafeRequests: false, - } - - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { - json.Unmarshal([]byte(`[{ - "method":"POST", - "path":"/", - "cache_key_regex":"\"id\":[^,]*" - } - ]`), &v.ExtendedPaths.AdvanceCacheConfig) - }) - - spec.Proxy.ListenPath = "/" - }) - - headerCache := map[string]string{"x-tyk-cached-response": "1"} - - ts.Run(t, []test.TestCase{ - {Method: "POST", Path: "/", Data: "{\"id\":\"1\",\"name\":\"test\"}", HeadersNotMatch: headerCache, Delay: 10 * time.Millisecond}, - {Method: "POST", Path: "/", Data: "{\"id\":\"1\",\"name\":\"test\"}", HeadersMatch: headerCache, Delay: 10 * time.Millisecond}, - {Method: "POST", Path: "/", Data: "{\"id\":\"2\",\"name\":\"test\"}", HeadersNotMatch: headerCache, Delay: 10 * time.Millisecond}, - // if regex match returns nil, then request body is 
ignored while generating cache key - {Method: "POST", Path: "/", Data: "{\"name\":\"test\"}", HeadersNotMatch: headerCache, Delay: 10 * time.Millisecond}, - {Method: "POST", Path: "/", Data: "{\"name\":\"test2\"}", HeadersMatch: headerCache, Delay: 10 * time.Millisecond}, - }...) -} - func TestCacheEtag(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() cache := storage.RedisCluster{KeyPrefix: "cache-"} defer cache.DeleteScanMatch("*") @@ -995,7 +1135,7 @@ func TestCacheEtag(t *testing.T) { w.Write([]byte("body")) })) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.CacheOptions = apidef.CacheOptions{ CacheTimeout: 120, EnableCache: true, @@ -1017,42 +1157,17 @@ func TestCacheEtag(t *testing.T) { }...) } -// func TestWebsocketsUpstreamUpgradeRequest(t *testing.T) { -// // setup spec and do test HTTP upgrade-request -// globalConf := config.Global() -// globalConf.HttpServerOptions.EnableWebSockets = true -// config.SetGlobal(globalConf) -// defer ResetTestConfig() - -// ts := StartTest() -// defer ts.Close() - -// BuildAndLoadAPI(func(spec *APISpec) { -// spec.Proxy.ListenPath = "/" -// }) - -// ts.Run(t, test.TestCase{ -// Path: "/ws", -// Headers: map[string]string{ -// "Connection": "Upgrade", -// "Upgrade": "websocket", -// "Sec-Websocket-Version": "13", -// "Sec-Websocket-Key": "abc", -// }, -// Code: http.StatusSwitchingProtocols, -// }) -// } func TestWebsocketsSeveralOpenClose(t *testing.T) { globalConf := config.Global() globalConf.HttpServerOptions.EnableWebSockets = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -1145,12 +1260,12 @@ func TestWebsocketsAndHTTPEndpointMatch(t *testing.T) { globalConf := config.Global() globalConf.HttpServerOptions.EnableWebSockets = true 
config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" }) @@ -1276,9 +1391,9 @@ func createTestUptream(t *testing.T, allowedConns int, readsPerConn int) net.Lis } func TestKeepAliveConns(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - defer ResetTestConfig() + defer resetTestConfig() t.Run("Should use same connection", func(t *testing.T) { // set keep alive option @@ -1290,7 +1405,7 @@ func TestKeepAliveConns(t *testing.T) { upstream := createTestUptream(t, 1, 3) defer upstream.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = "http://" + upstream.Addr().String() }) @@ -1311,7 +1426,7 @@ func TestKeepAliveConns(t *testing.T) { upstream := createTestUptream(t, 3, 1) defer upstream.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = "http://" + upstream.Addr().String() }) @@ -1333,7 +1448,7 @@ func TestKeepAliveConns(t *testing.T) { upstream := createTestUptream(t, 2, 2) defer upstream.Close() - spec := BuildAndLoadAPI(func(spec *APISpec) { + spec := buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.Proxy.TargetURL = "http://" + upstream.Addr().String() })[0] @@ -1361,13 +1476,12 @@ func TestRateLimitForAPIAndRateLimitAndQuotaCheck(t *testing.T) { globalCfg.EnableSentinelRateLimiter = true config.SetGlobal(globalCfg) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { - spec.APIID += "_" + time.Now().String() + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.DisableRateLimit = false spec.OrgID = "default" @@ -1378,13 
+1492,13 @@ func TestRateLimitForAPIAndRateLimitAndQuotaCheck(t *testing.T) { spec.Proxy.ListenPath = "/" }) - sess1token := CreateSession(func(s *user.SessionState) { + sess1token := createSession(func(s *user.SessionState) { s.Rate = 1 s.Per = 60 }) defer FallbackKeySesionManager.RemoveSession(sess1token, false) - sess2token := CreateSession(func(s *user.SessionState) { + sess2token := createSession(func(s *user.SessionState) { s.Rate = 1 s.Per = 60 }) @@ -1399,15 +1513,15 @@ func TestRateLimitForAPIAndRateLimitAndQuotaCheck(t *testing.T) { } func TestTracing(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() prepareStorage() - spec := BuildAPI(func(spec *APISpec) { + spec := buildAPI(func(spec *APISpec) { spec.UseKeylessAccess = false })[0] - keyID := CreateSession(func(s *user.SessionState) {}) + keyID := createSession(func(s *user.SessionState) {}) authHeaders := map[string][]string{"Authorization": {keyID}} ts.Run(t, []test.TestCase{ @@ -1422,15 +1536,15 @@ func TestTracing(t *testing.T) { } func TestBrokenClients(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - defer ResetTestConfig() + defer resetTestConfig() globalConf := config.Global() globalConf.ProxyDefaultTimeout = 1 config.SetGlobal(globalConf) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = true spec.Proxy.ListenPath = "/" spec.EnforcedTimeoutEnabled = true diff --git a/goplugin/goplugin.go b/goplugin/goplugin.go deleted file mode 100644 index 3fd15681d9b3..000000000000 --- a/goplugin/goplugin.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build goplugin - -package goplugin - -import ( - "errors" - "net/http" - "plugin" -) - -func GetHandler(path string, symbol string) (http.HandlerFunc, error) { - // try to load plugin - loadedPlugin, err := plugin.Open(path) - if err != nil { - return nil, err - } - - // try to lookup function symbol - funcSymbol, err := loadedPlugin.Lookup(symbol) - if 
err != nil { - return nil, err - } - - // try to cast symbol to real func - pluginHandler, ok := funcSymbol.(func(http.ResponseWriter, *http.Request)) - if !ok { - return nil, errors.New("could not cast function symbol to http.HandlerFunc") - } - - return pluginHandler, nil -} diff --git a/goplugin/mw_go_plugin_test.go b/goplugin/mw_go_plugin_test.go deleted file mode 100644 index 94675f9b38d7..000000000000 --- a/goplugin/mw_go_plugin_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build goplugin - -package goplugin_test - -import ( - "net/http" - "os" - "testing" - "time" - - "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/gateway" - "github.com/TykTechnologies/tyk/test" -) - -func TestMain(m *testing.M) { - os.Exit(gateway.InitTestMain(m)) -} - -// TestGoPluginMWs tests all possible Go-plugin MWs ("pre", "auth_check", "post_key_auth" and "post") -// Please see ./test/goplugins/test_goplugins.go for plugin implementation details -func TestGoPluginMWs(t *testing.T) { - ts := gateway.StartTest() - defer ts.Close() - - gateway.BuildAndLoadAPI(func(spec *gateway.APISpec) { - spec.APIID = "plugin_api" - spec.Proxy.ListenPath = "/goplugin" - spec.UseKeylessAccess = false - spec.UseStandardAuth = false - spec.UseGoPluginAuth = true - spec.CustomMiddleware = apidef.MiddlewareSection{ - Driver: apidef.GoPluginDriver, - Pre: []apidef.MiddlewareDefinition{ - { - Name: "MyPluginPre", - Path: "../test/goplugins/goplugins.so", - }, - }, - AuthCheck: apidef.MiddlewareDefinition{ - Name: "MyPluginAuthCheck", - Path: "../test/goplugins/goplugins.so", - }, - PostKeyAuth: []apidef.MiddlewareDefinition{ - { - Name: "MyPluginPostKeyAuth", - Path: "../test/goplugins/goplugins.so", - }, - }, - Post: []apidef.MiddlewareDefinition{ - { - Name: "MyPluginPost", - Path: "../test/goplugins/goplugins.so", - }, - }, - } - }) - - time.Sleep(1 * time.Second) - - t.Run("Run Go-plugin auth failed", func(t *testing.T) { - ts.Run(t, []test.TestCase{ - { - Path: 
"/goplugin/plugin_hit", - Headers: map[string]string{"Authorization": "invalid_token"}, - HeadersMatch: map[string]string{ - "X-Auth-Result": "failed", - }, - Code: http.StatusForbidden, - }, - }...) - }) - - t.Run("Run Go-plugin all middle-wares", func(t *testing.T) { - ts.Run(t, []test.TestCase{ - { - Path: "/goplugin/plugin_hit", - Headers: map[string]string{"Authorization": "abc"}, - Code: http.StatusOK, - HeadersMatch: map[string]string{ - "X-Initial-URI": "/goplugin/plugin_hit", - "X-Auth-Result": "OK", - "X-Session-Alias": "abc-session", - }, - BodyMatch: `"message":"post message"`, - }, - { - Method: "DELETE", - Path: "/tyk/keys/abc", - AdminAuth: true, - Code: http.StatusOK, - BodyMatch: `"action":"deleted"`}, - }...) - }) -} diff --git a/goplugin/no_goplugin.go b/goplugin/no_goplugin.go deleted file mode 100644 index 8a0ffb273e79..000000000000 --- a/goplugin/no_goplugin.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !goplugin - -package goplugin - -import ( - "fmt" - "net/http" -) - -func GetHandler(path string, symbol string) (http.HandlerFunc, error) { - return nil, fmt.Errorf("goplugin.GetHandler is disabled, please disable build flag 'nogoplugin'") -} diff --git a/gateway/handler_error.go b/handler_error.go similarity index 69% rename from gateway/handler_error.go rename to handler_error.go index c8a351d46857..91d842643ea0 100644 --- a/gateway/handler_error.go +++ b/handler_error.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -30,58 +30,46 @@ type ErrorHandler struct { } // HandleError is the actual error handler and will store the error details in analytics if analytics processing is enabled. 
-func (e *ErrorHandler) HandleError(w http.ResponseWriter, r *http.Request, errMsg string, errCode int, writeResponse bool) { +func (e *ErrorHandler) HandleError(w http.ResponseWriter, r *http.Request, errMsg string, errCode int) { defer e.Base().UpdateRequestSession(r) - if writeResponse { - var templateExtension string - var contentType string - - switch r.Header.Get("Content-Type") { - case "application/xml": - templateExtension = "xml" - contentType = "application/xml" - default: - templateExtension = "json" - contentType = "application/json" - } - - w.Header().Set("Content-Type", contentType) + var templateExtension string + var contentType string - templateName := "error_" + strconv.Itoa(errCode) + "." + templateExtension + switch r.Header.Get("Content-Type") { + case "application/xml": + templateExtension = "xml" + contentType = "application/xml" + default: + templateExtension = "json" + contentType = "application/json" + } - // Try to use an error template that matches the HTTP error code and the content type: 500.json, 400.xml, etc. - tmpl := templates.Lookup(templateName) + w.Header().Set("Content-Type", contentType) - // Fallback to a generic error template, but match the content type: error.json, error.xml, etc. - if tmpl == nil { - templateName = defaultTemplateName + "." + templateExtension - tmpl = templates.Lookup(templateName) - } + templateName := "error_" + strconv.Itoa(errCode) + "." + templateExtension - // If no template is available for this content type, fallback to "error.json". - if tmpl == nil { - templateName = defaultTemplateName + "." + defaultTemplateFormat - tmpl = templates.Lookup(templateName) - w.Header().Set("Content-Type", defaultContentType) - } - - //If the config option is not set or is false, add the header - if !e.Spec.GlobalConfig.HideGeneratorHeader { - w.Header().Add("X-Generator", "tyk.io") - } + // Try to use an error template that matches the HTTP error code and the content type: 500.json, 400.xml, etc. 
+ tmpl := templates.Lookup(templateName) - // Close connections - if e.Spec.GlobalConfig.CloseConnections { - w.Header().Add("Connection", "close") - } + // Fallback to a generic error template, but match the content type: error.json, error.xml, etc. + if tmpl == nil { + templateName = defaultTemplateName + "." + templateExtension + tmpl = templates.Lookup(templateName) + } - // Need to return the correct error code! - w.WriteHeader(errCode) - apiError := APIError{errMsg} - tmpl.Execute(w, &apiError) + // If no template is available for this content type, fallback to "error.json". + if tmpl == nil { + templateName = defaultTemplateName + "." + defaultTemplateFormat + tmpl = templates.Lookup(templateName) + w.Header().Set("Content-Type", defaultContentType) } + // Need to return the correct error code! + w.WriteHeader(errCode) + apiError := APIError{errMsg} + tmpl.Execute(w, &apiError) + if memProfFile != nil { pprof.WriteHeapProfile(memProfFile) } @@ -204,6 +192,16 @@ func (e *ErrorHandler) HandleError(w http.ResponseWriter, r *http.Request, errMs // Report in health check reportHealthValue(e.Spec, BlockedRequestLog, "-1") + //If the config option is not set or is false, add the header + if !e.Spec.GlobalConfig.HideGeneratorHeader { + w.Header().Add("X-Generator", "tyk.io") + } + + // Close connections + if e.Spec.GlobalConfig.CloseConnections { + w.Header().Add("Connection", "close") + } + if memProfFile != nil { pprof.WriteHeapProfile(memProfFile) } diff --git a/gateway/handler_success.go b/handler_success.go similarity index 94% rename from gateway/handler_success.go rename to handler_success.go index f8ea71973487..b1046e9b6e9b 100644 --- a/gateway/handler_success.go +++ b/handler_success.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -11,12 +11,35 @@ import ( "strings" "time" - cache "github.com/pmylund/go-cache" - "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/ctx" "github.com/TykTechnologies/tyk/request" 
"github.com/TykTechnologies/tyk/user" + cache "github.com/pmylund/go-cache" +) + +// Enums for keys to be stored in a session context - this is how gorilla expects +// these to be implemented and is lifted pretty much from docs +const ( + SessionData = iota + UpdateSession + AuthToken + HashedAuthToken + VersionData + VersionDefault + OrgSessionContext + ContextData + RetainHost + TrackThisEndpoint + DoNotTrackThisEndpoint + UrlRewritePath + RequestMethod + OrigRequestURL + LoopLevel + LoopLevelLimit + ThrottleLevel + ThrottleLevelLimit + Trace + CheckLoopLimits ) const ( @@ -265,7 +288,7 @@ func recordDetail(r *http.Request, globalConf config.Config) bool { } // We are, so get session data - ses := r.Context().Value(ctx.OrgSessionContext) + ses := r.Context().Value(OrgSessionContext) if ses == nil { // no session found, use global config return globalConf.AnalyticsConfig.EnableDetailedRecording diff --git a/gateway/handler_websocket.go b/handler_websocket.go similarity index 99% rename from gateway/handler_websocket.go rename to handler_websocket.go index 135de878716e..e2d79554cc37 100644 --- a/gateway/handler_websocket.go +++ b/handler_websocket.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "context" diff --git a/gateway/testutil.go b/helpers_test.go similarity index 56% rename from gateway/testutil.go rename to helpers_test.go index 4b8fc4217c5b..4a04abd5f6fa 100644 --- a/gateway/testutil.go +++ b/helpers_test.go @@ -1,38 +1,34 @@ -package gateway +package main import ( "archive/zip" "bytes" + "context" "compress/gzip" "crypto/tls" "encoding/binary" "encoding/json" "fmt" - "net/http/httptest" - - "golang.org/x/net/context" - "io" "io/ioutil" "math/rand" "net" "net/http" + "net/url" "os" "path/filepath" - "runtime" + "regexp" "strconv" "strings" "sync" "testing" "time" - "github.com/TykTechnologies/tyk/cli" - jwt "github.com/dgrijalva/jwt-go" - "github.com/garyburd/redigo/redis" "github.com/gorilla/mux" "github.com/gorilla/websocket" - uuid 
"github.com/satori/go.uuid" + "github.com/miekg/dns" + "github.com/satori/go.uuid" "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" @@ -41,156 +37,11 @@ import ( "github.com/TykTechnologies/tyk/user" ) -var ( - // to register to, but never used - discardMuxer = mux.NewRouter() - - // to simulate time ticks for tests that do reloads - ReloadTick = make(chan time.Time) - - // Used to store the test bundles: - testMiddlewarePath, _ = ioutil.TempDir("", "tyk-middleware-path") - - mockHandle *test.DnsMockHandle - - testServerRouter *mux.Router - defaultTestConfig config.Config - - EnableTestDNSMock = true -) - -func InitTestMain(m *testing.M, genConf ...func(globalConf *config.Config)) int { - runningTests = true - testServerRouter = testHttpHandler() - testServer := &http.Server{ - Addr: testHttpListen, - Handler: testServerRouter, - ReadTimeout: 1 * time.Second, - WriteTimeout: 1 * time.Second, - MaxHeaderBytes: 1 << 20, - } - - globalConf := config.Global() - if err := config.WriteDefault("", &globalConf); err != nil { - panic(err) - } - globalConf.Storage.Database = rand.Intn(15) - var err error - globalConf.AppPath, err = ioutil.TempDir("", "tyk-test-") - if err != nil { - panic(err) - } - globalConf.EnableAnalytics = true - globalConf.AnalyticsConfig.EnableGeoIP = true - _, b, _, _ := runtime.Caller(0) - gatewayPath := filepath.Dir(b) - rootPath := filepath.Dir(gatewayPath) - globalConf.AnalyticsConfig.GeoIPDBLocation = filepath.Join(rootPath, "testdata", "MaxMind-DB-test-ipv4-24.mmdb") - globalConf.EnableJSVM = true - globalConf.Monitor.EnableTriggerMonitors = true - globalConf.AnalyticsConfig.NormaliseUrls.Enabled = true - globalConf.AllowInsecureConfigs = true - // Enable coprocess and bundle downloader: - globalConf.CoProcessOptions.EnableCoProcess = true - globalConf.CoProcessOptions.PythonPathPrefix = "../../" - globalConf.EnableBundleDownloader = true - globalConf.BundleBaseURL = testHttpBundles - globalConf.MiddlewarePath = 
testMiddlewarePath - purgeTicker = make(chan time.Time) - rpcPurgeTicker = make(chan time.Time) - // force ipv4 for now, to work around the docker bug affecting - // Go 1.8 and ealier - globalConf.ListenAddress = "127.0.0.1" - if len(genConf) > 0 { - genConf[0](&globalConf) - } - - if EnableTestDNSMock { - mockHandle, err = test.InitDNSMock(test.DomainsToAddresses, nil) - if err != nil { - panic(err) - } - - defer mockHandle.ShutdownDnsMock() - } - - go func() { - err := testServer.ListenAndServe() - if err != nil { - log.Warn("testServer.ListenAndServe() err: ", err.Error()) - } - }() - - defer testServer.Shutdown(context.Background()) - - CoProcessInit() - afterConfSetup(&globalConf) - defaultTestConfig = globalConf - config.SetGlobal(globalConf) - if err := emptyRedis(); err != nil { - panic(err) - } - cli.Init(VERSION, confPaths) - initialiseSystem() - // Small part of start() - loadAPIEndpoints(mainRouter) - if analytics.GeoIPDB == nil { - panic("GeoIPDB was not initialized") - } - - go startPubSubLoop() - go reloadLoop(ReloadTick) - go reloadQueueLoop() - go reloadSimulation() - exitCode := m.Run() - os.RemoveAll(config.Global().AppPath) - return exitCode -} - -func ResetTestConfig() { - config.SetGlobal(defaultTestConfig) -} - -func emptyRedis() error { - addr := config.Global().Storage.Host + ":" + strconv.Itoa(config.Global().Storage.Port) - c, err := redis.Dial("tcp", addr) - if err != nil { - return fmt.Errorf("could not connect to redis: %v", err) - } - defer c.Close() - dbName := strconv.Itoa(config.Global().Storage.Database) - if _, err := c.Do("SELECT", dbName); err != nil { - return err - } - _, err = c.Do("FLUSHDB") - return err -} - -// simulate reloads in the background, i.e. writes to -// global variables that should not be accessed in a -// racy way like the policies and api specs maps. 
-func reloadSimulation() { - for { - policiesMu.Lock() - policiesByID["_"] = user.Policy{} - delete(policiesByID, "_") - policiesMu.Unlock() - apisMu.Lock() - old := apiSpecs - apiSpecs = append(apiSpecs, nil) - apiSpecs = old - apisByID["_"] = nil - delete(apisByID, "_") - apisMu.Unlock() - time.Sleep(5 * time.Millisecond) - } -} - // map[bundleName]map[fileName]fileContent var testBundles = map[string]map[string]string{} var testBundleMu sync.Mutex -func RegisterBundle(name string, files map[string]string) string { +func registerBundle(name string, files map[string]string) string { testBundleMu.Lock() defer testBundleMu.Unlock() @@ -222,7 +73,7 @@ func bundleHandleFunc(w http.ResponseWriter, r *http.Request) { z.Close() } -type TestHttpResponse struct { +type testHttpResponse struct { Method string URI string Url string @@ -231,16 +82,6 @@ type TestHttpResponse struct { Form map[string]string } -// ProxyHandler Proxies requests through to their final destination, if they make it through the middleware chain. -func ProxyHandler(p *ReverseProxy, apiSpec *APISpec) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - baseMid := BaseMiddleware{Spec: apiSpec, Proxy: p} - handler := SuccessHandler{baseMid} - // Skip all other execution - handler.ServeHTTP(w, r) - }) -} - const ( // We need a static port so that the urls can be used in static // test data, and to prevent the requests from being randomized @@ -249,7 +90,7 @@ const ( // Accepts any http requests on /, only allows GET on /get, etc. // All return a JSON with request info. testHttpAny = "http://" + testHttpListen - TestHttpGet = testHttpAny + "/get" + testHttpGet = testHttpAny + "/get" testHttpPost = testHttpAny + "/post" testHttpJWK = testHttpAny + "/jwk.json" testHttpBundles = testHttpAny + "/bundles/" @@ -258,7 +99,6 @@ const ( // testing TCP and HTTP failures. 
testHttpFailure = "127.0.0.1:16501" testHttpFailureAny = "http://" + testHttpFailure - MockOrgID = "507f1f77bcf86cd799439011" ) func testHttpHandler() *mux.Router { @@ -294,10 +134,10 @@ func testHttpHandler() *mux.Router { return } r.URL.Opaque = r.URL.RawPath - w.Header().Set("X-Tyk-Test", "1") + w.Header().Set("X-Tyk-Mock", "1") body, _ := ioutil.ReadAll(r.Body) - err := json.NewEncoder(w).Encode(TestHttpResponse{ + err := json.NewEncoder(w).Encode(testHttpResponse{ Method: r.Method, URI: r.RequestURI, Url: r.URL.String(), @@ -360,10 +200,10 @@ func withAuth(r *http.Request) *http.Request { return r } -// Deprecated: Use Test.CreateSession instead. -func CreateSession(sGen ...func(s *user.SessionState)) string { +// TODO: replace with /tyk/keys/create call +func createSession(sGen ...func(s *user.SessionState)) string { key := generateToken("", "") - session := CreateStandardSession() + session := createStandardSession() if len(sGen) > 0 { sGen[0](session) } @@ -375,23 +215,7 @@ func CreateSession(sGen ...func(s *user.SessionState)) string { return key } -func CreateStandardSession() *user.SessionState { - session := new(user.SessionState) - session.Rate = 10000 - session.Allowance = session.Rate - session.LastCheck = time.Now().Unix() - session.Per = 60 - session.Expires = -1 - session.QuotaRenewalRate = 300 // 5 minutes - session.QuotaRenews = time.Now().Unix() + 20 - session.QuotaRemaining = 10 - session.QuotaMax = -1 - session.Tags = []string{} - session.MetaData = make(map[string]interface{}) - return session -} - -func CreateStandardPolicy() *user.Policy { +func createStandardPolicy() *user.Policy { return &user.Policy{ Rate: 1000.0, Per: 1.0, @@ -403,9 +227,9 @@ func CreateStandardPolicy() *user.Policy { } } -func CreatePolicy(pGen ...func(p *user.Policy)) string { +func createPolicy(pGen ...func(p *user.Policy)) string { pID := keyGen.GenerateAuthKey("") - pol := CreateStandardPolicy() + pol := createStandardPolicy() pol.ID = pID if len(pGen) > 0 { @@ 
-419,7 +243,7 @@ func CreatePolicy(pGen ...func(p *user.Policy)) string { return pol.ID } -func CreateJWKToken(jGen ...func(*jwt.Token)) string { +func createJWKToken(jGen ...func(*jwt.Token)) string { // Create the token token := jwt.New(jwt.GetSigningMethod("RS512")) // Set the token ID @@ -458,46 +282,6 @@ func createJWKTokenHMAC(jGen ...func(*jwt.Token)) string { return tokenString } -func TestReqBody(t testing.TB, body interface{}) io.Reader { - switch x := body.(type) { - case []byte: - return bytes.NewReader(x) - case string: - return strings.NewReader(x) - case io.Reader: - return x - case nil: - return nil - default: // JSON objects (structs) - bs, err := json.Marshal(x) - if err != nil { - t.Fatal(err) - } - return bytes.NewReader(bs) - } -} - -func TestReq(t testing.TB, method, urlStr string, body interface{}) *http.Request { - return httptest.NewRequest(method, urlStr, TestReqBody(t, body)) -} - -func CreateDefinitionFromString(defStr string) *APISpec { - loader := APIDefinitionLoader{} - def := loader.ParseDefinition(strings.NewReader(defStr)) - spec := loader.MakeSpec(def, nil) - return spec -} - -func CreateSpecTest(t testing.TB, def string) *APISpec { - spec := CreateDefinitionFromString(def) - tname := t.Name() - redisStore := &storage.RedisCluster{KeyPrefix: tname + "-apikey."} - healthStore := &storage.RedisCluster{KeyPrefix: tname + "-apihealth."} - orgStore := &storage.RedisCluster{KeyPrefix: tname + "-orgKey."} - spec.Init(redisStore, redisStore, healthStore, orgStore) - return spec -} - func firstVals(vals map[string][]string) map[string]string { m := make(map[string]string, len(vals)) for k, vs := range vals { @@ -506,25 +290,24 @@ func firstVals(vals map[string][]string) map[string]string { return m } -type TestConfig struct { +type tykTestServerConfig struct { sepatateControlAPI bool - Delay time.Duration - HotReload bool + delay time.Duration + hotReload bool overrideDefaults bool - CoprocessConfig config.CoProcessConfig + coprocessConfig 
config.CoProcessConfig } -type Test struct { +type tykTestServer struct { ln net.Listener cln net.Listener URL string - testRunner *test.HTTPTestRunner - GlobalConfig config.Config - config TestConfig + globalConfig config.Config + config tykTestServerConfig } -func (s *Test) Start() { +func (s *tykTestServer) Start() { s.ln, _ = generateListener(0) _, port, _ := net.SplitHostPort(s.ln.Addr().String()) globalConf := config.Global() @@ -537,7 +320,7 @@ func (s *Test) Start() { globalConf.ControlAPIPort, _ = strconv.Atoi(port) } - globalConf.CoProcessOptions = s.config.CoprocessConfig + globalConf.CoProcessOptions = s.config.coprocessConfig config.SetGlobal(globalConf) @@ -553,67 +336,107 @@ func (s *Test) Start() { DefaultQuotaStore.Init(getGlobalStorageHandler("orgkey.", false)) } - if s.config.HotReload { + if s.config.hotReload { listen(s.ln, s.cln, nil) } else { listen(s.ln, s.cln, fmt.Errorf("Without goagain")) } - s.GlobalConfig = globalConf + s.URL = "http://" + s.ln.Addr().String() + s.globalConfig = globalConf +} +func (s *tykTestServer) Close() { + s.ln.Close() + + if s.config.sepatateControlAPI { + s.cln.Close() + globalConf := config.Global() + globalConf.ControlAPIPort = 0 + config.SetGlobal(globalConf) + } +} + +func (s *tykTestServer) Do(tc test.TestCase) (*http.Response, error) { scheme := "http://" - if s.GlobalConfig.HttpServerOptions.UseSSL { + if s.globalConfig.HttpServerOptions.UseSSL { scheme = "https://" } - s.URL = scheme + s.ln.Addr().String() - s.testRunner = &test.HTTPTestRunner{ - RequestBuilder: func(tc *test.TestCase) (*http.Request, error) { - tc.BaseURL = s.URL - if tc.ControlRequest { - if s.config.sepatateControlAPI { - tc.BaseURL = scheme + s.cln.Addr().String() - } else if s.GlobalConfig.ControlAPIHostname != "" { - tc.Domain = s.GlobalConfig.ControlAPIHostname - } - } - r, err := test.NewRequest(tc) + if tc.Domain == "" { + tc.Domain = "127.0.0.1" + } - if tc.AdminAuth { - r = withAuth(r) - } + baseUrl := scheme + 
strings.Replace(s.ln.Addr().String(), "[::]", tc.Domain, 1) + baseUrl = strings.Replace(baseUrl, "127.0.0.1", tc.Domain, 1) - if s.config.Delay > 0 { - tc.Delay = s.config.Delay - } + if tc.ControlRequest { + if s.config.sepatateControlAPI { + baseUrl = scheme + s.cln.Addr().String() + } else if s.globalConfig.ControlAPIHostname != "" { + baseUrl = strings.Replace(baseUrl, "127.0.0.1", s.globalConfig.ControlAPIHostname, 1) + } + } - return r, err - }, - Do: test.HttpServerRunner(), + req := test.NewRequest(tc) + req.URL, _ = url.Parse(baseUrl + tc.Path) + + if tc.AdminAuth { + req = withAuth(req) } -} -func (s *Test) Do(tc test.TestCase) (*http.Response, error) { - req, _ := s.testRunner.RequestBuilder(&tc) - return s.testRunner.Do(req, &tc) + if tc.Client == nil { + tc.Client = &http.Client{ + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + }, + } + } + + return tc.Client.Do(req) } -func (s *Test) Close() { - s.ln.Close() +func (s *tykTestServer) Run(t testing.TB, testCases ...test.TestCase) (*http.Response, error) { + var lastResponse *http.Response + var lastError error - if s.config.sepatateControlAPI { - s.cln.Close() - globalConf := config.Global() - globalConf.ControlAPIPort = 0 - config.SetGlobal(globalConf) + for ti, tc := range testCases { + lastResponse, lastError = s.Do(tc) + tcJSON, _ := json.Marshal(tc) + + if lastError != nil { + if tc.ErrorMatch != "" { + if !strings.Contains(lastError.Error(), tc.ErrorMatch) { + t.Errorf("[%d] Expect error `%s` to contain `%s`. %s", ti, lastError.Error(), tc.ErrorMatch, string(tcJSON)) + } + } else { + t.Errorf("[%d] Connection error: %s. %s", ti, lastError.Error(), string(tcJSON)) + } + continue + } else if tc.ErrorMatch != "" { + t.Error("Expect error.", string(tcJSON)) + continue + } + + respCopy := copyResponse(lastResponse) + if lastError = test.AssertResponse(respCopy, tc); lastError != nil { + t.Errorf("[%d] %s. 
%s\n", ti, lastError.Error(), string(tcJSON)) + } + + delay := tc.Delay + if delay == 0 { + delay = s.config.delay + } + + if delay > 0 { + time.Sleep(delay) + } } -} -func (s *Test) Run(t testing.TB, testCases ...test.TestCase) (*http.Response, error) { - return s.testRunner.Run(t, testCases...) + return lastResponse, lastError } -func (s *Test) RunExt(t testing.TB, testCases ...test.TestCase) { +func (s *tykTestServer) RunExt(t *testing.T, testCases ...test.TestCase) { var testMatrix = []struct { goagain bool overrideDefaults bool @@ -625,7 +448,7 @@ func (s *Test) RunExt(t testing.TB, testCases ...test.TestCase) { } for i, m := range testMatrix { - s.config.HotReload = m.goagain + s.config.hotReload = m.goagain s.config.overrideDefaults = m.overrideDefaults if i > 0 { @@ -634,48 +457,48 @@ func (s *Test) RunExt(t testing.TB, testCases ...test.TestCase) { } title := fmt.Sprintf("hotReload: %v, overrideDefaults: %v", m.goagain, m.overrideDefaults) - t.(*testing.T).Run(title, func(t *testing.T) { + t.Run(title, func(t *testing.T) { s.Run(t, testCases...) 
}) } } -func (s *Test) CreateSession(sGen ...func(s *user.SessionState)) (*user.SessionState, string) { - session := CreateStandardSession() +func (s *tykTestServer) createSession(sGen ...func(s *user.SessionState)) string { + session := createStandardSession() if len(sGen) > 0 { sGen[0](session) } resp, err := s.Do(test.TestCase{ - Method: http.MethodPost, - Path: "/tyk/keys/create", - Data: session, - AdminAuth: true, + Method: "POST", + Path: "/tyk/keys/create", + Data: session, }) if err != nil { log.Fatal("Error while creating session:", err) - return nil, "" + return "" } - keySuccess := apiModifyKeySuccess{} - err = json.NewDecoder(resp.Body).Decode(&keySuccess) + respJSON := apiModifyKeySuccess{} + err = json.NewDecoder(resp.Body).Decode(&respJSON) if err != nil { - log.Fatal("Error while decoding session response:", err) - return nil, "" + log.Fatal("Error while serializing session:", err) + return "" } + resp.Body.Close() - return session, keySuccess.Key + return respJSON.Key } -func StartTest(config ...TestConfig) Test { - t := Test{} +func newTykTestServer(config ...tykTestServerConfig) tykTestServer { + s := tykTestServer{} if len(config) > 0 { - t.config = config[0] + s.config = config[0] } - t.Start() + s.Start() - return t + return s } const sampleAPI = `{ @@ -703,7 +526,7 @@ const sampleAPI = `{ } }` -func UpdateAPIVersion(spec *APISpec, name string, verGen func(version *apidef.VersionInfo)) { +func updateAPIVersion(spec *APISpec, name string, verGen func(version *apidef.VersionInfo)) { version := spec.VersionData.Versions[name] verGen(&version) spec.VersionData.Versions[name] = version @@ -714,7 +537,7 @@ func jsonMarshalString(i interface{}) (out string) { return string(b) } -func BuildAPI(apiGens ...func(spec *APISpec)) (specs []*APISpec) { +func buildAPI(apiGens ...func(spec *APISpec)) (specs []*APISpec) { if len(apiGens) == 0 { apiGens = append(apiGens, func(spec *APISpec) {}) } @@ -725,14 +548,14 @@ func BuildAPI(apiGens ...func(spec 
*APISpec)) (specs []*APISpec) { panic(err) } - gen(spec) specs = append(specs, spec) + gen(spec) } return specs } -func LoadAPI(specs ...*APISpec) (out []*APISpec) { +func loadAPI(specs ...*APISpec) (out []*APISpec) { globalConf := config.Global() oldPath := globalConf.AppPath globalConf.AppPath, _ = ioutil.TempDir("", "apps") @@ -765,8 +588,67 @@ func LoadAPI(specs ...*APISpec) (out []*APISpec) { return out } -func BuildAndLoadAPI(apiGens ...func(spec *APISpec)) (specs []*APISpec) { - return LoadAPI(BuildAPI(apiGens...)...) +func buildAndLoadAPI(apiGens ...func(spec *APISpec)) (specs []*APISpec) { + return loadAPI(buildAPI(apiGens...)...) +} + +var domainsToAddresses = map[string]string{ + "host1.local.": "127.0.0.1", + "host2.local.": "127.0.0.1", + "host3.local.": "127.0.0.1", +} + +type dnsMockHandler struct{} + +func (d *dnsMockHandler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { + msg := dns.Msg{} + msg.SetReply(r) + switch r.Question[0].Qtype { + case dns.TypeA: + msg.Authoritative = true + domain := msg.Question[0].Name + + address, ok := domainsToAddresses[domain] + if !ok { + // ^ start of line + // localhost\. match literally + // ()* match between 0 and unlimited times + // [[:alnum:]]+\. match single character in [a-zA-Z0-9] minimum one time and ending in . 
literally + reg := regexp.MustCompile(`^localhost\.([[:alnum:]]+\.)*`) + if matched := reg.MatchString(domain); !matched { + panic("domain not mocked: " + domain) + } + + address = "127.0.0.1" + } + + msg.Answer = append(msg.Answer, &dns.A{ + Hdr: dns.RR_Header{Name: domain, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP(address), + }) + } + w.WriteMsg(&msg) +} + +func initDNSMock() { + var dnsMock *dns.Server + addr, _ := net.ResolveUDPAddr("udp", ":0") + conn, _ := net.ListenUDP("udp", addr) + dnsMock = &dns.Server{PacketConn: conn} + dnsMock.Handler = &dnsMockHandler{} + go dnsMock.ActivateAndServe() + + http.DefaultTransport = &http.Transport{ + DialContext: (&net.Dialer{ + Resolver: &net.Resolver{ + PreferGo: true, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + d := net.Dialer{} + return d.DialContext(ctx, network, dnsMock.PacketConn.LocalAddr().String()) + }, + }, + }).DialContext, + } } // Taken from https://medium.com/@mlowicki/http-s-proxy-in-golang-in-less-than-100-lines-of-code-6a51c2f2c38c @@ -863,7 +745,7 @@ func initProxy(proto string, tlsConfig *tls.Config) *httpProxyHandler { return proxy } -func GenerateTestBinaryData() (buf *bytes.Buffer) { +func generateTestBinaryData() (buf *bytes.Buffer) { buf = new(bytes.Buffer) type testData struct { a float32 @@ -876,36 +758,3 @@ func GenerateTestBinaryData() (buf *bytes.Buffer) { } return buf } - -// openssl genrsa -out app.rsa -const jwtRSAPrivKey = ` ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAyqZ4rwKF8qCExS7kpY4cnJa/37FMkJNkalZ3OuslLB0oRL8T -4c94kdF4aeNzSFkSe2n99IBI6Ssl79vbfMZb+t06L0Q94k+/P37x7+/RJZiff4y1 -VGjrnrnMI2iu9l4iBBRYzNmG6eblroEMMWlgk5tysHgxB59CSNIcD9gqk1hx4n/F -gOmvKsfQgWHNlPSDTRcWGWGhB2/XgNVYG2pOlQxAPqLhBHeqGTXBbPfGF9cHzixp -sPr6GtbzPwhsQ/8bPxoJ7hdfn+rzztks3d6+HWURcyNTLRe0mjXjjee9Z6+gZ+H+ -fS4pnP9tqT7IgU6ePUWTpjoiPtLexgsAa/ctjQIDAQABAoIBAECWvnBJRZgHQUn3 -oDiECup9wbnyMI0D7UVXObk1qSteP69pl1SpY6xWLyLQs7WjbhiXt7FuEc7/SaAh 
-Wttx/W7/g8P85Bx1fmcmdsYakXaCJpPorQKyTibQ4ReIDfvIFN9n/MWNr0ptpVbx -GonFJFrneK52IGplgCLllLwYEbnULYcJc6E25Ro8U2gQjF2r43PDa07YiDrmB/GV -QQW4HTo+CA9rdK0bP8GpXgc0wpmBhx/t/YdnDg6qhzyUMk9As7JrAzYPjHO0cRun -vhA/aG/mdMmRumY75nj7wB5U5DgstsN2ER75Pjr1xe1knftIyNm15AShCPfLaLGo -dA2IpwECgYEA5E8h6ssa7QroCGwp/N0wSJW41hFYGygbOEg6yPWTJkqmMZVduD8X -/KFqJK4LcIbFQuR28+hWJpHm/RF1AMRhbbWkAj6h02gv5izFwDiFKev5paky4Evg -G8WfUOmSZ1D+fVxwaoG0OaRZpCovUTxYig3xrI659DMeKqpQ7e8l9ekCgYEA4zql -l4P4Dn0ydr+TI/s4NHIQHkaLQAVk3OWwyKowijXd8LCtuZRA1NKSpqQ4ZXi0B17o -9zzF5jEUjws3qWv4PKWdxJu3y+h/etsg7wxUeNizbY2ooUGeMbk0tWxJihbgaI7E -XxLIT50F3Ky4EJ2cUL9GmJ+gLCw0KIaVbkiyYAUCgYEA0WyVHB76r/2VIkS1rzHm -HG7ageKfAyoi7dmzsqsxM6q+EDWHJn8Zra8TAlp0O+AkClwvkUTJ4c9sJy9gODfr -dwtrSnPRVW74oRbovo4Z+H5xHbi65mwzQsZggYP/u63cA3pL1Cbt/wH3CFN52/aS -8PAhg7vYb1yEi3Z3jgoUtCECgYEAhSPX4u9waQzyhKG7lVmdlR1AVH0BGoIOl1/+ -NZWC23i0klLzd8lmM00uoHWYldwjoC38UuFJE5eudCIeeybITMC9sHWNO+z+xP2g -TnDrDePrPkXCiLnp9ziNqb/JVyAQXTNJ3Gsk84EN7j9Fmna/IJDyzHq7XyaHaTdy -VyxBWAECgYEA4jYS07bPx5UMhKiMJDqUmDfLNFD97XwPoJIkOdn6ezqeOSmlmo7t -jxHLbCmsDOAsCU/0BlLXg9wMU7n5QKSlfTVGok/PU0rq2FUXQwyKGnellrqODwFQ -YGivtXBGXk1hlVYlje1RB+W6RQuDAegI5h8vl8pYJS9JQH0wjatsDaE= ------END RSA PRIVATE KEY----- -` - -const jwtSecret = "9879879878787878" diff --git a/gateway/host_checker.go b/host_checker.go similarity index 94% rename from gateway/host_checker.go rename to host_checker.go index abb79b3a43f0..0b15bbb4601b 100644 --- a/gateway/host_checker.go +++ b/host_checker.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/tls" @@ -59,25 +59,12 @@ type HostUptimeChecker struct { stopPollingChan chan bool sampleCache *cache.Cache stopLoop bool - muStopLoop sync.RWMutex resetListMu sync.Mutex doResetList bool newList map[string]HostData } -func (h *HostUptimeChecker) getStopLoop() bool { - h.muStopLoop.RLock() - defer h.muStopLoop.RUnlock() - return h.stopLoop -} - -func (h *HostUptimeChecker) setStopLoop(newValue bool) { - h.muStopLoop.Lock() - h.stopLoop = newValue 
- h.muStopLoop.Unlock() -} - func (h *HostUptimeChecker) getStaggeredTime() time.Duration { if h.checkTimeout <= 5 { return time.Duration(h.checkTimeout) * time.Second @@ -93,7 +80,7 @@ func (h *HostUptimeChecker) getStaggeredTime() time.Duration { } func (h *HostUptimeChecker) HostCheckLoop() { - for !h.getStopLoop() { + for !h.stopLoop { if runningTests { <-hostCheckTicker } @@ -268,7 +255,7 @@ func (h *HostUptimeChecker) Init(workers, triggerLimit, timeout int, hostList ma func (h *HostUptimeChecker) Start() { // Start the loop that checks for bum hosts - h.setStopLoop(false) + h.stopLoop = false log.Debug("[HOST CHECKER] Starting...") go h.HostCheckLoop() log.Debug("[HOST CHECKER] Check loop started...") @@ -277,8 +264,7 @@ func (h *HostUptimeChecker) Start() { } func (h *HostUptimeChecker) Stop() { - h.setStopLoop(true) - + h.stopLoop = true h.stopPollingChan <- true log.Info("[HOST CHECKER] Stopping poller") h.pool.Close() diff --git a/gateway/host_checker_manager.go b/host_checker_manager.go similarity index 99% rename from gateway/host_checker_manager.go rename to host_checker_manager.go index ac43ecb02127..bb46dab068d6 100644 --- a/gateway/host_checker_manager.go +++ b/host_checker_manager.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/base64" @@ -10,8 +10,8 @@ import ( "time" "github.com/Sirupsen/logrus" - uuid "github.com/satori/go.uuid" - msgpack "gopkg.in/vmihailenco/msgpack.v2" + "github.com/satori/go.uuid" + "gopkg.in/vmihailenco/msgpack.v2" "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" diff --git a/gateway/host_checker_test.go b/host_checker_test.go similarity index 95% rename from gateway/host_checker_test.go rename to host_checker_test.go index 114488716096..b720993413ba 100644 --- a/gateway/host_checker_test.go +++ b/host_checker_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -70,7 +70,7 @@ func TestHostChecker(t *testing.T) { specBuf := &bytes.Buffer{} 
specTmpl.ExecuteTemplate(specBuf, specTmpl.Name(), &tmplData) - spec := CreateDefinitionFromString(specBuf.String()) + spec := createDefinitionFromString(specBuf.String()) // From api_loader.go#processSpec sl := apidef.NewHostListFromList(spec.Proxy.Targets) @@ -148,7 +148,7 @@ func TestHostChecker(t *testing.T) { t.Error("Should set defaults", GlobalHostChecker.checker.checkTimeout) } - redisStore := GlobalHostChecker.store.(*storage.RedisCluster) + redisStore := GlobalHostChecker.store.(storage.RedisCluster) if ttl, _ := redisStore.GetKeyTTL(PoolerHostSentinelKeyPrefix + testHttpFailure); int(ttl) != GlobalHostChecker.checker.checkTimeout*GlobalHostChecker.checker.sampleTriggerLimit { t.Error("HostDown expiration key should be checkTimeout + 1", ttl) } @@ -168,7 +168,7 @@ func TestReverseProxyAllDown(t *testing.T) { specBuf := &bytes.Buffer{} specTmpl.ExecuteTemplate(specBuf, specTmpl.Name(), &tmplData) - spec := CreateDefinitionFromString(specBuf.String()) + spec := createDefinitionFromString(specBuf.String()) // From api_loader.go#processSpec sl := apidef.NewHostListFromList(spec.Proxy.Targets) @@ -207,7 +207,7 @@ func TestReverseProxyAllDown(t *testing.T) { remote, _ := url.Parse(testHttpAny) proxy := TykNewSingleHostReverseProxy(remote, spec) - req := TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) rec := httptest.NewRecorder() proxy.ServeHTTP(rec, req) if rec.Code != 503 { diff --git a/install/data/tyk.self_contained.conf b/install/data/tyk.self_contained.conf index 050e7e724fc4..f315094def54 100644 --- a/install/data/tyk.self_contained.conf +++ b/install/data/tyk.self_contained.conf @@ -30,15 +30,11 @@ "enable_health_checks": false, "health_check_value_timeouts": 60 }, - "dns_cache": { - "enabled": false, - "ttl": 3600 - }, "optimisations_use_async_session_write": true, "allow_master_keys": false, "policies": { "policy_source": "file", - "policy_record_name": "policies/policies.json" + "policy_record_name": "policies" }, "hash_keys": true, 
"suppress_redis_signal_reload": false, @@ -58,18 +54,15 @@ } }, "http_server_options": { - "enable_http2": true, "enable_websockets": true }, "hostname": "", "enable_custom_domains": true, - "proxy_enable_http2": true, "enable_jsvm": true, "oauth_redirect_uri_separator": ";", "coprocess_options": { "enable_coprocess": false, - "coprocess_grpc_server": "", - "python_path_prefix": "/opt/tyk-gateway" + "coprocess_grpc_server": "" }, "pid_file_location": "./tyk-gateway.pid", "allow_insecure_configs": true, diff --git a/install/data/tyk.with_dash.conf b/install/data/tyk.with_dash.conf index 37e11fa3d702..9ff3e0b9b4e0 100644 --- a/install/data/tyk.with_dash.conf +++ b/install/data/tyk.with_dash.conf @@ -40,10 +40,6 @@ "enable_health_checks": false, "health_check_value_timeouts": 60 }, - "dns_cache": { - "enabled": false, - "ttl": 3600 - }, "optimisations_use_async_session_write": true, "allow_master_keys": false, "policies": { @@ -63,7 +59,6 @@ "disable_cached_session_state": false }, "http_server_options": { - "enable_http2": true, "enable_websockets": true }, "uptime_tests": { @@ -77,13 +72,11 @@ }, "hostname": "", "enable_custom_domains": true, - "proxy_enable_http2": true, "enable_jsvm": true, "oauth_redirect_uri_separator": ";", "coprocess_options": { "enable_coprocess": false, - "coprocess_grpc_server": "", - "python_path_prefix": "/opt/tyk-gateway" + "coprocess_grpc_server": "" }, "pid_file_location": "./tyk-gateway.pid", "allow_insecure_configs": true, diff --git a/gateway/instrumentation_handlers.go b/instrumentation_handlers.go similarity index 99% rename from gateway/instrumentation_handlers.go rename to instrumentation_handlers.go index c5f522bed1c4..f4c08e918416 100644 --- a/gateway/instrumentation_handlers.go +++ b/instrumentation_handlers.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" diff --git a/gateway/instrumentation_statsd_sink.go b/instrumentation_statsd_sink.go similarity index 99% rename from 
gateway/instrumentation_statsd_sink.go rename to instrumentation_statsd_sink.go index 169ae5a80ff1..e38806c3748a 100644 --- a/gateway/instrumentation_statsd_sink.go +++ b/instrumentation_statsd_sink.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" diff --git a/gateway/jq.go b/jq.go similarity index 99% rename from gateway/jq.go rename to jq.go index b54c460a5217..48a0de141e74 100644 --- a/gateway/jq.go +++ b/jq.go @@ -1,6 +1,6 @@ // +build jq -package gateway +package main // #cgo LDFLAGS: -ljq // #include diff --git a/gateway/jsvm_event_handler.go b/jsvm_event_handler.go similarity index 98% rename from gateway/jsvm_event_handler.go rename to jsvm_event_handler.go index 1ad8ddbe98d9..53015832963d 100644 --- a/gateway/jsvm_event_handler.go +++ b/jsvm_event_handler.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/json" diff --git a/gateway/ldap_auth_handler.go b/ldap_auth_handler.go similarity index 99% rename from gateway/ldap_auth_handler.go rename to ldap_auth_handler.go index 4b8dee6cd75a..57e4e9246155 100644 --- a/gateway/ldap_auth_handler.go +++ b/ldap_auth_handler.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/le_helpers.go b/le_helpers.go similarity index 99% rename from gateway/le_helpers.go rename to le_helpers.go index 50888a4f7e6a..802792bb5fc9 100644 --- a/gateway/le_helpers.go +++ b/le_helpers.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/json" diff --git a/log/log.go b/log/log.go index a1f8af90d4b6..b47fd074df3f 100644 --- a/log/log.go +++ b/log/log.go @@ -5,38 +5,11 @@ import ( "strings" "github.com/Sirupsen/logrus" - "github.com/hashicorp/terraform/flatmap" prefixed "github.com/x-cray/logrus-prefixed-formatter" ) -var ( - log = logrus.New() - rawLog = logrus.New() - translations = make(map[string]string) -) - -// LoadTranslations takes a map[string]interface and flattens it to map[string]string -// Because translations have been loaded - we 
internally override log the formatter -// Nested entries are accessible using dot notation. -// example: `{"foo": {"bar": "baz"}}` -// flattened: `foo.bar: baz` -func LoadTranslations(thing map[string]interface{}) { - log.Formatter = &TranslationFormatter{new(prefixed.TextFormatter)} - translations = flatmap.Flatten(thing) -} - -type TranslationFormatter struct { - *prefixed.TextFormatter -} - -func (t *TranslationFormatter) Format(entry *logrus.Entry) ([]byte, error) { - if code, ok := entry.Data["code"]; ok { - if translation, ok := translations[code.(string)]; ok { - entry.Message = translation - } - } - return t.TextFormatter.Format(entry) -} +var log = logrus.New() +var rawLog = logrus.New() type RawFormatter struct{} diff --git a/gateway/log_helpers.go b/log_helpers.go similarity index 98% rename from gateway/log_helpers.go rename to log_helpers.go index b7515ad0ede2..9bb64c3d46c2 100644 --- a/gateway/log_helpers.go +++ b/log_helpers.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" diff --git a/gateway/log_helpers_test.go b/log_helpers_test.go similarity index 98% rename from gateway/log_helpers_test.go rename to log_helpers_test.go index bf2acece8058..e42c8f2098c1 100644 --- a/gateway/log_helpers_test.go +++ b/log_helpers_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http/httptest" @@ -10,7 +10,7 @@ import ( ) func TestGetLogEntryForRequest(t *testing.T) { - defer ResetTestConfig() + defer resetTestConfig() testReq := httptest.NewRequest("GET", "http://tyk.io/test", nil) testReq.RemoteAddr = "127.0.0.1:80" diff --git a/gateway/looping_test.go b/looping_test.go similarity index 96% rename from gateway/looping_test.go rename to looping_test.go index 704b9cfbbf89..dddef4092998 100644 --- a/gateway/looping_test.go +++ b/looping_test.go @@ -1,7 +1,7 @@ // +build !race // Looping by itself has race nature -package gateway +package main import ( "encoding/json" @@ -13,7 +13,7 @@ import ( ) func TestLooping(t *testing.T) { 
- ts := StartTest() + ts := newTykTestServer() defer ts.Close() postAction := `data` @@ -22,7 +22,7 @@ func TestLooping(t *testing.T) { t.Run("Using advanced URL rewrite", func(t *testing.T) { // We defined internnal advanced rewrite based on body data // which rewrites to internal paths (marked as blacklist so they protected from outside world) - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { version := spec.VersionData.Versions["v1"] json.Unmarshal([]byte(`{ "use_extended_paths": true, @@ -89,7 +89,7 @@ func TestLooping(t *testing.T) { }) t.Run("Test multiple url rewrites", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { version := spec.VersionData.Versions["v1"] json.Unmarshal([]byte(`{ "use_extended_paths": true, @@ -129,7 +129,7 @@ func TestLooping(t *testing.T) { }) t.Run("Loop to another API", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.APIID = "testid" spec.Name = "hidden api" spec.Proxy.ListenPath = "/somesecret" @@ -212,7 +212,7 @@ func TestLooping(t *testing.T) { }) t.Run("Loop limit", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { version := spec.VersionData.Versions["v1"] json.Unmarshal([]byte(`{ "use_extended_paths": true, @@ -236,7 +236,7 @@ func TestLooping(t *testing.T) { }) t.Run("Quota and rate limit calculation", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { version := spec.VersionData.Versions["v1"] json.Unmarshal([]byte(`{ "use_extended_paths": true, @@ -255,7 +255,7 @@ func TestLooping(t *testing.T) { spec.UseKeylessAccess = false }) - keyID := CreateSession(func(s *user.SessionState) { + keyID := createSession(func(s *user.SessionState) { s.QuotaMax = 2 }) @@ -270,10 +270,10 @@ func TestLooping(t *testing.T) { func TestConcurrencyReloads(t *testing.T) { var wg sync.WaitGroup - ts := 
StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI() + buildAndLoadAPI() for i := 0; i < 10; i++ { wg.Add(1) @@ -284,7 +284,7 @@ func TestConcurrencyReloads(t *testing.T) { } for j := 0; j < 5; j++ { - BuildAndLoadAPI() + buildAndLoadAPI() } wg.Wait() diff --git a/main.go b/main.go index 79d9d03fb17d..c8a5c1db064c 100644 --- a/main.go +++ b/main.go @@ -1,7 +1,1384 @@ package main -import "github.com/TykTechnologies/tyk/gateway" +import ( + "crypto/tls" + "fmt" + "html/template" + "io/ioutil" + stdlog "log" + "log/syslog" + "net" + "net/http" + pprof_http "net/http/pprof" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "strconv" + "strings" + "sync" + "time" + + newrelic "github.com/newrelic/go-agent" + + "github.com/TykTechnologies/tyk/checkup" + + "github.com/Sirupsen/logrus" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + logstashHook "github.com/bshuster-repo/logrus-logstash-hook" + "github.com/evalphobia/logrus_sentry" + "github.com/facebookgo/pidfile" + graylogHook "github.com/gemnasium/logrus-graylog-hook" + "github.com/gorilla/mux" + "github.com/justinas/alice" + "github.com/lonelycode/gorpc" + "github.com/lonelycode/osin" + "github.com/rs/cors" + uuid "github.com/satori/go.uuid" + "rsc.io/letsencrypt" + + "github.com/TykTechnologies/goagain" + gas "github.com/TykTechnologies/goautosocket" + "github.com/TykTechnologies/tyk/apidef" + "github.com/TykTechnologies/tyk/certs" + cli "github.com/TykTechnologies/tyk/cli" + "github.com/TykTechnologies/tyk/config" + logger "github.com/TykTechnologies/tyk/log" + "github.com/TykTechnologies/tyk/regexp" + "github.com/TykTechnologies/tyk/storage" + "github.com/TykTechnologies/tyk/user" +) + +var ( + log = logger.Get() + mainLog = log.WithField("prefix", "main") + pubSubLog = log.WithField("prefix", "pub-sub") + rawLog = logger.GetRaw() + templates *template.Template + analytics RedisAnalyticsHandler + GlobalEventsJSVM JSVM + memProfFile *os.File + MainNotifier RedisNotifier + 
DefaultOrgStore DefaultSessionManager + DefaultQuotaStore DefaultSessionManager + FallbackKeySesionManager = SessionHandler(&DefaultSessionManager{}) + MonitoringHandler config.TykEventHandler + RPCListener RPCStorageHandler + DashService DashboardServiceSender + CertificateManager *certs.CertificateManager + NewRelicApplication newrelic.Application + + apisMu sync.RWMutex + apiSpecs []*APISpec + apisByID = map[string]*APISpec{} + + keyGen DefaultKeyGenerator + + policiesMu sync.RWMutex + policiesByID = map[string]user.Policy{} + + mainRouter *mux.Router + controlRouter *mux.Router + LE_MANAGER letsencrypt.Manager + LE_FIRSTRUN bool + + NodeID string + + runningTests = false + + // confPaths is the series of paths to try to use as config files. The + // first one to exist will be used. If none exists, a default config + // will be written to the first path in the list. + // + // When --conf=foo is used, this will be replaced by []string{"foo"}. + confPaths = []string{ + "tyk.conf", + // TODO: add ~/.config/tyk/tyk.conf here? 
+ "/etc/tyk/tyk.conf", + } +) + +const ( + defReadTimeout = 120 * time.Second + defWriteTimeout = 120 * time.Second + appName = "tyk-gateway" +) + +func getApiSpec(apiID string) *APISpec { + apisMu.RLock() + spec := apisByID[apiID] + apisMu.RUnlock() + return spec +} + +func apisByIDLen() int { + apisMu.RLock() + defer apisMu.RUnlock() + return len(apisByID) +} + +var redisPurgeOnce sync.Once +var rpcPurgeOnce sync.Once +var purgeTicker = time.Tick(time.Second) +var rpcPurgeTicker = time.Tick(10 * time.Second) + +// Create all globals and init connection handlers +func setupGlobals() { + + reloadMu.Lock() + defer reloadMu.Unlock() + + mainRouter = mux.NewRouter() + controlRouter = mux.NewRouter() + + if config.Global().EnableAnalytics && config.Global().Storage.Type != "redis" { + mainLog.Fatal("Analytics requires Redis Storage backend, please enable Redis in the tyk.conf file.") + } + + // Initialise our Host Checker + healthCheckStore := storage.RedisCluster{KeyPrefix: "host-checker:"} + InitHostCheckManager(healthCheckStore) + + redisStore := storage.RedisCluster{KeyPrefix: "apikey-", HashKeys: config.Global().HashKeys} + FallbackKeySesionManager.Init(&redisStore) + + if config.Global().EnableAnalytics && analytics.Store == nil { + globalConf := config.Global() + globalConf.LoadIgnoredIPs() + config.SetGlobal(globalConf) + mainLog.Debug("Setting up analytics DB connection") + + analyticsStore := storage.RedisCluster{KeyPrefix: "analytics-"} + analytics.Store = &analyticsStore + analytics.Init(globalConf) + + redisPurgeOnce.Do(func() { + store := storage.RedisCluster{KeyPrefix: "analytics-"} + redisPurger := RedisPurger{Store: &store} + go redisPurger.PurgeLoop(purgeTicker) + }) + + if config.Global().AnalyticsConfig.Type == "rpc" { + mainLog.Debug("Using RPC cache purge") + + rpcPurgeOnce.Do(func() { + store := storage.RedisCluster{KeyPrefix: "analytics-"} + purger := RPCPurger{Store: &store} + purger.Connect() + go purger.PurgeLoop(rpcPurgeTicker) + }) + } + } 
+ + // Load all the files that have the "error" prefix. + templatesDir := filepath.Join(config.Global().TemplatePath, "error*") + templates = template.Must(template.ParseGlob(templatesDir)) + + if config.Global().CoProcessOptions.EnableCoProcess { + if err := CoProcessInit(); err != nil { + log.WithField("prefix", "coprocess").Error(err) + } + } + + // Get the notifier ready + mainLog.Debug("Notifier will not work in hybrid mode") + mainNotifierStore := storage.RedisCluster{} + mainNotifierStore.Connect() + MainNotifier = RedisNotifier{mainNotifierStore, RedisPubSubChannel} + + if config.Global().Monitor.EnableTriggerMonitors { + h := &WebHookHandler{} + if err := h.Init(config.Global().Monitor.Config); err != nil { + mainLog.Error("Failed to initialise monitor! ", err) + } else { + MonitoringHandler = h + } + } + + if globalConfig := config.Global(); globalConfig.AnalyticsConfig.NormaliseUrls.Enabled { + mainLog.Info("Setting up analytics normaliser") + globalConfig.AnalyticsConfig.NormaliseUrls.CompiledPatternSet = initNormalisationPatterns() + config.SetGlobal(globalConfig) + } + + certificateSecret := config.Global().Secret + if config.Global().Security.PrivateCertificateEncodingSecret != "" { + certificateSecret = config.Global().Security.PrivateCertificateEncodingSecret + } + + CertificateManager = certs.NewCertificateManager(getGlobalStorageHandler("cert-", false), certificateSecret, log) + + if config.Global().NewRelic.AppName != "" { + NewRelicApplication = SetupNewRelic() + } +} + +func buildConnStr(resource string) string { + + if config.Global().DBAppConfOptions.ConnectionString == "" && config.Global().DisableDashboardZeroConf { + mainLog.Fatal("Connection string is empty, failing.") + } + + if !config.Global().DisableDashboardZeroConf && config.Global().DBAppConfOptions.ConnectionString == "" { + mainLog.Info("Waiting for zeroconf signal...") + for config.Global().DBAppConfOptions.ConnectionString == "" { + time.Sleep(1 * time.Second) + } + } + + 
return config.Global().DBAppConfOptions.ConnectionString + resource +} + +func syncAPISpecs() (int, error) { + loader := APIDefinitionLoader{} + + apisMu.Lock() + defer apisMu.Unlock() + + if config.Global().UseDBAppConfigs { + connStr := buildConnStr("/system/apis") + tmpSpecs, err := loader.FromDashboardService(connStr, config.Global().NodeSecret) + if err != nil { + log.Error("failed to load API specs: ", err) + return 0, err + } + + apiSpecs = tmpSpecs + + mainLog.Debug("Downloading API Configurations from Dashboard Service") + } else if config.Global().SlaveOptions.UseRPC { + mainLog.Debug("Using RPC Configuration") + + var err error + apiSpecs, err = loader.FromRPC(config.Global().SlaveOptions.RPCKey) + if err != nil { + return 0, err + } + } else { + apiSpecs = loader.FromDir(config.Global().AppPath) + } + + mainLog.Printf("Detected %v APIs", len(apiSpecs)) + + if config.Global().AuthOverride.ForceAuthProvider { + for i := range apiSpecs { + apiSpecs[i].AuthProvider = config.Global().AuthOverride.AuthProvider + } + } + + if config.Global().AuthOverride.ForceSessionProvider { + for i := range apiSpecs { + apiSpecs[i].SessionProvider = config.Global().AuthOverride.SessionProvider + } + } + + return len(apiSpecs), nil +} + +func syncPolicies() (count int, err error) { + var pols map[string]user.Policy + + mainLog.Info("Loading policies") + + switch config.Global().Policies.PolicySource { + case "service": + if config.Global().Policies.PolicyConnectionString == "" { + mainLog.Fatal("No connection string or node ID present. 
Failing.") + } + connStr := config.Global().Policies.PolicyConnectionString + connStr = connStr + "/system/policies" + + mainLog.Info("Using Policies from Dashboard Service") + + pols = LoadPoliciesFromDashboard(connStr, config.Global().NodeSecret, config.Global().Policies.AllowExplicitPolicyID) + case "rpc": + mainLog.Debug("Using Policies from RPC") + pols, err = LoadPoliciesFromRPC(config.Global().SlaveOptions.RPCKey) + default: + // this is the only case now where we need a policy record name + if config.Global().Policies.PolicyRecordName == "" { + mainLog.Debug("No policy record name defined, skipping...") + return 0, nil + } + pols = LoadPoliciesFromFile(config.Global().Policies.PolicyRecordName) + } + mainLog.Infof("Policies found (%d total):", len(pols)) + for id := range pols { + mainLog.Infof(" - %s", id) + } + + policiesMu.Lock() + defer policiesMu.Unlock() + if len(pols) > 0 { + policiesByID = pols + } + + return len(pols), err +} + +// stripSlashes removes any trailing slashes from the request's URL +// path. 
+func stripSlashes(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + if trim := strings.TrimRight(path, "/"); trim != path { + r2 := *r + r2.URL.Path = trim + r = &r2 + } + next.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) +} + +func controlAPICheckClientCertificate(certLevel string, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if config.Global().Security.ControlAPIUseMutualTLS { + if err := CertificateManager.ValidateRequestCertificate(config.Global().Security.Certificates.ControlAPI, r); err != nil { + doJSONWrite(w, http.StatusForbidden, apiError(err.Error())) + return + } + } + + next.ServeHTTP(w, r) + }) +} + +// Set up default Tyk control API endpoints - these are global, so need to be added first +func loadAPIEndpoints(muxer *mux.Router) { + hostname := config.Global().HostName + if config.Global().ControlAPIHostname != "" { + hostname = config.Global().ControlAPIHostname + } + + r := mux.NewRouter() + muxer.PathPrefix("/tyk/").Handler(http.StripPrefix("/tyk", + stripSlashes(checkIsAPIOwner(controlAPICheckClientCertificate("/gateway/client", InstrumentationMW(r)))), + )) + + if hostname != "" { + muxer = muxer.Host(hostname).Subrouter() + mainLog.Info("Control API hostname set: ", hostname) + } + + if *cli.HTTPProfile || config.Global().HTTPProfile { + muxer.HandleFunc("/debug/pprof/profile", pprof_http.Profile) + muxer.HandleFunc("/debug/pprof/{_:.*}", pprof_http.Index) + } + + r.MethodNotAllowedHandler = MethodNotAllowedHandler{} + + mainLog.Info("Initialising Tyk REST API Endpoints") + + // set up main API handlers + r.HandleFunc("/reload/group", groupResetHandler).Methods("GET") + r.HandleFunc("/reload", resetHandler(nil)).Methods("GET") + + if !isRPCMode() { + r.HandleFunc("/org/keys", orgHandler).Methods("GET") + r.HandleFunc("/org/keys/{keyName:[^/]*}", orgHandler).Methods("POST", "PUT", "GET", "DELETE") + 
r.HandleFunc("/keys/policy/{keyName}", policyUpdateHandler).Methods("POST") + r.HandleFunc("/keys/create", createKeyHandler).Methods("POST") + r.HandleFunc("/apis", apiHandler).Methods("GET", "POST", "PUT", "DELETE") + r.HandleFunc("/apis/{apiID}", apiHandler).Methods("GET", "POST", "PUT", "DELETE") + r.HandleFunc("/health", healthCheckhandler).Methods("GET") + r.HandleFunc("/oauth/clients/create", createOauthClient).Methods("POST") + r.HandleFunc("/oauth/clients/{apiID}/{keyName:[^/]*}", oAuthClientHandler).Methods("PUT") + r.HandleFunc("/oauth/refresh/{keyName}", invalidateOauthRefresh).Methods("DELETE") + r.HandleFunc("/cache/{apiID}", invalidateCacheHandler).Methods("DELETE") + } else { + mainLog.Info("Node is slaved, REST API minimised") + } + + r.HandleFunc("/debug", traceHandler).Methods("POST") + + r.HandleFunc("/keys", keyHandler).Methods("POST", "PUT", "GET", "DELETE") + r.HandleFunc("/keys/{keyName:[^/]*}", keyHandler).Methods("POST", "PUT", "GET", "DELETE") + r.HandleFunc("/certs", certHandler).Methods("POST", "GET") + r.HandleFunc("/certs/{certID:[^/]*}", certHandler).Methods("POST", "GET", "DELETE") + r.HandleFunc("/oauth/clients/{apiID}", oAuthClientHandler).Methods("GET", "DELETE") + r.HandleFunc("/oauth/clients/{apiID}/{keyName:[^/]*}", oAuthClientHandler).Methods("GET", "DELETE") + r.HandleFunc("/oauth/clients/{apiID}/{keyName}/tokens", oAuthClientTokensHandler).Methods("GET") + + mainLog.Debug("Loaded API Endpoints") +} + +// checkIsAPIOwner will ensure that the accessor of the tyk API has the +// correct security credentials - this is a shared secret between the +// client and the owner and is set in the tyk.conf file. This should +// never be made public! 
+func checkIsAPIOwner(next http.Handler) http.Handler { + secret := config.Global().Secret + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tykAuthKey := r.Header.Get("X-Tyk-Authorization") + if tykAuthKey != secret { + // Error + mainLog.Warning("Attempted administrative access with invalid or missing key!") + + doJSONWrite(w, http.StatusForbidden, apiError("Forbidden")) + return + } + next.ServeHTTP(w, r) + }) +} + +func generateOAuthPrefix(apiID string) string { + return "oauth-data." + apiID + "." +} + +// Create API-specific OAuth handlers and respective auth servers +func addOAuthHandlers(spec *APISpec, muxer *mux.Router) *OAuthManager { + apiAuthorizePath := spec.Proxy.ListenPath + "tyk/oauth/authorize-client{_:/?}" + clientAuthPath := spec.Proxy.ListenPath + "oauth/authorize{_:/?}" + clientAccessPath := spec.Proxy.ListenPath + "oauth/token{_:/?}" + + serverConfig := osin.NewServerConfig() + serverConfig.ErrorStatusCode = http.StatusForbidden + serverConfig.AllowedAccessTypes = spec.Oauth2Meta.AllowedAccessTypes + serverConfig.AllowedAuthorizeTypes = spec.Oauth2Meta.AllowedAuthorizeTypes + serverConfig.RedirectUriSeparator = config.Global().OauthRedirectUriSeparator + + prefix := generateOAuthPrefix(spec.APIID) + storageManager := getGlobalStorageHandler(prefix, false) + storageManager.Connect() + osinStorage := &RedisOsinStorageInterface{storageManager, spec.SessionManager} //TODO: Needs storage manager from APISpec + + osinServer := TykOsinNewServer(serverConfig, osinStorage) + + oauthManager := OAuthManager{spec, osinServer} + oauthHandlers := OAuthHandlers{oauthManager} + + muxer.Handle(apiAuthorizePath, checkIsAPIOwner(allowMethods(oauthHandlers.HandleGenerateAuthCodeData, "POST"))) + muxer.HandleFunc(clientAuthPath, allowMethods(oauthHandlers.HandleAuthorizePassthrough, "GET", "POST")) + muxer.HandleFunc(clientAccessPath, allowMethods(oauthHandlers.HandleAccessRequest, "GET", "POST")) + + return &oauthManager +} + +func 
addBatchEndpoint(spec *APISpec, muxer *mux.Router) { + mainLog.Debug("Batch requests enabled for API") + apiBatchPath := spec.Proxy.ListenPath + "tyk/batch/" + batchHandler := BatchRequestHandler{API: spec} + muxer.HandleFunc(apiBatchPath, batchHandler.HandleBatchRequest) +} + +func loadCustomMiddleware(spec *APISpec) ([]string, apidef.MiddlewareDefinition, []apidef.MiddlewareDefinition, []apidef.MiddlewareDefinition, []apidef.MiddlewareDefinition, apidef.MiddlewareDriver) { + mwPaths := []string{} + var mwAuthCheckFunc apidef.MiddlewareDefinition + mwPreFuncs := []apidef.MiddlewareDefinition{} + mwPostFuncs := []apidef.MiddlewareDefinition{} + mwPostKeyAuthFuncs := []apidef.MiddlewareDefinition{} + mwDriver := apidef.OttoDriver + + // Set AuthCheck hook + if spec.CustomMiddleware.AuthCheck.Name != "" { + mwAuthCheckFunc = spec.CustomMiddleware.AuthCheck + if spec.CustomMiddleware.AuthCheck.Path != "" { + // Feed a JS file to Otto + mwPaths = append(mwPaths, spec.CustomMiddleware.AuthCheck.Path) + } + } + + // Load from the configuration + for _, mwObj := range spec.CustomMiddleware.Pre { + mwPaths = append(mwPaths, mwObj.Path) + mwPreFuncs = append(mwPreFuncs, mwObj) + mainLog.Debug("Loading custom PRE-PROCESSOR middleware: ", mwObj.Name) + } + for _, mwObj := range spec.CustomMiddleware.Post { + mwPaths = append(mwPaths, mwObj.Path) + mwPostFuncs = append(mwPostFuncs, mwObj) + mainLog.Debug("Loading custom POST-PROCESSOR middleware: ", mwObj.Name) + } + + // Load from folders + for _, folder := range [...]struct { + name string + single *apidef.MiddlewareDefinition + slice *[]apidef.MiddlewareDefinition + }{ + {name: "pre", slice: &mwPreFuncs}, + {name: "auth", single: &mwAuthCheckFunc}, + {name: "post_auth", slice: &mwPostKeyAuthFuncs}, + {name: "post", slice: &mwPostFuncs}, + } { + globPath := filepath.Join(config.Global().MiddlewarePath, spec.APIID, folder.name, "*.js") + paths, _ := filepath.Glob(globPath) + for _, path := range paths { + 
mainLog.Debug("Loading file middleware from ", path) + + mwDef := apidef.MiddlewareDefinition{ + Name: strings.Split(filepath.Base(path), ".")[0], + Path: path, + } + mainLog.Debug("-- Middleware name ", mwDef.Name) + mwDef.RequireSession = strings.HasSuffix(mwDef.Name, "_with_session") + if mwDef.RequireSession { + switch folder.name { + case "post_auth", "post": + mainLog.Debug("-- Middleware requires session") + default: + mainLog.Warning("Middleware requires session, but isn't post-auth: ", mwDef.Name) + } + } + mwPaths = append(mwPaths, path) + if folder.single != nil { + *folder.single = mwDef + } else { + *folder.slice = append(*folder.slice, mwDef) + } + } + } + + // Set middleware driver, defaults to OttoDriver + if spec.CustomMiddleware.Driver != "" { + mwDriver = spec.CustomMiddleware.Driver + } + + // Load PostAuthCheck hooks + for _, mwObj := range spec.CustomMiddleware.PostKeyAuth { + if mwObj.Path != "" { + // Otto files are specified here + mwPaths = append(mwPaths, mwObj.Path) + } + mwPostKeyAuthFuncs = append(mwPostKeyAuthFuncs, mwObj) + } + + return mwPaths, mwAuthCheckFunc, mwPreFuncs, mwPostFuncs, mwPostKeyAuthFuncs, mwDriver +} + +func createResponseMiddlewareChain(spec *APISpec) { + // Create the response processors + + responseChain := make([]TykResponseHandler, len(spec.ResponseProcessors)) + for i, processorDetail := range spec.ResponseProcessors { + processor := responseProcessorByName(processorDetail.Name) + if processor == nil { + mainLog.Error("No such processor: ", processorDetail.Name) + return + } + if err := processor.Init(processorDetail.Options, spec); err != nil { + mainLog.Debug("Failed to init processor: ", err) + } + mainLog.Debug("Loading Response processor: ", processorDetail.Name) + responseChain[i] = processor + } + spec.ResponseChain = responseChain +} + +func handleCORS(chain *[]alice.Constructor, spec *APISpec) { + + if spec.CORS.Enable { + mainLog.Debug("CORS ENABLED") + c := cors.New(cors.Options{ + AllowedOrigins: 
spec.CORS.AllowedOrigins, + AllowedMethods: spec.CORS.AllowedMethods, + AllowedHeaders: spec.CORS.AllowedHeaders, + ExposedHeaders: spec.CORS.ExposedHeaders, + AllowCredentials: spec.CORS.AllowCredentials, + MaxAge: spec.CORS.MaxAge, + OptionsPassthrough: spec.CORS.OptionsPassthrough, + Debug: spec.CORS.Debug, + }) + + *chain = append(*chain, c.Handler) + } +} + +func isRPCMode() bool { + return config.Global().AuthOverride.ForceAuthProvider && + config.Global().AuthOverride.AuthProvider.StorageEngine == RPCStorageEngine +} + +func rpcReloadLoop(rpcKey string) { + for { + RPCListener.CheckForReload(rpcKey) + } +} + +var reloadMu sync.Mutex + +func doReload() { + reloadMu.Lock() + defer reloadMu.Unlock() + + // Initialize/reset the JSVM + if config.Global().EnableJSVM { + GlobalEventsJSVM.Init(nil, logrus.NewEntry(log)) + } + + // Load the API Policies + if _, err := syncPolicies(); err != nil { + mainLog.Error("Error during syncing policies:", err.Error()) + return + } + + // load the specs + if count, err := syncAPISpecs(); err != nil { + mainLog.Error("Error during syncing apis:", err.Error()) + return + } else { + // skip re-loading only if dashboard service reported 0 APIs + // and current registry had 0 APIs + if count == 0 && apisByIDLen() == 0 { + mainLog.Warning("No API Definitions found, not reloading") + return + } + } + + // We have updated specs, lets load those... + mainLog.Info("Preparing new router") + newRouter := mux.NewRouter() + if config.Global().HttpServerOptions.OverrideDefaults { + newRouter.SkipClean(config.Global().HttpServerOptions.SkipURLCleaning) + } + + if config.Global().ControlAPIPort == 0 { + loadAPIEndpoints(newRouter) + } + + loadGlobalApps(newRouter) + + mainLog.Info("API reload complete") + + mainRouter = newRouter +} + +// startReloadChan and reloadDoneChan are used by the two reload loops +// running in separate goroutines to talk. 
reloadQueueLoop will use +// startReloadChan to signal to reloadLoop to start a reload, and +// reloadLoop will use reloadDoneChan to signal back that it's done with +// the reload. Buffered simply to not make the goroutines block each +// other. +var startReloadChan = make(chan struct{}, 1) +var reloadDoneChan = make(chan struct{}, 1) + +func reloadLoop(tick <-chan time.Time) { + <-tick + for range startReloadChan { + mainLog.Info("reload: initiating") + doReload() + mainLog.Info("reload: complete") + + mainLog.Info("Initiating coprocess reload") + doCoprocessReload() + + reloadDoneChan <- struct{}{} + <-tick + } +} + +// reloadQueue is used by reloadURLStructure to queue a reload. It's not +// buffered, as reloadQueueLoop should pick these up immediately. +var reloadQueue = make(chan func()) + +func reloadQueueLoop() { + reloading := false + var fns []func() + for { + select { + case <-reloadDoneChan: + for _, fn := range fns { + fn() + } + fns = fns[:0] + reloading = false + case fn := <-reloadQueue: + if fn != nil { + fns = append(fns, fn) + } + if !reloading { + mainLog.Info("Reload queued") + startReloadChan <- struct{}{} + reloading = true + } else { + mainLog.Info("Reload already queued") + } + } + } +} + +// reloadURLStructure will queue an API reload. The reload will +// eventually create a new muxer, reload all the app configs for an +// instance and then replace the DefaultServeMux with the new one. This +// enables a reconfiguration to take place without stopping any requests +// from being handled. +// +// done will be called when the reload is finished. Note that if a +// reload is already queued, another won't be queued, but done will +// still be called when said queued reload is finished. 
+func reloadURLStructure(done func()) { + reloadQueue <- done +} + +func setupLogger() { + if config.Global().UseSentry { + mainLog.Debug("Enabling Sentry support") + hook, err := logrus_sentry.NewSentryHook(config.Global().SentryCode, []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + }) + + hook.Timeout = 0 + + if err == nil { + log.Hooks.Add(hook) + rawLog.Hooks.Add(hook) + } + mainLog.Debug("Sentry hook active") + } + + if config.Global().UseSyslog { + mainLog.Debug("Enabling Syslog support") + hook, err := logrus_syslog.NewSyslogHook(config.Global().SyslogTransport, + config.Global().SyslogNetworkAddr, + syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + rawLog.Hooks.Add(hook) + } + mainLog.Debug("Syslog hook active") + } + + if config.Global().UseGraylog { + mainLog.Debug("Enabling Graylog support") + hook := graylogHook.NewGraylogHook(config.Global().GraylogNetworkAddr, + map[string]interface{}{"tyk-module": "gateway"}) + + log.Hooks.Add(hook) + rawLog.Hooks.Add(hook) + + mainLog.Debug("Graylog hook active") + } + + if config.Global().UseLogstash { + mainLog.Debug("Enabling Logstash support") + + var hook *logstashHook.Hook + var err error + var conn net.Conn + if config.Global().LogstashTransport == "udp" { + mainLog.Debug("Connecting to Logstash with udp") + hook, err = logstashHook.NewHook(config.Global().LogstashTransport, + config.Global().LogstashNetworkAddr, + appName) + } else { + mainLog.Debugf("Connecting to Logstash with %s", config.Global().LogstashTransport) + conn, err = gas.Dial(config.Global().LogstashTransport, config.Global().LogstashNetworkAddr) + if err == nil { + hook, err = logstashHook.NewHookWithConn(conn, appName) + } + } + + if err != nil { + log.Errorf("Error making connection for logstash: %v", err) + } else { + log.Hooks.Add(hook) + rawLog.Hooks.Add(hook) + mainLog.Debug("Logstash hook active") + } + } + + if config.Global().UseRedisLog { + hook := newRedisHook() + log.Hooks.Add(hook) + 
rawLog.Hooks.Add(hook) + + mainLog.Debug("Redis log hook active") + } +} + +func initialiseSystem() error { + if runningTests && os.Getenv("TYK_LOGLEVEL") == "" { + // `go test` without TYK_LOGLEVEL set defaults to no log + // output + log.Level = logrus.ErrorLevel + log.Out = ioutil.Discard + gorpc.SetErrorLogger(func(string, ...interface{}) {}) + stdlog.SetOutput(ioutil.Discard) + } else if *cli.DebugMode { + log.Level = logrus.DebugLevel + mainLog.Debug("Enabling debug-level output") + } + + if *cli.Conf != "" { + mainLog.Debugf("Using %s for configuration", *cli.Conf) + confPaths = []string{*cli.Conf} + } else { + mainLog.Debug("No configuration file defined, will try to use default (tyk.conf)") + } + + if !runningTests { + globalConf := config.Config{} + if err := config.Load(confPaths, &globalConf); err != nil { + return err + } + if globalConf.PIDFileLocation == "" { + globalConf.PIDFileLocation = "/var/run/tyk/tyk-gateway.pid" + } + // It's necessary to set global conf before and after calling afterConfSetup as global conf + // is being used by dependencies of the even handler init and then conf is modified again. + config.SetGlobal(globalConf) + afterConfSetup(&globalConf) + config.SetGlobal(globalConf) + } + + if os.Getenv("TYK_LOGLEVEL") == "" && !*cli.DebugMode { + level := strings.ToLower(config.Global().LogLevel) + switch level { + case "", "info": + // default, do nothing + case "error": + log.Level = logrus.ErrorLevel + case "warn": + log.Level = logrus.WarnLevel + case "debug": + log.Level = logrus.DebugLevel + default: + mainLog.Fatalf("Invalid log level %q specified in config, must be error, warn, debug or info. 
", level) + } + } + + if config.Global().Storage.Type != "redis" { + mainLog.Fatal("Redis connection details not set, please ensure that the storage type is set to Redis and that the connection parameters are correct.") + } + + setupGlobals() + + if *cli.Port != "" { + portNum, err := strconv.Atoi(*cli.Port) + if err != nil { + mainLog.Error("Port specified in flags must be a number: ", err) + } else { + globalConf := config.Global() + globalConf.ListenPort = portNum + config.SetGlobal(globalConf) + } + } + + // Enable all the loggers + setupLogger() + + mainLog.Info("PIDFile location set to: ", config.Global().PIDFileLocation) + + pidfile.SetPidfilePath(config.Global().PIDFileLocation) + if err := pidfile.Write(); err != nil { + mainLog.Error("Failed to write PIDFile: ", err) + } + + getHostDetails() + setupInstrumentation() + + if config.Global().HttpServerOptions.UseLE_SSL { + go StartPeriodicStateBackup(&LE_MANAGER) + } + + return nil +} + +// afterConfSetup takes care of non-sensical config values (such as zero +// timeouts) and sets up a few globals that depend on the config. 
+func afterConfSetup(conf *config.Config) { + if conf.SlaveOptions.CallTimeout == 0 { + conf.SlaveOptions.CallTimeout = 30 + } + + if conf.SlaveOptions.PingTimeout == 0 { + conf.SlaveOptions.PingTimeout = 60 + } + + GlobalRPCPingTimeout = time.Second * time.Duration(conf.SlaveOptions.PingTimeout) + GlobalRPCCallTimeout = time.Second * time.Duration(conf.SlaveOptions.CallTimeout) + initGenericEventHandlers(conf) + regexp.ResetCache(time.Second*time.Duration(conf.RegexpCacheExpire), !conf.DisableRegexpCache) + + if conf.HealthCheckEndpointName == "" { + conf.HealthCheckEndpointName = "hello" + } +} + +var hostDetails struct { + Hostname string + PID int +} + +func getHostDetails() { + var err error + if hostDetails.PID, err = pidfile.Read(); err != nil { + mainLog.Error("Failed ot get host pid: ", err) + } + if hostDetails.Hostname, err = os.Hostname(); err != nil { + mainLog.Error("Failed ot get hostname: ", err) + } +} + +func getGlobalStorageHandler(keyPrefix string, hashKeys bool) storage.Handler { + if config.Global().SlaveOptions.UseRPC { + return &RPCStorageHandler{ + KeyPrefix: keyPrefix, + HashKeys: hashKeys, + UserKey: config.Global().SlaveOptions.APIKey, + Address: config.Global().SlaveOptions.ConnectionString, + } + } + return storage.RedisCluster{KeyPrefix: keyPrefix, HashKeys: hashKeys} +} func main() { - gateway.Start() + cli.Init(VERSION, confPaths) + cli.Parse() + // Stop gateway process if not running in "start" mode: + if !cli.DefaultMode { + os.Exit(0) + } + + NodeID = "solo-" + uuid.NewV4().String() + + if err := initialiseSystem(); err != nil { + mainLog.Fatalf("Error initialising system: %v", err) + } + + var controlListener net.Listener + + onFork := func() { + mainLog.Warning("PREPARING TO FORK") + + if controlListener != nil { + if err := controlListener.Close(); err != nil { + mainLog.Error("Control listen handler exit: ", err) + } + mainLog.Info("Control listen closed") + } + + if config.Global().UseDBAppConfigs { + mainLog.Info("Stopping 
heartbeat") + DashService.StopBeating() + mainLog.Info("Waiting to de-register") + time.Sleep(10 * time.Second) + + os.Setenv("TYK_SERVICE_NONCE", ServiceNonce) + os.Setenv("TYK_SERVICE_NODEID", NodeID) + } + } + + listener, goAgainErr := goagain.Listener(onFork) + + if controlAPIPort := config.Global().ControlAPIPort; controlAPIPort > 0 { + var err error + if controlListener, err = generateListener(controlAPIPort); err != nil { + mainLog.Fatalf("Error starting control API listener: %s", err) + } else { + mainLog.Info("Starting control API listener: ", controlListener, err, controlAPIPort) + } + } else { + mainLog.Warn("The control_api_port should be changed for production") + } + + start() + + checkup.CheckFileDescriptors() + checkup.CheckCpus() + checkup.CheckDefaultSecrets(config.Global()) + + // Wait while Redis connection pools are ready before start serving traffic + if !storage.IsConnected() { + mainLog.Fatal("Redis connection pools are not ready. Exiting...") + } + mainLog.Info("Redis connection pools are ready") + + if *cli.MemProfile { + mainLog.Debug("Memory profiling active") + var err error + if memProfFile, err = os.Create("tyk.mprof"); err != nil { + panic(err) + } + defer memProfFile.Close() + } + if *cli.CPUProfile { + mainLog.Info("Cpu profiling active") + cpuProfFile, err := os.Create("tyk.prof") + if err != nil { + panic(err) + } + pprof.StartCPUProfile(cpuProfFile) + defer pprof.StopCPUProfile() + } + if *cli.BlockProfile { + mainLog.Info("Block profiling active") + runtime.SetBlockProfileRate(1) + } + if *cli.MutexProfile { + mainLog.Info("Mutex profiling active") + runtime.SetMutexProfileFraction(1) + } + + if goAgainErr != nil { + var err error + if listener, err = generateListener(config.Global().ListenPort); err != nil { + mainLog.Fatalf("Error starting listener: %s", err) + } + + listen(listener, controlListener, goAgainErr) + } else { + listen(listener, controlListener, nil) + + // Kill the parent, now that the child has started 
successfully. + mainLog.Debug("KILLING PARENT PROCESS") + if err := goagain.Kill(); err != nil { + mainLog.Fatalln(err) + } + } + + // Block the main goroutine awaiting signals. + if _, err := goagain.Wait(listener); err != nil { + mainLog.Fatalln(err) + } + + // Do whatever's necessary to ensure a graceful exit + // In this case, we'll simply stop listening and wait one second. + if err := listener.Close(); err != nil { + mainLog.Error("Listen handler exit: ", err) + } + + mainLog.Info("Stop signal received.") + + // stop analytics workers + if config.Global().EnableAnalytics && analytics.Store == nil { + analytics.Stop() + } + + // if using async session writes stop workers + if config.Global().UseAsyncSessionWrite { + DefaultOrgStore.Stop() + for i := range apiSpecs { + apiSpecs[i].StopSessionManagerPool() + } + + } + + // write pprof profiles + writeProfiles() + + if config.Global().UseDBAppConfigs { + mainLog.Info("Stopping heartbeat...") + DashService.StopBeating() + time.Sleep(2 * time.Second) + DashService.DeRegister() + } + + mainLog.Info("Terminating.") + + time.Sleep(time.Second) +} + +func writeProfiles() { + if *cli.BlockProfile { + f, err := os.Create("tyk.blockprof") + if err != nil { + panic(err) + } + if err = pprof.Lookup("block").WriteTo(f, 0); err != nil { + panic(err) + } + f.Close() + } + if *cli.MutexProfile { + f, err := os.Create("tyk.mutexprof") + if err != nil { + panic(err) + } + if err = pprof.Lookup("mutex").WriteTo(f, 0); err != nil { + panic(err) + } + f.Close() + } +} + +func start() { + // Set up a default org manager so we can traverse non-live paths + if !config.Global().SupressDefaultOrgStore { + mainLog.Debug("Initialising default org store") + DefaultOrgStore.Init(getGlobalStorageHandler("orgkey.", false)) + //DefaultQuotaStore.Init(getGlobalStorageHandler(CloudHandler, "orgkey.", false)) + DefaultQuotaStore.Init(getGlobalStorageHandler("orgkey.", false)) + } + + if config.Global().ControlAPIPort == 0 { + 
loadAPIEndpoints(mainRouter) + } + + // Start listening for reload messages + if !config.Global().SuppressRedisSignalReload { + go startPubSubLoop() + } + + if slaveOptions := config.Global().SlaveOptions; slaveOptions.UseRPC { + mainLog.Debug("Starting RPC reload listener") + RPCListener = RPCStorageHandler{ + KeyPrefix: "rpc.listener.", + UserKey: slaveOptions.APIKey, + Address: slaveOptions.ConnectionString, + SuppressRegister: true, + } + + RPCListener.Connect() + go rpcReloadLoop(slaveOptions.RPCKey) + go RPCListener.StartRPCKeepaliveWatcher() + go RPCListener.StartRPCLoopCheck(slaveOptions.RPCKey) + } + + // 1s is the minimum amount of time between hot reloads. The + // interval counts from the start of one reload to the next. + go reloadLoop(time.Tick(time.Second)) + go reloadQueueLoop() +} + +func generateListener(listenPort int) (net.Listener, error) { + listenAddress := config.Global().ListenAddress + + targetPort := listenAddress + ":" + strconv.Itoa(listenPort) + + if httpServerOptions := config.Global().HttpServerOptions; httpServerOptions.UseSSL { + mainLog.Info("--> Using SSL (https)") + + tlsConfig := tls.Config{ + GetCertificate: dummyGetCertificate, + ServerName: httpServerOptions.ServerName, + MinVersion: httpServerOptions.MinVersion, + ClientAuth: tls.NoClientCert, + InsecureSkipVerify: httpServerOptions.SSLInsecureSkipVerify, + CipherSuites: getCipherAliases(httpServerOptions.Ciphers), + } + + tlsConfig.GetConfigForClient = getTLSConfigForClient(&tlsConfig, listenPort) + + return tls.Listen("tcp", targetPort, &tlsConfig) + } else if config.Global().HttpServerOptions.UseLE_SSL { + + mainLog.Info("--> Using SSL LE (https)") + + GetLEState(&LE_MANAGER) + + conf := tls.Config{ + GetCertificate: LE_MANAGER.GetCertificate, + } + conf.GetConfigForClient = getTLSConfigForClient(&conf, listenPort) + + return tls.Listen("tcp", targetPort, &conf) + } else { + mainLog.WithField("port", targetPort).Info("--> Standard listener (http)") + return 
net.Listen("tcp", targetPort) + } +} + +func dashboardServiceInit() { + if DashService == nil { + DashService = &HTTPDashboardHandler{} + DashService.Init() + } +} + +func handleDashboardRegistration() { + if !config.Global().UseDBAppConfigs { + return + } + + dashboardServiceInit() + + // connStr := buildConnStr("/register/node") + + mainLog.Info("Registering node.") + if err := DashService.Register(); err != nil { + mainLog.Fatal("Registration failed: ", err) + } + + go DashService.StartBeating() +} + +var drlOnce sync.Once + +func startDRL() { + switch { + case config.Global().ManagementNode: + return + case config.Global().EnableSentinelRateLimiter, + config.Global().EnableRedisRollingLimiter: + mainLog.Warning("The old, non-distributed rate limiter is deprecated and we no longer recommend its use.") + return + } + mainLog.Info("Initialising distributed rate limiter") + setupDRL() + startRateLimitNotifications() +} + +// mainHandler's only purpose is to allow mainRouter to be dynamically replaced +type mainHandler struct{} + +func (_ mainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + AddNewRelicInstrumentation(NewRelicApplication, mainRouter) + + // make request body to be nopCloser and re-readable before serve it through chain of middlewares + nopCloseRequestBody(r) + + mainRouter.ServeHTTP(w, r) +} + +func listen(listener, controlListener net.Listener, err error) { + + readTimeout := defReadTimeout + writeTimeout := defWriteTimeout + + targetPort := config.Global().ListenAddress + ":" + strconv.Itoa(config.Global().ListenPort) + if config.Global().HttpServerOptions.ReadTimeout > 0 { + readTimeout = time.Duration(config.Global().HttpServerOptions.ReadTimeout) * time.Second + } + + if config.Global().HttpServerOptions.WriteTimeout > 0 { + writeTimeout = time.Duration(config.Global().HttpServerOptions.WriteTimeout) * time.Second + } + + if config.Global().ControlAPIPort > 0 { + loadAPIEndpoints(controlRouter) + } + + // Error not empty if handle 
reload when SIGUSR2 is received + if err != nil { + // Listen on a TCP or a UNIX domain socket (TCP here). + mainLog.Info("Setting up Server") + + // handle dashboard registration and nonces if available + handleDashboardRegistration() + + // Use a custom server so we can control tves + if config.Global().HttpServerOptions.OverrideDefaults { + mainRouter.SkipClean(config.Global().HttpServerOptions.SkipURLCleaning) + + mainLog.Infof("Custom gateway started (%s)", VERSION) + + mainLog.Warning("HTTP Server Overrides detected, this could destabilise long-running http-requests") + + s := &http.Server{ + Addr: targetPort, + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + Handler: mainHandler{}, + } + + if config.Global().CloseConnections { + s.SetKeepAlivesEnabled(false) + } + + // Accept connections in a new goroutine. + go s.Serve(listener) + + if controlListener != nil { + cs := &http.Server{ + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + Handler: controlRouter, + } + go cs.Serve(controlListener) + } + } else { + mainLog.Printf("Gateway started (%s)", VERSION) + + s := &http.Server{Handler: mainHandler{}} + if config.Global().CloseConnections { + s.SetKeepAlivesEnabled(false) + } + + go s.Serve(listener) + + if controlListener != nil { + go http.Serve(controlListener, controlRouter) + } + } + } else { + // handle dashboard registration and nonces if available + nonce := os.Getenv("TYK_SERVICE_NONCE") + nodeID := os.Getenv("TYK_SERVICE_NODEID") + if nonce == "" || nodeID == "" { + mainLog.Warning("No nonce found, re-registering") + handleDashboardRegistration() + + } else { + NodeID = nodeID + ServiceNonce = nonce + mainLog.Info("State recovered") + + os.Setenv("TYK_SERVICE_NONCE", "") + os.Setenv("TYK_SERVICE_NODEID", "") + } + + if config.Global().UseDBAppConfigs { + dashboardServiceInit() + go DashService.StartBeating() + } + + if config.Global().HttpServerOptions.OverrideDefaults { + 
mainRouter.SkipClean(config.Global().HttpServerOptions.SkipURLCleaning) + + mainLog.Warning("HTTP Server Overrides detected, this could destabilise long-running http-requests") + s := &http.Server{ + Addr: ":" + targetPort, + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + Handler: mainHandler{}, + } + + if config.Global().CloseConnections { + s.SetKeepAlivesEnabled(false) + } + + mainLog.Info("Custom gateway started") + go s.Serve(listener) + + if controlListener != nil { + cs := &http.Server{ + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + Handler: controlRouter, + } + go cs.Serve(controlListener) + } + } else { + mainLog.Printf("Gateway resumed (%s)", VERSION) + + s := &http.Server{Handler: mainHandler{}} + if config.Global().CloseConnections { + s.SetKeepAlivesEnabled(false) + } + + go s.Serve(listener) + + if controlListener != nil { + mainLog.Info("Control API listener started: ", controlListener, controlRouter) + + go http.Serve(controlListener, controlRouter) + } + } + + mainLog.Info("Resuming on", listener.Addr()) + } + + // at this point NodeID is ready to use by DRL + drlOnce.Do(startDRL) + + address := config.Global().ListenAddress + if config.Global().ListenAddress == "" { + address = "(open interface)" + } + mainLog.Info("--> Listening on address: ", address) + mainLog.Info("--> Listening on port: ", config.Global().ListenPort) + mainLog.Info("--> PID: ", hostDetails.PID) + + mainRouter.HandleFunc("/"+config.Global().HealthCheckEndpointName, func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello Tiki") + }) + + if !rpcEmergencyMode { + doReload() + } } diff --git a/gateway/middleware.go b/middleware.go similarity index 92% rename from gateway/middleware.go rename to middleware.go index 9dfd45e52fab..7714702e983f 100644 --- a/gateway/middleware.go +++ b/middleware.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -20,7 +20,6 @@ import ( "github.com/TykTechnologies/tyk/config" 
"github.com/TykTechnologies/tyk/request" "github.com/TykTechnologies/tyk/storage" - "github.com/TykTechnologies/tyk/trace" "github.com/TykTechnologies/tyk/user" ) @@ -40,21 +39,6 @@ type TykMiddleware interface { Name() string } -type TraceMiddleware struct { - TykMiddleware -} - -func (tr TraceMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Request, conf interface{}) (error, int) { - if trace.IsEnabled() { - span, ctx := trace.Span(r.Context(), - tr.Name(), - ) - defer span.Finish() - return tr.TykMiddleware.ProcessRequest(w, r.WithContext(ctx), conf) - } - return tr.TykMiddleware.ProcessRequest(w, r, conf) -} - func createDynamicMiddleware(name string, isPre, useSession bool, baseMid BaseMiddleware) func(http.Handler) http.Handler { dMiddleware := &DynamicMiddleware{ BaseMiddleware: baseMid, @@ -67,10 +51,7 @@ func createDynamicMiddleware(name string, isPre, useSession bool, baseMid BaseMi } // Generic middleware caller to make extension easier -func createMiddleware(actualMW TykMiddleware) func(http.Handler) http.Handler { - mw := &TraceMiddleware{ - TykMiddleware: actualMW, - } +func createMiddleware(mw TykMiddleware) func(http.Handler) http.Handler { // construct a new instance mw.Init() mw.SetName(mw.Name()) @@ -118,12 +99,8 @@ func createMiddleware(actualMW TykMiddleware) func(http.Handler) http.Handler { } err, errCode := mw.ProcessRequest(w, r, mwConf) if err != nil { - // GoPluginMiddleware are expected to send response in case of error - // but we still want to record error - _, isGoPlugin := actualMW.(*GoPluginMiddleware) - handler := ErrorHandler{*mw.Base()} - handler.HandleError(w, r, err.Error(), errCode, !isGoPlugin) + handler.HandleError(w, r, err.Error(), errCode) meta["error"] = err.Error() @@ -484,7 +461,6 @@ func (t BaseMiddleware) CheckSessionAndIdentityForValidKey(key string, r *http.R session := cachedVal.(user.SessionState) if err := t.ApplyPolicies(&session); err != nil { t.Logger().Error(err) - return session, false } return 
session, true } @@ -504,7 +480,6 @@ func (t BaseMiddleware) CheckSessionAndIdentityForValidKey(key string, r *http.R // Check for a policy, if there is a policy, pull it and overwrite the session values if err := t.ApplyPolicies(&session); err != nil { t.Logger().Error(err) - return session, false } t.Logger().Debug("Got key") return session, true @@ -526,7 +501,6 @@ func (t BaseMiddleware) CheckSessionAndIdentityForValidKey(key string, r *http.R // Check for a policy, if there is a policy, pull it and overwrite the session values if err := t.ApplyPolicies(&session); err != nil { t.Logger().Error(err) - return session, false } t.Logger().Debug("Lifetime is: ", session.Lifetime(t.Spec.SessionLifetime)) @@ -543,7 +517,6 @@ func (t BaseMiddleware) FireEvent(name apidef.TykEvent, meta interface{}) { type TykResponseHandler interface { Init(interface{}, *APISpec) error - Name() string HandleResponse(http.ResponseWriter, *http.Response, *http.Request, *user.SessionState) error } @@ -562,24 +535,14 @@ func responseProcessorByName(name string) TykResponseHandler { } func handleResponseChain(chain []TykResponseHandler, rw http.ResponseWriter, res *http.Response, req *http.Request, ses *user.SessionState) error { - traceIsEnabled := trace.IsEnabled() for _, rh := range chain { - if err := handleResponse(rh, rw, res, req, ses, traceIsEnabled); err != nil { + if err := rh.HandleResponse(rw, res, req, ses); err != nil { return err } } return nil } -func handleResponse(rh TykResponseHandler, rw http.ResponseWriter, res *http.Response, req *http.Request, ses *user.SessionState, shouldTrace bool) error { - if shouldTrace { - span, ctx := trace.Span(req.Context(), rh.Name()) - defer span.Finish() - req = req.WithContext(ctx) - } - return rh.HandleResponse(rw, res, req, ses) -} - func parseForm(r *http.Request) { // https://golang.org/pkg/net/http/#Request.ParseForm // ParseForm drains the request body for a request with Content-Type of diff --git a/gateway/monitor.go b/monitor.go 
similarity index 99% rename from gateway/monitor.go rename to monitor.go index fd5a9d81f62a..c97b72382382 100644 --- a/gateway/monitor.go +++ b/monitor.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "time" diff --git a/gateway/multi_target_proxy_handler.go b/multi_target_proxy_handler.go similarity index 99% rename from gateway/multi_target_proxy_handler.go rename to multi_target_proxy_handler.go index 4d452fe8ac7f..042a89496dc0 100644 --- a/gateway/multi_target_proxy_handler.go +++ b/multi_target_proxy_handler.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "io" diff --git a/gateway/multiauth_test.go b/multiauth_test.go similarity index 95% rename from gateway/multiauth_test.go rename to multiauth_test.go index c2d6a650687d..67ad4e41b66a 100644 --- a/gateway/multiauth_test.go +++ b/multiauth_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/base64" @@ -88,7 +88,7 @@ func getMultiAuthStandardAndBasicAuthChain(spec *APISpec) http.Handler { } func testPrepareMultiSessionBA(t testing.TB, isBench bool) (*APISpec, *http.Request) { - spec := CreateSpecTest(t, multiAuthDev) + spec := createSpecTest(t, multiAuthDev) // Create BA baSession := createMultiBasicAuthSession(isBench) @@ -117,7 +117,7 @@ func testPrepareMultiSessionBA(t testing.TB, isBench bool) (*APISpec, *http.Requ toEncode := strings.Join([]string{username, password}, ":") encodedPass := base64.StdEncoding.EncodeToString([]byte(toEncode)) - req := TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) req.Header.Set("Authorization", fmt.Sprintf("Basic %s", encodedPass)) req.Header.Set("x-standard-auth", fmt.Sprintf("Bearer %s", customToken)) @@ -153,7 +153,7 @@ func BenchmarkMultiSession_BA_Standard_OK(b *testing.B) { } func TestMultiSession_BA_Standard_Identity(t *testing.T) { - spec := CreateSpecTest(t, multiAuthDev) + spec := createSpecTest(t, multiAuthDev) // Create BA baSession := createMultiBasicAuthSession(false) @@ -172,7 +172,7 @@ func 
TestMultiSession_BA_Standard_Identity(t *testing.T) { encodedPass := base64.StdEncoding.EncodeToString([]byte(to_encode)) recorder := httptest.NewRecorder() - req := TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) req.Header.Set("Authorization", fmt.Sprintf("Basic %s", encodedPass)) req.Header.Set("x-standard-auth", fmt.Sprintf("Bearer %s", customToken)) @@ -190,7 +190,7 @@ func TestMultiSession_BA_Standard_Identity(t *testing.T) { } func TestMultiSession_BA_Standard_FAILBA(t *testing.T) { - spec := CreateSpecTest(t, multiAuthDev) + spec := createSpecTest(t, multiAuthDev) // Create BA baSession := createMultiBasicAuthSession(false) @@ -209,7 +209,7 @@ func TestMultiSession_BA_Standard_FAILBA(t *testing.T) { encodedPass := base64.StdEncoding.EncodeToString([]byte(to_encode)) recorder := httptest.NewRecorder() - req := TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) req.Header.Set("Authorization", fmt.Sprintf("Basic %s", encodedPass)) req.Header.Set("x-standard-auth", fmt.Sprintf("Bearer %s", customToken)) @@ -222,7 +222,7 @@ func TestMultiSession_BA_Standard_FAILBA(t *testing.T) { } func TestMultiSession_BA_Standard_FAILAuth(t *testing.T) { - spec := CreateSpecTest(t, multiAuthDev) + spec := createSpecTest(t, multiAuthDev) // Create BA baSession := createMultiBasicAuthSession(false) @@ -241,7 +241,7 @@ func TestMultiSession_BA_Standard_FAILAuth(t *testing.T) { encodedPass := base64.StdEncoding.EncodeToString([]byte(to_encode)) recorder := httptest.NewRecorder() - req := TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) req.Header.Set("Authorization", fmt.Sprintf("Basic %s", encodedPass)) req.Header.Set("x-standard-auth", fmt.Sprintf("Bearer %s", "WRONGTOKEN")) diff --git a/gateway/mw_access_rights.go b/mw_access_rights.go similarity index 99% rename from gateway/mw_access_rights.go rename to mw_access_rights.go index 5d02adcf6ccf..cabc7f5dbaaa 100644 --- a/gateway/mw_access_rights.go +++ b/mw_access_rights.go @@ -1,4 
+1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_api_rate_limit.go b/mw_api_rate_limit.go similarity index 99% rename from gateway/mw_api_rate_limit.go rename to mw_api_rate_limit.go index afa791ed532e..971a024b3c8f 100644 --- a/gateway/mw_api_rate_limit.go +++ b/mw_api_rate_limit.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_api_rate_limit_test.go b/mw_api_rate_limit_test.go similarity index 94% rename from gateway/mw_api_rate_limit_test.go rename to mw_api_rate_limit_test.go index d17116ce8de7..1486b0a883d7 100644 --- a/gateway/mw_api_rate_limit_test.go +++ b/mw_api_rate_limit_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" @@ -9,9 +9,8 @@ import ( "github.com/TykTechnologies/tyk/config" "github.com/TykTechnologies/tyk/test" - uuid "github.com/satori/go.uuid" - "github.com/justinas/alice" + uuid "github.com/satori/go.uuid" "github.com/TykTechnologies/tyk/user" ) @@ -64,9 +63,9 @@ func getGlobalRLAuthKeyChain(spec *APISpec) http.Handler { } func TestRLOpen(t *testing.T) { - spec := CreateSpecTest(t, openRLDefSmall) + spec := createSpecTest(t, openRLDefSmall) - req := TestReq(t, "GET", "/rl_test/", nil) + req := testReq(t, "GET", "/rl_test/", nil) DRLManager.CurrentTokenValue = 1 DRLManager.RequestTokenValue = 1 @@ -94,9 +93,9 @@ func TestRLOpen(t *testing.T) { func requestThrottlingTest(limiter string, testLevel string) func(t *testing.T) { return func(t *testing.T) { - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() globalCfg := config.Global() @@ -125,7 +124,7 @@ func requestThrottlingTest(limiter string, testLevel string) func(t *testing.T) for _, requestThrottlingEnabled := range []bool{false, true} { - spec := BuildAndLoadAPI(func(spec *APISpec) { + spec := buildAndLoadAPI(func(spec *APISpec) { spec.Name = "test" spec.APIID = "test" spec.OrgID = "default" @@ -133,7 +132,7 @@ func 
requestThrottlingTest(limiter string, testLevel string) func(t *testing.T) spec.Proxy.ListenPath = "/" })[0] - policyID := CreatePolicy(func(p *user.Policy) { + policyID := createPolicy(func(p *user.Policy) { p.OrgID = "default" p.AccessRights = map[string]user.AccessDefinition{ @@ -169,7 +168,7 @@ func requestThrottlingTest(limiter string, testLevel string) func(t *testing.T) } }) - key := CreateSession(func(s *user.SessionState) { + key := createSession(func(s *user.SessionState) { s.ApplyPolicies = []string{policyID} }) @@ -209,9 +208,9 @@ func TestRequestThrottling(t *testing.T) { } func TestRLClosed(t *testing.T) { - spec := CreateSpecTest(t, closedRLDefSmall) + spec := createSpecTest(t, closedRLDefSmall) - req := TestReq(t, "GET", "/rl_closed_test/", nil) + req := testReq(t, "GET", "/rl_closed_test/", nil) session := createRLSession() customToken := uuid.NewV4().String() @@ -244,9 +243,9 @@ func TestRLClosed(t *testing.T) { } func TestRLOpenWithReload(t *testing.T) { - spec := CreateSpecTest(t, openRLDefSmall) + spec := createSpecTest(t, openRLDefSmall) - req := TestReq(t, "GET", "/rl_test/", nil) + req := testReq(t, "GET", "/rl_test/", nil) DRLManager.CurrentTokenValue = 1 DRLManager.RequestTokenValue = 1 diff --git a/gateway/mw_auth_key.go b/mw_auth_key.go similarity index 99% rename from gateway/mw_auth_key.go rename to mw_auth_key.go index 8197a9ade712..26780aaa183e 100644 --- a/gateway/mw_auth_key.go +++ b/mw_auth_key.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_auth_key_test.go b/mw_auth_key_test.go similarity index 90% rename from gateway/mw_auth_key_test.go rename to mw_auth_key_test.go index dc861157204b..b6d3be08b3c3 100644 --- a/gateway/mw_auth_key_test.go +++ b/mw_auth_key_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/hex" @@ -19,11 +19,11 @@ import ( ) func TestMurmur3CharBug(t *testing.T) { - defer ResetTestConfig() - ts := StartTest() + defer resetTestConfig() + ts := 
newTykTestServer() defer ts.Close() - api := BuildAPI(func(spec *APISpec) { + api := buildAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.Proxy.ListenPath = "/" })[0] @@ -37,9 +37,9 @@ func TestMurmur3CharBug(t *testing.T) { globalConf.HashKeys = false config.SetGlobal(globalConf) - LoadAPI(api) + loadAPI(api) - key := CreateSession() + key := createSession() ts.Run(t, []test.TestCase{ genTestCase("wrong", 403), @@ -54,9 +54,9 @@ func TestMurmur3CharBug(t *testing.T) { globalConf.HashKeyFunction = "" config.SetGlobal(globalConf) - LoadAPI(api) + loadAPI(api) - key := CreateSession() + key := createSession() ts.Run(t, []test.TestCase{ genTestCase("wrong", 403), @@ -72,9 +72,9 @@ func TestMurmur3CharBug(t *testing.T) { globalConf.HashKeyFunction = "murmur32" config.SetGlobal(globalConf) - LoadAPI(api) + loadAPI(api) - key := CreateSession() + key := createSession() ts.Run(t, []test.TestCase{ genTestCase("wrong", 403), @@ -90,9 +90,9 @@ func TestMurmur3CharBug(t *testing.T) { globalConf.HashKeyFunction = "murmur64" config.SetGlobal(globalConf) - LoadAPI(api) + loadAPI(api) - key := CreateSession() + key := createSession() ts.Run(t, []test.TestCase{ genTestCase("wrong", 403), @@ -104,11 +104,11 @@ func TestMurmur3CharBug(t *testing.T) { } func TestSignatureValidation(t *testing.T) { - defer ResetTestConfig() - ts := StartTest() + defer resetTestConfig() + ts := newTykTestServer() defer ts.Close() - api := BuildAPI(func(spec *APISpec) { + api := buildAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.Proxy.ListenPath = "/" spec.Auth.ValidateSignature = true @@ -120,9 +120,9 @@ func TestSignatureValidation(t *testing.T) { t.Run("Static signature", func(t *testing.T) { api.Auth.Signature.Secret = "foobar" - LoadAPI(api) + loadAPI(api) - key := CreateSession() + key := createSession() hasher := signature_validator.MasheryMd5sum{} validHash := hasher.Hash(key, "foobar", time.Now().Unix()) @@ -149,9 +149,9 @@ func TestSignatureValidation(t *testing.T) 
{ t.Run("Dynamic signature", func(t *testing.T) { api.Auth.Signature.Secret = "$tyk_meta.signature_secret" - LoadAPI(api) + loadAPI(api) - key := CreateSession(func(s *user.SessionState) { + key := createSession(func(s *user.SessionState) { s.MetaData = map[string]interface{}{ "signature_secret": "foobar", } @@ -215,7 +215,7 @@ func getAuthKeyChain(spec *APISpec) http.Handler { } func testPrepareAuthKeySession(tb testing.TB, apiDef string, isBench bool) (string, *APISpec) { - spec := CreateSpecTest(tb, apiDef) + spec := createSpecTest(tb, apiDef) session := createAuthKeyAuthSession(isBench) customToken := "" if isBench { @@ -232,7 +232,7 @@ func TestBearerTokenAuthKeySession(t *testing.T) { customToken, spec := testPrepareAuthKeySession(t, authKeyDef, false) recorder := httptest.NewRecorder() - req := TestReq(t, "GET", "/auth_key_test/", nil) + req := testReq(t, "GET", "/auth_key_test/", nil) req.Header.Set("authorization", "Bearer "+customToken) @@ -251,7 +251,7 @@ func BenchmarkBearerTokenAuthKeySession(b *testing.B) { customToken, spec := testPrepareAuthKeySession(b, authKeyDef, true) recorder := httptest.NewRecorder() - req := TestReq(b, "GET", "/auth_key_test/", nil) + req := testReq(b, "GET", "/auth_key_test/", nil) req.Header.Set("authorization", "Bearer "+customToken) @@ -286,7 +286,7 @@ func TestMultiAuthBackwardsCompatibleSession(t *testing.T) { customToken, spec := testPrepareAuthKeySession(t, multiAuthBackwardsCompatible, false) recorder := httptest.NewRecorder() - req := TestReq(t, "GET", fmt.Sprintf("/auth_key_test/?token=%s", customToken), nil) + req := testReq(t, "GET", fmt.Sprintf("/auth_key_test/?token=%s", customToken), nil) chain := getAuthKeyChain(spec) chain.ServeHTTP(recorder, req) @@ -303,7 +303,7 @@ func BenchmarkMultiAuthBackwardsCompatibleSession(b *testing.B) { customToken, spec := testPrepareAuthKeySession(b, multiAuthBackwardsCompatible, true) recorder := httptest.NewRecorder() - req := TestReq(b, "GET", 
fmt.Sprintf("/auth_key_test/?token=%s", customToken), nil) + req := testReq(b, "GET", fmt.Sprintf("/auth_key_test/?token=%s", customToken), nil) chain := getAuthKeyChain(spec) @@ -336,7 +336,7 @@ const multiAuthBackwardsCompatible = `{ }` func TestMultiAuthSession(t *testing.T) { - spec := CreateSpecTest(t, multiAuthDef) + spec := createSpecTest(t, multiAuthDef) session := createAuthKeyAuthSession(false) customToken := "54321111" // AuthKey sessions are stored by {token} @@ -344,7 +344,7 @@ func TestMultiAuthSession(t *testing.T) { // Set the url param recorder := httptest.NewRecorder() - req := TestReq(t, "GET", fmt.Sprintf("/auth_key_test/?token=%s", customToken), nil) + req := testReq(t, "GET", fmt.Sprintf("/auth_key_test/?token=%s", customToken), nil) chain := getAuthKeyChain(spec) chain.ServeHTTP(recorder, req) @@ -356,7 +356,7 @@ func TestMultiAuthSession(t *testing.T) { // Set the header recorder = httptest.NewRecorder() - req = TestReq(t, "GET", "/auth_key_test/?token=", nil) + req = testReq(t, "GET", "/auth_key_test/?token=", nil) req.Header.Set("authorization", customToken) chain.ServeHTTP(recorder, req) @@ -368,7 +368,7 @@ func TestMultiAuthSession(t *testing.T) { // Set the cookie recorder = httptest.NewRecorder() - req = TestReq(t, "GET", "/auth_key_test/?token=", nil) + req = testReq(t, "GET", "/auth_key_test/?token=", nil) req.AddCookie(&http.Cookie{Name: "oreo", Value: customToken}) chain.ServeHTTP(recorder, req) @@ -380,7 +380,7 @@ func TestMultiAuthSession(t *testing.T) { // No header, param or cookie recorder = httptest.NewRecorder() - req = TestReq(t, "GET", "/auth_key_test/", nil) + req = testReq(t, "GET", "/auth_key_test/", nil) chain.ServeHTTP(recorder, req) diff --git a/gateway/mw_basic_auth.go b/mw_basic_auth.go similarity index 99% rename from gateway/mw_basic_auth.go rename to mw_basic_auth.go index ea30da24b505..c892d80ea1a7 100644 --- a/gateway/mw_basic_auth.go +++ b/mw_basic_auth.go @@ -1,4 +1,4 @@ -package gateway +package main import 
( "bytes" diff --git a/gateway/mw_basic_auth_test.go b/mw_basic_auth_test.go similarity index 96% rename from gateway/mw_basic_auth_test.go rename to mw_basic_auth_test.go index 9376fe7daeb4..d53bb32f7387 100644 --- a/gateway/mw_basic_auth_test.go +++ b/mw_basic_auth_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/base64" @@ -21,12 +21,12 @@ func genAuthHeader(username, password string) string { } func testPrepareBasicAuth(cacheDisabled bool) *user.SessionState { - session := CreateStandardSession() + session := createStandardSession() session.BasicAuthData.Password = "password" session.AccessRights = map[string]user.AccessDefinition{"test": {APIID: "test", Versions: []string{"v1"}}} session.OrgID = "default" - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseBasicAuth = true spec.BasicAuth.DisableCaching = cacheDisabled spec.UseKeylessAccess = false @@ -38,7 +38,7 @@ func testPrepareBasicAuth(cacheDisabled bool) *user.SessionState { } func TestBasicAuth(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() session := testPrepareBasicAuth(false) @@ -60,15 +60,15 @@ func TestBasicAuth(t *testing.T) { } func TestBasicAuthFromBody(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - session := CreateStandardSession() + session := createStandardSession() session.BasicAuthData.Password = "password" session.AccessRights = map[string]user.AccessDefinition{"test": {APIID: "test", Versions: []string{"v1"}}} session.OrgID = "default" - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseBasicAuth = true spec.BasicAuth.ExtractFromBody = true spec.BasicAuth.BodyUserRegexp = `(.*)` @@ -103,9 +103,9 @@ func TestBasicAuthLegacyWithHashFunc(t *testing.T) { // settings to create BA session with legacy key format globalConf.HashKeyFunction = "" config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := 
StartTest() + ts := newTykTestServer() defer ts.Close() // create session with legacy key format @@ -134,9 +134,9 @@ func TestBasicAuthCachedUserCollision(t *testing.T) { globalConf.HashKeys = true globalConf.HashKeyFunction = "murmur64" config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() session := testPrepareBasicAuth(false) @@ -168,7 +168,7 @@ func TestBasicAuthCachedUserCollision(t *testing.T) { } func TestBasicAuthCachedPasswordCollision(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() for _, useCache := range []bool{true, false} { @@ -206,7 +206,7 @@ func TestBasicAuthCachedPasswordCollision(t *testing.T) { func BenchmarkBasicAuth(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() session := testPrepareBasicAuth(false) @@ -239,7 +239,7 @@ func BenchmarkBasicAuth(b *testing.B) { func BenchmarkBasicAuth_CacheEnabled(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() session := testPrepareBasicAuth(false) @@ -265,7 +265,7 @@ func BenchmarkBasicAuth_CacheEnabled(b *testing.B) { func BenchmarkBasicAuth_CacheDisabled(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() session := testPrepareBasicAuth(true) diff --git a/gateway/mw_certificate_check.go b/mw_certificate_check.go similarity index 98% rename from gateway/mw_certificate_check.go rename to mw_certificate_check.go index 28b2ba31041c..359542a254bd 100644 --- a/gateway/mw_certificate_check.go +++ b/mw_certificate_check.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" diff --git a/gateway/mw_context_vars.go b/mw_context_vars.go similarity index 96% rename from gateway/mw_context_vars.go rename to mw_context_vars.go index 7e17dc1fb143..265760b324b9 100644 --- a/gateway/mw_context_vars.go +++ b/mw_context_vars.go @@ -1,10 +1,10 @@ 
-package gateway +package main import ( "net/http" "strings" - uuid "github.com/satori/go.uuid" + "github.com/satori/go.uuid" "github.com/TykTechnologies/tyk/request" ) diff --git a/gateway/mw_context_vars_test.go b/mw_context_vars_test.go similarity index 98% rename from gateway/mw_context_vars_test.go rename to mw_context_vars_test.go index 335f60b83409..dd6871dfecc3 100644 --- a/gateway/mw_context_vars_test.go +++ b/mw_context_vars_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "io" @@ -14,7 +14,7 @@ import ( ) func testPrepareContextVarsMiddleware() { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.EnableContextVars = true spec.VersionData.Versions = map[string]apidef.VersionInfo{ @@ -32,7 +32,7 @@ func testPrepareContextVarsMiddleware() { } func TestContextVarsMiddleware(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() testPrepareContextVarsMiddleware() @@ -48,7 +48,7 @@ func TestContextVarsMiddleware(t *testing.T) { func BenchmarkContextVarsMiddleware(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() testPrepareContextVarsMiddleware() diff --git a/gateway/mw_example_test.go b/mw_example_test.go similarity index 98% rename from gateway/mw_example_test.go rename to mw_example_test.go index 4644455ed37f..5b07799d41d8 100644 --- a/gateway/mw_example_test.go +++ b/mw_example_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_granular_access.go b/mw_granular_access.go similarity index 98% rename from gateway/mw_granular_access.go rename to mw_granular_access.go index 37ff441a328e..4fd53b093fb0 100644 --- a/gateway/mw_granular_access.go +++ b/mw_granular_access.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_hmac.go b/mw_hmac.go similarity index 99% rename from gateway/mw_hmac.go rename to mw_hmac.go index 
d175d0ae9488..d7b10f6d2d9c 100644 --- a/gateway/mw_hmac.go +++ b/mw_hmac.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/hmac" diff --git a/gateway/mw_hmac_test.go b/mw_hmac_test.go similarity index 97% rename from gateway/mw_hmac_test.go rename to mw_hmac_test.go index b12cbf416de6..01eed8b6ee5b 100644 --- a/gateway/mw_hmac_test.go +++ b/mw_hmac_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/hmac" @@ -101,7 +101,7 @@ func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { } func testPrepareHMACAuthSessionPass(tb testing.TB, hashFn func() hash.Hash, eventWG *sync.WaitGroup, withHeader bool, isBench bool) (string, *APISpec, *http.Request, string) { - spec := CreateSpecTest(tb, hmacAuthDef) + spec := createSpecTest(tb, hmacAuthDef) session := createHMACAuthSession() // Should not receive an AuthFailure event @@ -121,7 +121,7 @@ func testPrepareHMACAuthSessionPass(tb testing.TB, hashFn func() hash.Hash, even spec.SessionManager.UpdateSession(sessionKey, session, 60, false) - req := TestReq(tb, "GET", "/", nil) + req := testReq(tb, "GET", "/", nil) refDate := "Mon, 02 Jan 2006 15:04:05 MST" @@ -219,7 +219,7 @@ func BenchmarkHMACAuthSessionPass(b *testing.B) { } func TestHMACAuthSessionAuxDateHeader(t *testing.T) { - spec := CreateSpecTest(t, hmacAuthDef) + spec := createSpecTest(t, hmacAuthDef) session := createHMACAuthSession() // Should not receive an AuthFailure event @@ -236,7 +236,7 @@ func TestHMACAuthSessionAuxDateHeader(t *testing.T) { spec.SessionManager.UpdateSession("9876", session, 60, false) recorder := httptest.NewRecorder() - req := TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) refDate := "Mon, 02 Jan 2006 15:04:05 MST" @@ -271,7 +271,7 @@ func TestHMACAuthSessionAuxDateHeader(t *testing.T) { } func TestHMACAuthSessionFailureDateExpired(t *testing.T) { - spec := CreateSpecTest(t, hmacAuthDef) + spec := createSpecTest(t, hmacAuthDef) session := createHMACAuthSession() // Should 
receive an AuthFailure event @@ -288,7 +288,7 @@ func TestHMACAuthSessionFailureDateExpired(t *testing.T) { spec.SessionManager.UpdateSession("9876", session, 60, false) recorder := httptest.NewRecorder() - req := TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) refDate := "Mon, 02 Jan 2006 15:04:05 MST" @@ -323,7 +323,7 @@ func TestHMACAuthSessionFailureDateExpired(t *testing.T) { } func TestHMACAuthSessionKeyMissing(t *testing.T) { - spec := CreateSpecTest(t, hmacAuthDef) + spec := createSpecTest(t, hmacAuthDef) session := createHMACAuthSession() // Should receive an AuthFailure event @@ -340,7 +340,7 @@ func TestHMACAuthSessionKeyMissing(t *testing.T) { spec.SessionManager.UpdateSession("9876", session, 60, false) recorder := httptest.NewRecorder() - req := TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) refDate := "Mon, 02 Jan 2006 15:04:05 MST" @@ -375,7 +375,7 @@ func TestHMACAuthSessionKeyMissing(t *testing.T) { } func TestHMACAuthSessionMalformedHeader(t *testing.T) { - spec := CreateSpecTest(t, hmacAuthDef) + spec := createSpecTest(t, hmacAuthDef) session := createHMACAuthSession() // Should receive an AuthFailure event @@ -392,7 +392,7 @@ func TestHMACAuthSessionMalformedHeader(t *testing.T) { spec.SessionManager.UpdateSession("9876", session, 60, false) recorder := httptest.NewRecorder() - req := TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) refDate := "Mon, 02 Jan 2006 15:04:05 MST" @@ -485,7 +485,7 @@ func replaceUpperCase(originalSignature string, lowercaseList []string) string { } func TestHMACAuthSessionPassWithHeaderFieldLowerCase(t *testing.T) { - spec := CreateSpecTest(t, hmacAuthDef) + spec := createSpecTest(t, hmacAuthDef) session := createHMACAuthSession() // Should not receive an AuthFailure event @@ -502,7 +502,7 @@ func TestHMACAuthSessionPassWithHeaderFieldLowerCase(t *testing.T) { spec.SessionManager.UpdateSession("9876", session, 60, false) recorder := httptest.NewRecorder() - req := 
TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) refDate := "Mon, 02 Jan 2006 15:04:05 MST" diff --git a/gateway/mw_ip_blacklist.go b/mw_ip_blacklist.go similarity index 99% rename from gateway/mw_ip_blacklist.go rename to mw_ip_blacklist.go index 2aa3cf235906..7bb5db897d6f 100644 --- a/gateway/mw_ip_blacklist.go +++ b/mw_ip_blacklist.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_ip_blacklist_test.go b/mw_ip_blacklist_test.go similarity index 93% rename from gateway/mw_ip_blacklist_test.go rename to mw_ip_blacklist_test.go index 324d1d72b9fd..d3429312ec58 100644 --- a/gateway/mw_ip_blacklist_test.go +++ b/mw_ip_blacklist_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" @@ -18,7 +18,7 @@ var testBlackListIPData = []struct { } func testPrepareIPBlacklistMiddleware() *APISpec { - return BuildAPI(func(spec *APISpec) { + return buildAPI(func(spec *APISpec) { spec.EnableIpBlacklisting = true spec.BlacklistedIPs = []string{"127.0.0.1", "127.0.0.1/24"} })[0] @@ -29,7 +29,7 @@ func TestIPBlacklistMiddleware(t *testing.T) { for ti, tc := range testBlackListIPData { rec := httptest.NewRecorder() - req := TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) req.RemoteAddr = tc.remote if tc.forwarded != "" { req.Header.Set("X-Forwarded-For", tc.forwarded) @@ -57,7 +57,7 @@ func BenchmarkIPBlacklistMiddleware(b *testing.B) { rec := httptest.NewRecorder() for i := 0; i < b.N; i++ { for ti, tc := range testBlackListIPData { - req := TestReq(b, "GET", "/", nil) + req := testReq(b, "GET", "/", nil) req.RemoteAddr = tc.remote if tc.forwarded != "" { req.Header.Set("X-Forwarded-For", tc.forwarded) diff --git a/gateway/mw_ip_whitelist.go b/mw_ip_whitelist.go similarity index 98% rename from gateway/mw_ip_whitelist.go rename to mw_ip_whitelist.go index 693f6b94f339..7dd5257362e3 100644 --- a/gateway/mw_ip_whitelist.go +++ b/mw_ip_whitelist.go @@ -1,4 +1,4 @@ -package gateway +package main 
import ( "errors" diff --git a/gateway/mw_ip_whitelist_test.go b/mw_ip_whitelist_test.go similarity index 92% rename from gateway/mw_ip_whitelist_test.go rename to mw_ip_whitelist_test.go index 943dee9b76f8..0b414370c5d2 100644 --- a/gateway/mw_ip_whitelist_test.go +++ b/mw_ip_whitelist_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" @@ -18,7 +18,7 @@ var testWhiteListIPData = []struct { } func testPrepareIPMiddlewarePass() *APISpec { - return BuildAPI(func(spec *APISpec) { + return buildAPI(func(spec *APISpec) { spec.EnableIpWhiteListing = true spec.AllowedIPs = []string{"127.0.0.1", "127.0.0.1/24"} })[0] @@ -29,7 +29,7 @@ func TestIPMiddlewarePass(t *testing.T) { for ti, tc := range testWhiteListIPData { rec := httptest.NewRecorder() - req := TestReq(t, "GET", "/", nil) + req := testReq(t, "GET", "/", nil) req.RemoteAddr = tc.remote if tc.forwarded != "" { req.Header.Set("X-Forwarded-For", tc.forwarded) @@ -56,7 +56,7 @@ func BenchmarkIPMiddlewarePass(b *testing.B) { rec := httptest.NewRecorder() for i := 0; i < b.N; i++ { for ti, tc := range testWhiteListIPData { - req := TestReq(b, "GET", "/", nil) + req := testReq(b, "GET", "/", nil) req.RemoteAddr = tc.remote if tc.forwarded != "" { req.Header.Set("X-Forwarded-For", tc.forwarded) diff --git a/gateway/mw_js_plugin.go b/mw_js_plugin.go similarity index 98% rename from gateway/mw_js_plugin.go rename to mw_js_plugin.go index 3832024aa0da..5eafdb17905f 100644 --- a/gateway/mw_js_plugin.go +++ b/mw_js_plugin.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -91,13 +91,12 @@ func (d *DynamicMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Reques logger := d.Logger() // Create the proxy object + defer r.Body.Close() originalBody, err := ioutil.ReadAll(r.Body) if err != nil { logger.WithError(err).Error("Failed to read request body") return nil, http.StatusOK } - defer r.Body.Close() - headers := r.Header host := r.Host if host == "" && r.URL != nil { @@ -110,24 
+109,20 @@ func (d *DynamicMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Reques } headers.Set("Host", host) } - scheme := "http" - if r.TLS != nil { - scheme = "https" - } requestData := MiniRequestObject{ Headers: headers, SetHeaders: map[string]string{}, DeleteHeaders: []string{}, Body: originalBody, - URL: r.URL.String(), + URL: r.URL.Path, Params: r.URL.Query(), AddParams: map[string]string{}, ExtendedParams: map[string][]string{}, DeleteParams: []string{}, Method: r.Method, RequestURI: r.RequestURI, - Scheme: scheme, + Scheme: r.URL.Scheme, } requestAsJson, err := json.Marshal(requestData) @@ -208,10 +203,7 @@ func (d *DynamicMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Reques r.Body = ioutil.NopCloser(bytes.NewReader(newRequestData.Request.Body)) } - r.URL, err = url.ParseRequestURI(newRequestData.Request.URL) - if err != nil { - return nil, http.StatusOK - } + r.URL.Path = newRequestData.Request.URL // Delete and set headers for _, dh := range newRequestData.Request.DeleteHeaders { @@ -240,7 +232,7 @@ func (d *DynamicMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Reques r.URL.RawQuery = values.Encode() - // Save the session data (if modified) + // Save the sesison data (if modified) if !d.Pre && d.UseSession { newMeta := mapStrsToIfaces(newRequestData.SessionMeta) if !reflect.DeepEqual(session.MetaData, newMeta) { @@ -471,7 +463,8 @@ func (j *JSVM) LoadTykJSApi() { data.Set(k, v) } - u, _ := url.ParseRequestURI(domain + hro.Resource) + u, _ := url.ParseRequestURI(domain) + u.Path = hro.Resource urlStr := u.String() // "https://api.com/user/" var d string diff --git a/gateway/mw_js_plugin_test.go b/mw_js_plugin_test.go similarity index 86% rename from gateway/mw_js_plugin_test.go rename to mw_js_plugin_test.go index 01281aa78901..d6da9cb6ce71 100644 --- a/gateway/mw_js_plugin_test.go +++ b/mw_js_plugin_test.go @@ -1,4 +1,6 @@ -package gateway +// +build coprocess + +package main import ( "bytes" @@ -170,7 +172,7 @@ 
testJSVMData.NewProcessRequest(function(request, session, spec) { } dynMid.Spec.JSVM = jsvm - r := TestReq(t, "GET", "/v1/test-data", nil) + r := testReq(t, "GET", "/v1/test-data", nil) dynMid.ProcessRequest(nil, r, nil) if want, got := "bar", r.Header.Get("data-foo"); want != got { t.Fatalf("wanted header to be %q, got %q", want, got) @@ -207,7 +209,7 @@ testJSVMData.NewProcessRequest(function(request, session, config) { dynMid.Spec.JSVM = jsvm rec := httptest.NewRecorder() - r := TestReq(t, "GET", "/v1/test-data", nil) + r := testReq(t, "GET", "/v1/test-data", nil) dynMid.ProcessRequest(rec, r, nil) wantBody := "Foobarbaz" @@ -252,7 +254,7 @@ testJSVMData.NewProcessRequest(function(request, session, config) { } dynMid.Spec.JSVM = jsvm - r := TestReq(t, "GET", "/v1/test-data", nil) + r := testReq(t, "GET", "/v1/test-data", nil) err, code := dynMid.ProcessRequest(nil, r, nil) if want := 401; code != 401 { @@ -301,7 +303,7 @@ testJSVMCore.NewProcessRequest(function(request, session, config) { } dynMid.Spec.JSVM = jsvm - r := TestReq(t, "GET", "/foo", nil) + r := testReq(t, "GET", "/foo", nil) dynMid.ProcessRequest(nil, r, nil) if want, got := "globalValue", r.Header.Get("global"); want != got { @@ -318,6 +320,7 @@ func TestJSVMRequestScheme(t *testing.T) { Pre: true, } req := httptest.NewRequest("GET", "/foo", nil) + req.URL.Scheme = "http" jsvm := JSVM{} jsvm.Init(nil, logrus.NewEntry(log)) @@ -349,10 +352,10 @@ leakMid.NewProcessRequest(function(request, session) { } func TestTykMakeHTTPRequest(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - bundle := RegisterBundle("jsvm_make_http_request", map[string]string{ + bundle := registerBundle("jsvm_make_http_request", map[string]string{ "manifest.json": ` { "file_list": [], @@ -373,25 +376,23 @@ func TestTykMakeHTTPRequest(t *testing.T) { "Method": "GET", "Headers": {"Accept": "application/json"}, "Domain": spec.config_data.base_url, - "Resource": "/api/get?param1=dummy" + "Resource": 
"/api/get" } var resp = TykMakeHttpRequest(JSON.stringify(newRequest)); - var usableResponse = JSON.parse(resp); + var useableResponse = JSON.parse(resp); - if(usableResponse.Code > 400) { - request.ReturnOverrides.ResponseCode = usableResponse.code + if(useableResponse.Code > 400) { + request.ReturnOverrides.ResponseCode = useableResponse.code request.ReturnOverrides.ResponseError = "error" } - request.Body = usableResponse.Body - return testTykMakeHTTPRequest.ReturnData(request, {}) }); `}) t.Run("Existing endpoint", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/sample" spec.ConfigData = map[string]interface{}{ "base_url": ts.URL, @@ -405,7 +406,7 @@ func TestTykMakeHTTPRequest(t *testing.T) { }) t.Run("Nonexistent endpoint", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/sample" spec.ConfigData = map[string]interface{}{ "base_url": ts.URL, @@ -415,49 +416,6 @@ func TestTykMakeHTTPRequest(t *testing.T) { ts.Run(t, test.TestCase{Path: "/sample", Code: 404}) }) - - t.Run("Endpoint with query", func(t *testing.T) { - BuildAndLoadAPI(func(spec *APISpec) { - spec.Proxy.ListenPath = "/sample" - spec.ConfigData = map[string]interface{}{ - "base_url": ts.URL, - } - spec.CustomMiddlewareBundle = bundle - }, func(spec *APISpec) { - spec.Proxy.ListenPath = "/api" - }) - - ts.Run(t, test.TestCase{Path: "/sample", BodyMatch: "/api/get?param1=dummy", Code: 200}) - }) - - t.Run("Endpoint with skip cleaning", func(t *testing.T) { - ts.Close() - globalConf := config.Global() - globalConf.HttpServerOptions.SkipURLCleaning = true - globalConf.HttpServerOptions.OverrideDefaults = true - config.SetGlobal(globalConf) - - prevSkipClean := defaultTestConfig.HttpServerOptions.OverrideDefaults && - defaultTestConfig.HttpServerOptions.SkipURLCleaning - testServerRouter.SkipClean(true) - defer 
testServerRouter.SkipClean(prevSkipClean) - - ts := StartTest() - defer ts.Close() - defer ResetTestConfig() - - BuildAndLoadAPI(func(spec *APISpec) { - spec.Proxy.ListenPath = "/sample" - spec.ConfigData = map[string]interface{}{ - "base_url": ts.URL, - } - spec.CustomMiddlewareBundle = bundle - }, func(spec *APISpec) { - spec.Proxy.ListenPath = "/api" - }) - - ts.Run(t, test.TestCase{Path: "/sample/99999-XXXX+%2F%2F+dog+9+fff%C3%A9o+party", BodyMatch: "URI\":\"/sample/99999-XXXX+%2F%2F+dog+9+fff%C3%A9o+party", Code: 200}) - }) } func TestJSVMBase64(t *testing.T) { diff --git a/gateway/mw_jwt.go b/mw_jwt.go similarity index 90% rename from gateway/mw_jwt.go rename to mw_jwt.go index 7bb9349c1959..0d51bd49559d 100644 --- a/gateway/mw_jwt.go +++ b/mw_jwt.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/md5" @@ -10,7 +10,7 @@ import ( "strings" "time" - jwt "github.com/dgrijalva/jwt-go" + "github.com/dgrijalva/jwt-go" cache "github.com/pmylund/go-cache" "github.com/TykTechnologies/tyk/apidef" @@ -250,33 +250,6 @@ func (k *JWTMiddleware) getUserIdFromClaim(claims jwt.MapClaims) (string, error) return "", errors.New(message) } -func getScopeFromClaim(claims jwt.MapClaims, scopeClaimName string) []string { - // get claim with scopes and turn it into slice of strings - if scope, found := claims[scopeClaimName].(string); found { - return strings.Split(scope, " ") // by standard is space separated list of values - } - - // claim with scopes is optional so return nothing if it is not present - return nil -} - -func mapScopeToPolicies(mapping map[string]string, scope []string) []string { - polIDs := []string{} - - // add all policies matched from scope-policy mapping - policiesToApply := map[string]bool{} - for _, scopeItem := range scope { - if policyID, ok := mapping[scopeItem]; ok { - policiesToApply[policyID] = true - } - } - for id := range policiesToApply { - polIDs = append(polIDs, id) - } - - return polIDs -} - // processCentralisedJWT Will check a 
JWT token centrally against the secret stored in the API Definition. func (k *JWTMiddleware) processCentralisedJWT(r *http.Request, token *jwt.Token) (error, int) { k.Logger().Debug("JWT authority is centralised") @@ -340,33 +313,6 @@ func (k *JWTMiddleware) processCentralisedJWT(r *http.Request, token *jwt.Token) return errors.New("failed to create key: " + err.Error()), http.StatusInternalServerError } - // apply policies from scope if scope-to-policy mapping is specified for this API - if len(k.Spec.JWTScopeToPolicyMapping) != 0 { - scopeClaimName := k.Spec.JWTScopeClaimName - if scopeClaimName == "" { - scopeClaimName = "scope" - } - - if scope := getScopeFromClaim(claims, scopeClaimName); scope != nil { - polIDs := []string{ - basePolicyID, // add base policy as a first one - } - - // add all policies matched from scope-policy mapping - mappedPolIDs := mapScopeToPolicies(k.Spec.JWTScopeToPolicyMapping, scope) - - polIDs = append(polIDs, mappedPolIDs...) - session.SetPolicies(polIDs...) 
- - // multiple policies assigned to a key, check if it is applicable - if err := k.ApplyPolicies(&session); err != nil { - k.reportLoginFailure(baseFieldData, r) - k.Logger().WithError(err).Error("Could not several policies from scope-claim mapping to JWT to session") - return errors.New("key not authorized: could not apply several policies"), http.StatusForbidden - } - } - } - if err != nil { k.reportLoginFailure(baseFieldData, r) k.Logger().Error("Could not find a valid policy to apply to this token!") @@ -697,10 +643,7 @@ func generateSessionFromPolicy(policyID, orgID string, enforceOrg bool) (user.Se session.ThrottleRetryLimit = policy.ThrottleRetryLimit session.QuotaMax = policy.QuotaMax session.QuotaRenewalRate = policy.QuotaRenewalRate - session.AccessRights = make(map[string]user.AccessDefinition) - for apiID, access := range policy.AccessRights { - session.AccessRights[apiID] = access - } + session.AccessRights = policy.AccessRights session.HMACEnabled = policy.HMACEnabled session.IsInactive = policy.IsInactive session.Tags = policy.Tags diff --git a/gateway/mw_jwt_test.go b/mw_jwt_test.go similarity index 82% rename from gateway/mw_jwt_test.go rename to mw_jwt_test.go index 32f81950d803..80d06f8d15fa 100644 --- a/gateway/mw_jwt_test.go +++ b/mw_jwt_test.go @@ -1,22 +1,54 @@ -package gateway +package main import ( "crypto/md5" "encoding/base64" - "encoding/json" "fmt" "net/http" "reflect" "testing" "time" - jwt "github.com/dgrijalva/jwt-go" + "github.com/dgrijalva/jwt-go" "github.com/lonelycode/go-uuid/uuid" "github.com/TykTechnologies/tyk/test" "github.com/TykTechnologies/tyk/user" ) +const jwtSecret = "9879879878787878" + +// openssl genrsa -out app.rsa +const jwtRSAPrivKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAyqZ4rwKF8qCExS7kpY4cnJa/37FMkJNkalZ3OuslLB0oRL8T +4c94kdF4aeNzSFkSe2n99IBI6Ssl79vbfMZb+t06L0Q94k+/P37x7+/RJZiff4y1 +VGjrnrnMI2iu9l4iBBRYzNmG6eblroEMMWlgk5tysHgxB59CSNIcD9gqk1hx4n/F 
+gOmvKsfQgWHNlPSDTRcWGWGhB2/XgNVYG2pOlQxAPqLhBHeqGTXBbPfGF9cHzixp +sPr6GtbzPwhsQ/8bPxoJ7hdfn+rzztks3d6+HWURcyNTLRe0mjXjjee9Z6+gZ+H+ +fS4pnP9tqT7IgU6ePUWTpjoiPtLexgsAa/ctjQIDAQABAoIBAECWvnBJRZgHQUn3 +oDiECup9wbnyMI0D7UVXObk1qSteP69pl1SpY6xWLyLQs7WjbhiXt7FuEc7/SaAh +Wttx/W7/g8P85Bx1fmcmdsYakXaCJpPorQKyTibQ4ReIDfvIFN9n/MWNr0ptpVbx +GonFJFrneK52IGplgCLllLwYEbnULYcJc6E25Ro8U2gQjF2r43PDa07YiDrmB/GV +QQW4HTo+CA9rdK0bP8GpXgc0wpmBhx/t/YdnDg6qhzyUMk9As7JrAzYPjHO0cRun +vhA/aG/mdMmRumY75nj7wB5U5DgstsN2ER75Pjr1xe1knftIyNm15AShCPfLaLGo +dA2IpwECgYEA5E8h6ssa7QroCGwp/N0wSJW41hFYGygbOEg6yPWTJkqmMZVduD8X +/KFqJK4LcIbFQuR28+hWJpHm/RF1AMRhbbWkAj6h02gv5izFwDiFKev5paky4Evg +G8WfUOmSZ1D+fVxwaoG0OaRZpCovUTxYig3xrI659DMeKqpQ7e8l9ekCgYEA4zql +l4P4Dn0ydr+TI/s4NHIQHkaLQAVk3OWwyKowijXd8LCtuZRA1NKSpqQ4ZXi0B17o +9zzF5jEUjws3qWv4PKWdxJu3y+h/etsg7wxUeNizbY2ooUGeMbk0tWxJihbgaI7E +XxLIT50F3Ky4EJ2cUL9GmJ+gLCw0KIaVbkiyYAUCgYEA0WyVHB76r/2VIkS1rzHm +HG7ageKfAyoi7dmzsqsxM6q+EDWHJn8Zra8TAlp0O+AkClwvkUTJ4c9sJy9gODfr +dwtrSnPRVW74oRbovo4Z+H5xHbi65mwzQsZggYP/u63cA3pL1Cbt/wH3CFN52/aS +8PAhg7vYb1yEi3Z3jgoUtCECgYEAhSPX4u9waQzyhKG7lVmdlR1AVH0BGoIOl1/+ +NZWC23i0klLzd8lmM00uoHWYldwjoC38UuFJE5eudCIeeybITMC9sHWNO+z+xP2g +TnDrDePrPkXCiLnp9ziNqb/JVyAQXTNJ3Gsk84EN7j9Fmna/IJDyzHq7XyaHaTdy +VyxBWAECgYEA4jYS07bPx5UMhKiMJDqUmDfLNFD97XwPoJIkOdn6ezqeOSmlmo7t +jxHLbCmsDOAsCU/0BlLXg9wMU7n5QKSlfTVGok/PU0rq2FUXQwyKGnellrqODwFQ +YGivtXBGXk1hlVYlje1RB+W6RQuDAegI5h8vl8pYJS9JQH0wjatsDaE= +-----END RSA PRIVATE KEY----- +` + // openssl rsa -in app.rsa -pubout > app.rsa.pub const jwtRSAPubKey = ` -----BEGIN PUBLIC KEY----- @@ -97,7 +129,7 @@ func prepareGenericJWTSession(testName string, method string, claimName string, case RSASign: sessionFunc = createJWTSessionWithRSA - jwtToken = CreateJWKToken(func(t *jwt.Token) { + jwtToken = createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["exp"] = time.Now().Add(time.Hour * 72).Unix() @@ -110,7 +142,7 @@ func prepareGenericJWTSession(testName 
string, method string, claimName string, }) } - spec := BuildAndLoadAPI(func(spec *APISpec) { + spec := buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.JWTSigningMethod = method spec.EnableJWT = true @@ -128,12 +160,12 @@ func prepareGenericJWTSession(testName string, method string, claimName string, } func TestJWTSessionHMAC(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //If we skip the check then the Id will be taken from SUB and the call will succeed _, jwtToken := prepareGenericJWTSession(t.Name(), HMACSign, KID, false) - defer ResetTestConfig() + defer resetTestConfig() authHeaders := map[string]string{"authorization": jwtToken} t.Run("Request with valid JWT signed with HMAC", func(t *testing.T) { @@ -146,12 +178,12 @@ func TestJWTSessionHMAC(t *testing.T) { func BenchmarkJWTSessionHMAC(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //If we skip the check then the Id will be taken from SUB and the call will succeed _, jwtToken := prepareGenericJWTSession(b.Name(), HMACSign, KID, false) - defer ResetTestConfig() + defer resetTestConfig() authHeaders := map[string]string{"authorization": jwtToken} for i := 0; i < b.N; i++ { @@ -163,7 +195,7 @@ func BenchmarkJWTSessionHMAC(b *testing.B) { func TestJWTHMACIdInSubClaim(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //Same as above @@ -190,7 +222,7 @@ func TestJWTHMACIdInSubClaim(t *testing.T) { // Case where the gw always check the 'kid' claim first but if this JWTSkipCheckKidAsId is set on the api level, // then it'll work _, jwtToken = prepareGenericJWTSession(t.Name(), HMACSign, SUB, true) - defer ResetTestConfig() + defer resetTestConfig() authHeaders = map[string]string{"authorization": jwtToken} t.Run("Request with valid JWT/HMAC/Id in SuB/Global-dont-skip-kid/Api-skip-kid", func(t *testing.T) { ts.Run(t, test.TestCase{ @@ -200,7 +232,7 @@ func TestJWTHMACIdInSubClaim(t 
*testing.T) { } func TestJWTRSAIdInSubClaim(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() _, jwtToken := prepareGenericJWTSession(t.Name(), RSASign, SUB, true) @@ -231,7 +263,7 @@ func TestJWTRSAIdInSubClaim(t *testing.T) { } func TestJWTSessionRSA(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //default values, keep backward compatibility @@ -247,7 +279,7 @@ func TestJWTSessionRSA(t *testing.T) { func BenchmarkJWTSessionRSA(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //default values, keep backward compatibility @@ -262,7 +294,7 @@ func BenchmarkJWTSessionRSA(b *testing.B) { } func TestJWTSessionFailRSA_EmptyJWT(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //default values, same as before (keeps backward compatibility) @@ -277,7 +309,7 @@ func TestJWTSessionFailRSA_EmptyJWT(t *testing.T) { } func TestJWTSessionFailRSA_NoAuthHeader(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //default values, same as before (keeps backward compatibility) @@ -292,7 +324,7 @@ func TestJWTSessionFailRSA_NoAuthHeader(t *testing.T) { } func TestJWTSessionFailRSA_MalformedJWT(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //default values, same as before (keeps backward compatibility) @@ -309,7 +341,7 @@ func TestJWTSessionFailRSA_MalformedJWT(t *testing.T) { } func TestJWTSessionFailRSA_MalformedJWT_NOTRACK(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //default values, same as before (keeps backward compatibility) @@ -327,7 +359,7 @@ func TestJWTSessionFailRSA_MalformedJWT_NOTRACK(t *testing.T) { } func TestJWTSessionFailRSA_WrongJWT(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //default values, same as before (keeps backward compatibility) @@ -344,7 +376,7 @@ func TestJWTSessionFailRSA_WrongJWT(t 
*testing.T) { } func TestJWTSessionRSABearer(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //default values, same as before (keeps backward compatibility) @@ -361,7 +393,7 @@ func TestJWTSessionRSABearer(t *testing.T) { func BenchmarkJWTSessionRSABearer(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //default values, same as before (keeps backward compatibility) @@ -376,7 +408,7 @@ func BenchmarkJWTSessionRSABearer(b *testing.B) { } func TestJWTSessionRSABearerInvalid(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //default values, same as before (keeps backward compatibility) @@ -393,7 +425,7 @@ func TestJWTSessionRSABearerInvalid(t *testing.T) { } func TestJWTSessionRSABearerInvalidTwoBears(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //default values, same as before (keeps backward compatibility) @@ -418,7 +450,7 @@ func TestJWTSessionRSABearerInvalidTwoBears(t *testing.T) { // JWTSessionRSAWithRawSourceOnWithClientID func prepareJWTSessionRSAWithRawSourceOnWithClientID(isBench bool) string { - spec := BuildAndLoadAPI(func(spec *APISpec) { + spec := buildAndLoadAPI(func(spec *APISpec) { spec.APIID = "777888" spec.OrgID = "default" spec.UseKeylessAccess = false @@ -430,7 +462,7 @@ func prepareJWTSessionRSAWithRawSourceOnWithClientID(isBench bool) string { spec.Proxy.ListenPath = "/" })[0] - policyID := CreatePolicy(func(p *user.Policy) { + policyID := createPolicy(func(p *user.Policy) { p.OrgID = "default" p.AccessRights = map[string]user.AccessDefinition{ spec.APIID: { @@ -452,7 +484,7 @@ func prepareJWTSessionRSAWithRawSourceOnWithClientID(isBench bool) string { spec.SessionManager.ResetQuota(tokenID, session, false) spec.SessionManager.UpdateSession(tokenID, session, 60, false) - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" 
t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["user_id"] = "user" @@ -464,7 +496,7 @@ func prepareJWTSessionRSAWithRawSourceOnWithClientID(isBench bool) string { } func TestJWTSessionRSAWithRawSourceOnWithClientID(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() jwtToken := prepareJWTSessionRSAWithRawSourceOnWithClientID(false) @@ -480,7 +512,7 @@ func TestJWTSessionRSAWithRawSourceOnWithClientID(t *testing.T) { func BenchmarkJWTSessionRSAWithRawSourceOnWithClientID(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() jwtToken := prepareJWTSessionRSAWithRawSourceOnWithClientID(true) @@ -496,7 +528,7 @@ func BenchmarkJWTSessionRSAWithRawSourceOnWithClientID(b *testing.B) { // JWTSessionRSAWithRawSource func prepareJWTSessionRSAWithRawSource() string { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = RSASign @@ -506,9 +538,9 @@ func prepareJWTSessionRSAWithRawSource() string { spec.Proxy.ListenPath = "/" }) - pID := CreatePolicy() + pID := createPolicy() - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["user_id"] = "user" @@ -520,7 +552,7 @@ func prepareJWTSessionRSAWithRawSource() string { } func TestJWTSessionRSAWithRawSource(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() jwtToken := prepareJWTSessionRSAWithRawSource() @@ -536,7 +568,7 @@ func TestJWTSessionRSAWithRawSource(t *testing.T) { func BenchmarkJWTSessionRSAWithRawSource(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() jwtToken := prepareJWTSessionRSAWithRawSource() @@ -555,10 +587,10 @@ func BenchmarkJWTSessionRSAWithRawSource(b *testing.B) { } func 
TestJWTSessionRSAWithRawSourceInvalidPolicyID(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - spec := BuildAPI(func(spec *APISpec) { + spec := buildAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = RSASign @@ -568,11 +600,11 @@ func TestJWTSessionRSAWithRawSourceInvalidPolicyID(t *testing.T) { spec.Proxy.ListenPath = "/" })[0] - LoadAPI(spec) + loadAPI(spec) - CreatePolicy() + createPolicy() - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["user_id"] = "user" @@ -591,12 +623,12 @@ func TestJWTSessionRSAWithRawSourceInvalidPolicyID(t *testing.T) { } func TestJWTSessionExpiresAtValidationConfigs(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - pID := CreatePolicy() + pID := createPolicy() jwtAuthHeaderGen := func(skew time.Duration) map[string]string { - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)["policy_id"] = pID t.Claims.(jwt.MapClaims)["user_id"] = "user123" t.Claims.(jwt.MapClaims)["exp"] = time.Now().Add(skew).Unix() @@ -605,7 +637,7 @@ func TestJWTSessionExpiresAtValidationConfigs(t *testing.T) { return map[string]string{"authorization": jwtToken} } - spec := BuildAPI(func(spec *APISpec) { + spec := buildAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = RSASign @@ -618,7 +650,7 @@ func TestJWTSessionExpiresAtValidationConfigs(t *testing.T) { // This test is successful by definition t.Run("Expiry_After_now--Valid_jwt", func(t *testing.T) { spec.JWTExpiresAtValidationSkew = 0 //Default value - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(+time.Second), Code: http.StatusOK, @@ -628,7 +660,7 @@ func TestJWTSessionExpiresAtValidationConfigs(t 
*testing.T) { // This test is successful by definition, so it's true also with skew, but just to avoid confusion. t.Run("Expiry_After_now-Add_skew--Valid_jwt", func(t *testing.T) { spec.JWTExpiresAtValidationSkew = 1 - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(+time.Second), Code: http.StatusOK, @@ -637,7 +669,7 @@ func TestJWTSessionExpiresAtValidationConfigs(t *testing.T) { t.Run("Expiry_Before_now--Invalid_jwt", func(t *testing.T) { spec.JWTExpiresAtValidationSkew = 0 //Default value - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(-time.Second), @@ -648,7 +680,7 @@ func TestJWTSessionExpiresAtValidationConfigs(t *testing.T) { t.Run("Expired_token-Before_now-Huge_skew--Valid_jwt", func(t *testing.T) { spec.JWTExpiresAtValidationSkew = 1000 // This value doesn't matter since validation is disabled - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(-time.Second), Code: http.StatusOK, @@ -657,7 +689,7 @@ func TestJWTSessionExpiresAtValidationConfigs(t *testing.T) { t.Run("Expired_token-Before_now-Add_skew--Valid_jwt", func(t *testing.T) { spec.JWTExpiresAtValidationSkew = 2 - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(-time.Second), Code: http.StatusOK, @@ -666,12 +698,12 @@ func TestJWTSessionExpiresAtValidationConfigs(t *testing.T) { } func TestJWTSessionIssueAtValidationConfigs(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - pID := CreatePolicy() + pID := createPolicy() jwtAuthHeaderGen := func(skew time.Duration) map[string]string { - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)["policy_id"] = pID t.Claims.(jwt.MapClaims)["user_id"] = "user123" t.Claims.(jwt.MapClaims)["iat"] = time.Now().Add(skew).Unix() @@ -680,7 +712,7 @@ func TestJWTSessionIssueAtValidationConfigs(t *testing.T) { return 
map[string]string{"authorization": jwtToken} } - spec := BuildAPI(func(spec *APISpec) { + spec := buildAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = "rsa" @@ -694,7 +726,7 @@ func TestJWTSessionIssueAtValidationConfigs(t *testing.T) { t.Run("IssuedAt_Before_now-no_skew--Valid_jwt", func(t *testing.T) { spec.JWTIssuedAtValidationSkew = 0 - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(-time.Second), Code: http.StatusOK, @@ -704,7 +736,7 @@ func TestJWTSessionIssueAtValidationConfigs(t *testing.T) { t.Run("Expiry_after_now--Invalid_jwt", func(t *testing.T) { spec.JWTExpiresAtValidationSkew = 0 //Default value - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(-time.Second), Code: http.StatusOK, @@ -714,7 +746,7 @@ func TestJWTSessionIssueAtValidationConfigs(t *testing.T) { t.Run("IssueAt-After_now-no_skew--Invalid_jwt", func(t *testing.T) { spec.JWTIssuedAtValidationSkew = 0 - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(+time.Minute), @@ -725,7 +757,7 @@ func TestJWTSessionIssueAtValidationConfigs(t *testing.T) { t.Run("IssueAt--After_now-Huge_skew--valid_jwt", func(t *testing.T) { spec.JWTIssuedAtValidationSkew = 1000 // This value doesn't matter since validation is disabled - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(+time.Second), @@ -736,7 +768,7 @@ func TestJWTSessionIssueAtValidationConfigs(t *testing.T) { // True by definition t.Run("IssueAt-Before_now-Add_skew--not_valid_jwt", func(t *testing.T) { spec.JWTIssuedAtValidationSkew = 2 // 2 seconds - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(-3 * time.Second), Code: http.StatusOK, @@ -746,7 +778,7 @@ func TestJWTSessionIssueAtValidationConfigs(t *testing.T) { t.Run("IssueAt-After_now-Add_skew--Valid_jwt", func(t *testing.T) { spec.JWTIssuedAtValidationSkew = 1 - LoadAPI(spec) + 
loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(+time.Second), Code: http.StatusOK, @@ -755,12 +787,12 @@ func TestJWTSessionIssueAtValidationConfigs(t *testing.T) { } func TestJWTSessionNotBeforeValidationConfigs(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - pID := CreatePolicy() + pID := createPolicy() jwtAuthHeaderGen := func(skew time.Duration) map[string]string { - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)["policy_id"] = pID t.Claims.(jwt.MapClaims)["user_id"] = "user123" t.Claims.(jwt.MapClaims)["nbf"] = time.Now().Add(skew).Unix() @@ -768,7 +800,7 @@ func TestJWTSessionNotBeforeValidationConfigs(t *testing.T) { return map[string]string{"authorization": jwtToken} } - spec := BuildAPI(func(spec *APISpec) { + spec := buildAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.Proxy.ListenPath = "/" @@ -782,7 +814,7 @@ func TestJWTSessionNotBeforeValidationConfigs(t *testing.T) { t.Run("NotBefore_Before_now-Valid_jwt", func(t *testing.T) { spec.JWTNotBeforeValidationSkew = 0 - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(-time.Second), Code: http.StatusOK, @@ -792,7 +824,7 @@ func TestJWTSessionNotBeforeValidationConfigs(t *testing.T) { t.Run("NotBefore_After_now--Invalid_jwt", func(t *testing.T) { spec.JWTNotBeforeValidationSkew = 0 //Default value - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(+time.Second), @@ -804,7 +836,7 @@ func TestJWTSessionNotBeforeValidationConfigs(t *testing.T) { t.Run("NotBefore_After_now-Add_skew--valid_jwt", func(t *testing.T) { spec.JWTNotBeforeValidationSkew = 1 - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(+time.Second), Code: http.StatusOK, @@ -814,7 +846,7 @@ func TestJWTSessionNotBeforeValidationConfigs(t *testing.T) { 
t.Run("NotBefore_After_now-Huge_skew--valid_jwt", func(t *testing.T) { spec.JWTNotBeforeValidationSkew = 1000 // This value is so high that it's actually similar to disabling the claim. - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: jwtAuthHeaderGen(+time.Second), Code: http.StatusOK, @@ -823,10 +855,10 @@ func TestJWTSessionNotBeforeValidationConfigs(t *testing.T) { } func TestJWTExistingSessionRSAWithRawSourceInvalidPolicyID(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - spec := BuildAPI(func(spec *APISpec) { + spec := buildAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = RSASign @@ -836,11 +868,11 @@ func TestJWTExistingSessionRSAWithRawSourceInvalidPolicyID(t *testing.T) { spec.Proxy.ListenPath = "/" })[0] - LoadAPI(spec) + loadAPI(spec) - p1ID := CreatePolicy() + p1ID := createPolicy() - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["user_id"] = "user" @@ -856,7 +888,7 @@ func TestJWTExistingSessionRSAWithRawSourceInvalidPolicyID(t *testing.T) { }) // put in JWT invalid policy ID and do request again - jwtTokenInvalidPolicy := CreateJWKToken(func(t *jwt.Token) { + jwtTokenInvalidPolicy := createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["user_id"] = "user" @@ -874,201 +906,11 @@ func TestJWTExistingSessionRSAWithRawSourceInvalidPolicyID(t *testing.T) { }) } -func TestJWTScopeToPolicyMapping(t *testing.T) { - ts := StartTest() - defer ts.Close() - - basePolicyID := CreatePolicy(func(p *user.Policy) { - p.AccessRights = map[string]user.AccessDefinition{ - "base-api": { - Limit: &user.APILimit{ - Rate: 111, - Per: 3600, - QuotaMax: -1, - }, - }, - } - p.Partitions = user.PolicyPartitions{ - PerAPI: true, - } - }) - - spec1 := 
BuildAPI(func(spec *APISpec) { - spec.APIID = "api1" - spec.UseKeylessAccess = false - spec.EnableJWT = true - spec.JWTSigningMethod = RSASign - spec.JWTSource = base64.StdEncoding.EncodeToString([]byte(jwtRSAPubKey)) - spec.JWTIdentityBaseField = "user_id" - spec.JWTPolicyFieldName = "policy_id" - spec.Proxy.ListenPath = "/api1" - })[0] - - p1ID := CreatePolicy(func(p *user.Policy) { - p.AccessRights = map[string]user.AccessDefinition{ - spec1.APIID: { - Limit: &user.APILimit{ - Rate: 100, - Per: 60, - QuotaMax: -1, - }, - }, - } - p.Partitions = user.PolicyPartitions{ - PerAPI: true, - } - }) - - spec2 := BuildAPI(func(spec *APISpec) { - spec.APIID = "api2" - spec.UseKeylessAccess = false - spec.EnableJWT = true - spec.JWTSigningMethod = RSASign - spec.JWTSource = base64.StdEncoding.EncodeToString([]byte(jwtRSAPubKey)) - spec.JWTIdentityBaseField = "user_id" - spec.JWTPolicyFieldName = "policy_id" - spec.Proxy.ListenPath = "/api2" - })[0] - - p2ID := CreatePolicy(func(p *user.Policy) { - p.AccessRights = map[string]user.AccessDefinition{ - spec2.APIID: { - Limit: &user.APILimit{ - Rate: 500, - Per: 30, - QuotaMax: -1, - }, - }, - } - p.Partitions = user.PolicyPartitions{ - PerAPI: true, - } - }) - - spec3 := BuildAPI(func(spec *APISpec) { - spec.APIID = "api3" - spec.UseKeylessAccess = false - spec.EnableJWT = true - spec.JWTSigningMethod = RSASign - spec.JWTSource = base64.StdEncoding.EncodeToString([]byte(jwtRSAPubKey)) - spec.JWTIdentityBaseField = "user_id" - spec.JWTPolicyFieldName = "policy_id" - spec.Proxy.ListenPath = "/api3" - })[0] - - spec := BuildAPI(func(spec *APISpec) { - spec.APIID = "base-api" - spec.UseKeylessAccess = false - spec.EnableJWT = true - spec.JWTSigningMethod = RSASign - spec.JWTSource = base64.StdEncoding.EncodeToString([]byte(jwtRSAPubKey)) - spec.JWTIdentityBaseField = "user_id" - spec.JWTPolicyFieldName = "policy_id" - spec.Proxy.ListenPath = "/base" - spec.JWTScopeToPolicyMapping = map[string]string{ - "user:read": p1ID, - 
"user:write": p2ID, - } - })[0] - - LoadAPI(spec, spec1, spec2, spec3) - - userID := "user-" + uuid.New() - - jwtToken := CreateJWKToken(func(t *jwt.Token) { - t.Header["kid"] = "12345" - t.Claims.(jwt.MapClaims)["foo"] = "bar" - t.Claims.(jwt.MapClaims)["user_id"] = userID - t.Claims.(jwt.MapClaims)["policy_id"] = basePolicyID - t.Claims.(jwt.MapClaims)["exp"] = time.Now().Add(time.Hour * 72).Unix() - t.Claims.(jwt.MapClaims)["scope"] = "user:read user:write" - }) - - authHeaders := map[string]string{"authorization": jwtToken} - t.Run("Request with scope in JWT to create a key session", func(t *testing.T) { - ts.Run(t, - test.TestCase{ - Headers: authHeaders, - Path: "/base", - Code: http.StatusOK, - }) - }) - - // check that key has right set of policies assigned - there should be all three - base one and two from scope - sessionID := generateToken("", fmt.Sprintf("%x", md5.Sum([]byte(userID)))) - t.Run("Request to check that session has got correct apply_policies value", func(t *testing.T) { - ts.Run( - t, - test.TestCase{ - Method: http.MethodGet, - Path: "/tyk/keys/" + sessionID, - AdminAuth: true, - Code: http.StatusOK, - BodyMatchFunc: func(body []byte) bool { - expectedResp := map[interface{}]bool{ - basePolicyID: true, - p1ID: true, - p2ID: true, - } - - resp := map[string]interface{}{} - json.Unmarshal(body, &resp) - realResp := map[interface{}]bool{} - for _, val := range resp["apply_policies"].([]interface{}) { - realResp[val] = true - } - - return reflect.DeepEqual(realResp, expectedResp) - }, - }, - ) - }) - - // try to access api1 using JWT issued via base-api - t.Run("Request to api1", func(t *testing.T) { - ts.Run( - t, - test.TestCase{ - Headers: authHeaders, - Method: http.MethodGet, - Path: "/api1", - Code: http.StatusOK, - }, - ) - }) - - // try to access api2 using JWT issued via base-api - t.Run("Request to api2", func(t *testing.T) { - ts.Run( - t, - test.TestCase{ - Headers: authHeaders, - Method: http.MethodGet, - Path: "/api2", - Code: 
http.StatusOK, - }, - ) - }) - - // try to access api3 (which is not granted via base policy nor scope-policy mapping) using JWT issued via base-api - t.Run("Request to api3", func(t *testing.T) { - ts.Run( - t, - test.TestCase{ - Headers: authHeaders, - Method: http.MethodGet, - Path: "/api3", - Code: http.StatusForbidden, - }, - ) - }) -} - func TestJWTExistingSessionRSAWithRawSourcePolicyIDChanged(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - spec := BuildAPI(func(spec *APISpec) { + spec := buildAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = RSASign @@ -1078,16 +920,16 @@ func TestJWTExistingSessionRSAWithRawSourcePolicyIDChanged(t *testing.T) { spec.Proxy.ListenPath = "/" })[0] - LoadAPI(spec) + loadAPI(spec) - p1ID := CreatePolicy(func(p *user.Policy) { + p1ID := createPolicy(func(p *user.Policy) { p.QuotaMax = 111 }) - p2ID := CreatePolicy(func(p *user.Policy) { + p2ID := createPolicy(func(p *user.Policy) { p.QuotaMax = 999 }) - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["user_id"] = "user" @@ -1117,7 +959,7 @@ func TestJWTExistingSessionRSAWithRawSourcePolicyIDChanged(t *testing.T) { // check key/session quota // put in JWT another valid policy ID and do request again - jwtTokenAnotherPolicy := CreateJWKToken(func(t *jwt.Token) { + jwtTokenAnotherPolicy := createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["user_id"] = "user" @@ -1145,7 +987,7 @@ func TestJWTExistingSessionRSAWithRawSourcePolicyIDChanged(t *testing.T) { // JWTSessionRSAWithJWK func prepareJWTSessionRSAWithJWK() string { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = RSASign @@ 
-1155,8 +997,8 @@ func prepareJWTSessionRSAWithJWK() string { spec.Proxy.ListenPath = "/" }) - pID := CreatePolicy() - jwtToken := CreateJWKToken(func(t *jwt.Token) { + pID := createPolicy() + jwtToken := createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["user_id"] = "user" @@ -1168,7 +1010,7 @@ func prepareJWTSessionRSAWithJWK() string { } func TestJWTSessionRSAWithJWK(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() jwtToken := prepareJWTSessionRSAWithJWK() @@ -1184,7 +1026,7 @@ func TestJWTSessionRSAWithJWK(t *testing.T) { func BenchmarkJWTSessionRSAWithJWK(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() jwtToken := prepareJWTSessionRSAWithJWK() @@ -1204,7 +1046,7 @@ func BenchmarkJWTSessionRSAWithJWK(b *testing.B) { // JWTSessionRSAWithEncodedJWK func prepareJWTSessionRSAWithEncodedJWK() (*APISpec, string) { - spec := BuildAPI(func(spec *APISpec) { + spec := buildAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = RSASign @@ -1213,8 +1055,8 @@ func prepareJWTSessionRSAWithEncodedJWK() (*APISpec, string) { spec.Proxy.ListenPath = "/" })[0] - pID := CreatePolicy() - jwtToken := CreateJWKToken(func(t *jwt.Token) { + pID := createPolicy() + jwtToken := createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" // Set some claims t.Claims.(jwt.MapClaims)["foo"] = "bar" @@ -1227,7 +1069,7 @@ func prepareJWTSessionRSAWithEncodedJWK() (*APISpec, string) { } func TestJWTSessionRSAWithEncodedJWK(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec, jwtToken := prepareJWTSessionRSAWithEncodedJWK() @@ -1236,7 +1078,7 @@ func TestJWTSessionRSAWithEncodedJWK(t *testing.T) { t.Run("Direct JWK URL", func(t *testing.T) { spec.JWTSource = testHttpJWK - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: authHeaders, Code: 
http.StatusOK, @@ -1245,7 +1087,7 @@ func TestJWTSessionRSAWithEncodedJWK(t *testing.T) { t.Run("Base64 JWK URL", func(t *testing.T) { spec.JWTSource = base64.StdEncoding.EncodeToString([]byte(testHttpJWK)) - LoadAPI(spec) + loadAPI(spec) ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, @@ -1256,13 +1098,13 @@ func TestJWTSessionRSAWithEncodedJWK(t *testing.T) { func BenchmarkJWTSessionRSAWithEncodedJWK(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec, jwtToken := prepareJWTSessionRSAWithEncodedJWK() spec.JWTSource = base64.StdEncoding.EncodeToString([]byte(testHttpJWK)) - LoadAPI(spec) + loadAPI(spec) authHeaders := map[string]string{"authorization": jwtToken} @@ -1278,12 +1120,12 @@ func BenchmarkJWTSessionRSAWithEncodedJWK(b *testing.B) { } func TestJWTHMACIdNewClaim(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //If we skip the check then the Id will be taken from SUB and the call will succeed _, jwtToken := prepareGenericJWTSession(t.Name(), HMACSign, "user-id", true) - defer ResetTestConfig() + defer resetTestConfig() authHeaders := map[string]string{"authorization": jwtToken} t.Run("Request with valid JWT/HMAC signature/id in user-id claim", func(t *testing.T) { ts.Run(t, test.TestCase{ @@ -1293,10 +1135,10 @@ func TestJWTHMACIdNewClaim(t *testing.T) { } func TestJWTRSAIdInClaimsWithBaseField(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = RSASign @@ -1306,10 +1148,10 @@ func TestJWTRSAIdInClaimsWithBaseField(t *testing.T) { spec.Proxy.ListenPath = "/" }) - pID := CreatePolicy() + pID := createPolicy() //First test - user id in the configured base field 'user_id' - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { 
t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["user_id"] = "user123@test.com" @@ -1324,7 +1166,7 @@ func TestJWTRSAIdInClaimsWithBaseField(t *testing.T) { }) //user-id claim configured but it's empty - returning an error - jwtToken = CreateJWKToken(func(t *jwt.Token) { + jwtToken = createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["user_id"] = "" @@ -1341,7 +1183,7 @@ func TestJWTRSAIdInClaimsWithBaseField(t *testing.T) { }) //user-id claim configured but not found fallback to sub - jwtToken = CreateJWKToken(func(t *jwt.Token) { + jwtToken = createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["sub"] = "user123@test.com" @@ -1356,7 +1198,7 @@ func TestJWTRSAIdInClaimsWithBaseField(t *testing.T) { }) //user-id claim not found fallback to sub that is empty - jwtToken = CreateJWKToken(func(t *jwt.Token) { + jwtToken = createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["sub"] = "" @@ -1373,7 +1215,7 @@ func TestJWTRSAIdInClaimsWithBaseField(t *testing.T) { }) //user-id and sub claims not found - jwtToken = CreateJWKToken(func(t *jwt.Token) { + jwtToken = createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["policy_id"] = pID @@ -1390,10 +1232,10 @@ func TestJWTRSAIdInClaimsWithBaseField(t *testing.T) { } func TestJWTRSAIdInClaimsWithoutBaseField(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = RSASign @@ -1403,9 +1245,9 @@ func TestJWTRSAIdInClaimsWithoutBaseField(t *testing.T) { spec.Proxy.ListenPath = "/" }) - pID := CreatePolicy() + pID := 
createPolicy() - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["sub"] = "user123@test.com" //is ignored @@ -1420,7 +1262,7 @@ func TestJWTRSAIdInClaimsWithoutBaseField(t *testing.T) { }) //Id is not found since there's no sub claim and user_id has't been set in the api def (spec.JWTIdentityBaseField) - jwtToken = CreateJWKToken(func(t *jwt.Token) { + jwtToken = createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["user_id"] = "user123@test.com" //is ignored @@ -1442,10 +1284,10 @@ func TestJWTDefaultPolicies(t *testing.T) { const identitySource = "user_id" const policyFieldName = "policy_id" - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - defPol1 := CreatePolicy(func(p *user.Policy) { + defPol1 := createPolicy(func(p *user.Policy) { p.AccessRights = map[string]user.AccessDefinition{ apiID: {}, } @@ -1454,7 +1296,7 @@ func TestJWTDefaultPolicies(t *testing.T) { } }) - defPol2 := CreatePolicy(func(p *user.Policy) { + defPol2 := createPolicy(func(p *user.Policy) { p.AccessRights = map[string]user.AccessDefinition{ apiID: {}, } @@ -1463,7 +1305,7 @@ func TestJWTDefaultPolicies(t *testing.T) { } }) - tokenPol := CreatePolicy(func(p *user.Policy) { + tokenPol := createPolicy(func(p *user.Policy) { p.AccessRights = map[string]user.AccessDefinition{ apiID: {}, } @@ -1472,7 +1314,7 @@ func TestJWTDefaultPolicies(t *testing.T) { } }) - spec := BuildAPI(func(spec *APISpec) { + spec := buildAPI(func(spec *APISpec) { spec.APIID = apiID spec.UseKeylessAccess = false spec.EnableJWT = true @@ -1499,7 +1341,7 @@ func TestJWTDefaultPolicies(t *testing.T) { } t.Run("Policy field name empty", func(t *testing.T) { - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)[identitySource] = 
"dummy" t.Claims.(jwt.MapClaims)[policyFieldName] = tokenPol }) @@ -1507,7 +1349,7 @@ func TestJWTDefaultPolicies(t *testing.T) { authHeaders := map[string]string{"authorization": jwtToken} // Default - LoadAPI(spec) + loadAPI(spec) _, _ = ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, }) @@ -1521,7 +1363,7 @@ func TestJWTDefaultPolicies(t *testing.T) { // Remove one of default policies spec.JWTDefaultPolicies = []string{defPol1} - LoadAPI(spec) + loadAPI(spec) _, _ = ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, }) @@ -1529,7 +1371,7 @@ func TestJWTDefaultPolicies(t *testing.T) { // Add a default policy spec.JWTDefaultPolicies = []string{defPol1, defPol2} - LoadAPI(spec) + loadAPI(spec) _, _ = ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, }) @@ -1537,7 +1379,7 @@ func TestJWTDefaultPolicies(t *testing.T) { }) t.Run("Policy field name nonempty but empty claim", func(t *testing.T) { - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)[identitySource] = "dummy" t.Claims.(jwt.MapClaims)[policyFieldName] = "" }) @@ -1545,7 +1387,7 @@ func TestJWTDefaultPolicies(t *testing.T) { authHeaders := map[string]string{"authorization": jwtToken} // Default - LoadAPI(spec) + loadAPI(spec) _, _ = ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, }) @@ -1559,7 +1401,7 @@ func TestJWTDefaultPolicies(t *testing.T) { // Remove one of default policies spec.JWTDefaultPolicies = []string{defPol1} - LoadAPI(spec) + loadAPI(spec) _, _ = ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, }) @@ -1567,7 +1409,7 @@ func TestJWTDefaultPolicies(t *testing.T) { // Add a default policy spec.JWTDefaultPolicies = []string{defPol1, defPol2} - LoadAPI(spec) + loadAPI(spec) _, _ = ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, }) @@ -1576,9 +1418,9 @@ func TestJWTDefaultPolicies(t *testing.T) { t.Run("Policy 
field name nonempty invalid policy ID in claim", func(t *testing.T) { spec.JWTPolicyFieldName = policyFieldName - LoadAPI(spec) + loadAPI(spec) - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)[identitySource] = "dummy" t.Claims.(jwt.MapClaims)[policyFieldName] = "invalid" }) @@ -1595,7 +1437,7 @@ func TestJWTDefaultPolicies(t *testing.T) { }) t.Run("Default to Claim transition", func(t *testing.T) { - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)[identitySource] = "dummy" t.Claims.(jwt.MapClaims)[policyFieldName] = tokenPol }) @@ -1603,14 +1445,14 @@ func TestJWTDefaultPolicies(t *testing.T) { authHeaders := map[string]string{"authorization": jwtToken} // Default - LoadAPI(spec) + loadAPI(spec) _, _ = ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, }) assert(t, []string{defPol1, defPol2}) // Same to check stored correctly - LoadAPI(spec) + loadAPI(spec) _, _ = ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, }) @@ -1618,7 +1460,7 @@ func TestJWTDefaultPolicies(t *testing.T) { // Claim spec.JWTPolicyFieldName = policyFieldName - LoadAPI(spec) + loadAPI(spec) _, _ = ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, }) @@ -1626,7 +1468,7 @@ func TestJWTDefaultPolicies(t *testing.T) { }) t.Run("Claim to Default transition", func(t *testing.T) { - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)[identitySource] = "dummy" t.Claims.(jwt.MapClaims)[policyFieldName] = tokenPol }) @@ -1635,7 +1477,7 @@ func TestJWTDefaultPolicies(t *testing.T) { // Claim spec.JWTPolicyFieldName = policyFieldName - LoadAPI(spec) + loadAPI(spec) _, _ = ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, }) @@ -1649,7 +1491,7 @@ func TestJWTDefaultPolicies(t *testing.T) { // Default 
spec.JWTPolicyFieldName = "" - LoadAPI(spec) + loadAPI(spec) _, _ = ts.Run(t, test.TestCase{ Headers: authHeaders, Code: http.StatusOK, }) @@ -1658,12 +1500,12 @@ func TestJWTDefaultPolicies(t *testing.T) { } func TestJWTECDSASign(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //If we skip the check then the Id will be taken from SUB and the call will succeed _, jwtToken := prepareGenericJWTSession(t.Name(), ECDSASign, KID, false) - defer ResetTestConfig() + defer resetTestConfig() authHeaders := map[string]string{"authorization": jwtToken} t.Run("Request with valid JWT/ECDSA signature needs a test. currently defaults to HMAC", func(t *testing.T) { ts.Run(t, test.TestCase{ @@ -1673,12 +1515,12 @@ func TestJWTECDSASign(t *testing.T) { } func TestJWTUnknownSign(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() //If we skip the check then the Id will be taken from SUB and the call will succeed _, jwtToken := prepareGenericJWTSession(t.Name(), "bla", KID, false) - defer ResetTestConfig() + defer resetTestConfig() authHeaders := map[string]string{"authorization": jwtToken} t.Run("Request with valid JWT/ECDSA signature needs a test. 
currently defaults to HMAC", func(t *testing.T) { ts.Run(t, test.TestCase{ @@ -1688,10 +1530,10 @@ func TestJWTUnknownSign(t *testing.T) { } func TestJWTRSAInvalidPublickKey(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = RSASign @@ -1700,9 +1542,9 @@ func TestJWTRSAInvalidPublickKey(t *testing.T) { spec.Proxy.ListenPath = "/" }) - pID := CreatePolicy() + pID := createPolicy() - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Header["kid"] = "12345" t.Claims.(jwt.MapClaims)["foo"] = "bar" t.Claims.(jwt.MapClaims)["sub"] = "user123@test.com" //is ignored @@ -1721,7 +1563,7 @@ func TestJWTRSAInvalidPublickKey(t *testing.T) { func createExpiringPolicy(pGen ...func(p *user.Policy)) string { pID := keyGen.GenerateAuthKey("") - pol := CreateStandardPolicy() + pol := createStandardPolicy() pol.ID = pID pol.KeyExpiresIn = 1 @@ -1737,10 +1579,10 @@ func createExpiringPolicy(pGen ...func(p *user.Policy)) string { } func TestJWTExpOverride(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.EnableJWT = true spec.JWTSigningMethod = RSASign @@ -1751,11 +1593,11 @@ func TestJWTExpOverride(t *testing.T) { t.Run("JWT expiration bigger then policy", func(t *testing.T) { //create policy which sets keys to have expiry in one second - pID := CreatePolicy(func(p *user.Policy) { + pID := createPolicy(func(p *user.Policy) { p.KeyExpiresIn = 1 }) - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)["sub"] = uuid.New() t.Claims.(jwt.MapClaims)["policy_id"] = pID t.Claims.(jwt.MapClaims)["exp"] = time.Now().Add(time.Second * 72).Unix() @@ 
-1771,11 +1613,11 @@ func TestJWTExpOverride(t *testing.T) { }) t.Run("JWT expiration smaller then policy", func(t *testing.T) { - pID := CreatePolicy(func(p *user.Policy) { + pID := createPolicy(func(p *user.Policy) { p.KeyExpiresIn = 5 }) - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)["sub"] = uuid.New() t.Claims.(jwt.MapClaims)["policy_id"] = pID t.Claims.(jwt.MapClaims)["exp"] = time.Now().Add(-time.Second).Unix() @@ -1790,19 +1632,19 @@ func TestJWTExpOverride(t *testing.T) { }) t.Run("JWT expired but renewed, policy without expiration", func(t *testing.T) { - pID := CreatePolicy(func(p *user.Policy) { + pID := createPolicy(func(p *user.Policy) { p.KeyExpiresIn = 0 }) userID := uuid.New() - jwtToken := CreateJWKToken(func(t *jwt.Token) { + jwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)["sub"] = userID t.Claims.(jwt.MapClaims)["policy_id"] = pID t.Claims.(jwt.MapClaims)["exp"] = time.Now().Add(time.Second).Unix() }) - newJwtToken := CreateJWKToken(func(t *jwt.Token) { + newJwtToken := createJWKToken(func(t *jwt.Token) { t.Claims.(jwt.MapClaims)["sub"] = userID t.Claims.(jwt.MapClaims)["policy_id"] = pID t.Claims.(jwt.MapClaims)["exp"] = time.Now().Add(5 * time.Second).Unix() diff --git a/gateway/mw_key_expired_check.go b/mw_key_expired_check.go similarity index 99% rename from gateway/mw_key_expired_check.go rename to mw_key_expired_check.go index 39e152cac9cc..f3b37f93e26e 100644 --- a/gateway/mw_key_expired_check.go +++ b/mw_key_expired_check.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_method_transform.go b/mw_method_transform.go similarity index 98% rename from gateway/mw_method_transform.go rename to mw_method_transform.go index cef29a445c20..0333460d968d 100644 --- a/gateway/mw_method_transform.go +++ b/mw_method_transform.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git 
a/gateway/mw_modify_headers.go b/mw_modify_headers.go similarity index 98% rename from gateway/mw_modify_headers.go rename to mw_modify_headers.go index ecc22f1ac696..bb34c4c24098 100644 --- a/gateway/mw_modify_headers.go +++ b/mw_modify_headers.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" diff --git a/gateway/mw_oauth2_key_exists.go b/mw_oauth2_key_exists.go similarity index 99% rename from gateway/mw_oauth2_key_exists.go rename to mw_oauth2_key_exists.go index 12c1aa7f6296..2aa458bdb7a1 100644 --- a/gateway/mw_oauth2_key_exists.go +++ b/mw_oauth2_key_exists.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_openid.go b/mw_openid.go similarity index 82% rename from gateway/mw_openid.go rename to mw_openid.go index d6b800a0f919..b89f75d4dedc 100644 --- a/gateway/mw_openid.go +++ b/mw_openid.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/md5" @@ -9,7 +9,7 @@ import ( "sync" "github.com/Sirupsen/logrus" - jwt "github.com/dgrijalva/jwt-go" + "github.com/dgrijalva/jwt-go" "github.com/TykTechnologies/openid2go/openid" "github.com/TykTechnologies/tyk/apidef" @@ -113,9 +113,6 @@ func (k *OpenIDMW) ProcessRequest(w http.ResponseWriter, r *http.Request, _ inte return errors.New("Key not authorised"), http.StatusUnauthorized } - // decide if we use policy ID from provider client settings or list of policies from scope-policy mapping - useScope := len(k.Spec.JWTScopeToPolicyMapping) != 0 - k.lock.RLock() clientSet, foundIssuer := k.provider_client_policymap[iss.(string)] k.lock.RUnlock() @@ -146,7 +143,7 @@ func (k *OpenIDMW) ProcessRequest(w http.ResponseWriter, r *http.Request, _ inte } } - if !useScope && policyID == "" { + if policyID == "" { logger.Error("No matching policy found!") k.reportLoginFailure("[NOT GENERATED]", r) return errors.New("Key not authorised"), http.StatusUnauthorized @@ -164,43 +161,24 @@ func (k *OpenIDMW) ProcessRequest(w http.ResponseWriter, r *http.Request, _ 
inte logger.Debug("Generated Session ID: ", sessionID) - var policiesToApply []string - if !useScope { - policiesToApply = append(policiesToApply, policyID) - } else { - scopeClaimName := k.Spec.JWTScopeClaimName - if scopeClaimName == "" { - scopeClaimName = "scope" - } - - if scope := getScopeFromClaim(token.Claims.(jwt.MapClaims), scopeClaimName); scope != nil { - // add all policies matched from scope-policy mapping - policiesToApply = mapScopeToPolicies(k.Spec.JWTScopeToPolicyMapping, scope) - } - } - session, exists := k.CheckSessionAndIdentityForValidKey(sessionID, r) if !exists { // Create it logger.Debug("Key does not exist, creating") session = user.SessionState{} - if !useScope { - // We need a base policy as a template, either get it from the token itself OR a proxy client ID within Tyk - newSession, err := generateSessionFromPolicy(policyID, - k.Spec.OrgID, - true) - - if err != nil { - k.reportLoginFailure(sessionID, r) - logger.Error("Could not find a valid policy to apply to this token!") - return errors.New("Key not authorized: no matching policy"), http.StatusForbidden - } + // We need a base policy as a template, either get it from the token itself OR a proxy client ID within Tyk + newSession, err := generateSessionFromPolicy(policyID, + k.Spec.OrgID, + true) - session = newSession + if err != nil { + k.reportLoginFailure(sessionID, r) + logger.Error("Could not find a valid policy to apply to this token!") + return errors.New("Key not authorized: no matching policy"), http.StatusForbidden } - session.OrgID = k.Spec.OrgID + session = newSession session.MetaData = map[string]interface{}{"TykJWTSessionID": sessionID, "ClientID": clientID} session.Alias = clientID + ":" + ouser.ID @@ -208,7 +186,7 @@ func (k *OpenIDMW) ProcessRequest(w http.ResponseWriter, r *http.Request, _ inte logger.Debug("Policy applied to key") } // apply new policy to session if any and update session - session.SetPolicies(policiesToApply...) 
+ session.SetPolicies(policyID) if err := k.ApplyPolicies(&session); err != nil { k.Logger().WithError(err).Error("Could not apply new policy from OIDC client to session") return errors.New("Key not authorized: could not apply new policy"), http.StatusForbidden diff --git a/gateway/mw_organisation_activity.go b/mw_organisation_activity.go similarity index 98% rename from gateway/mw_organisation_activity.go rename to mw_organisation_activity.go index 679a79e2fe45..22d2bc63049b 100644 --- a/gateway/mw_organisation_activity.go +++ b/mw_organisation_activity.go @@ -1,12 +1,13 @@ -package gateway +package main import ( - "errors" "net/http" "sync" + + "errors" + "time" - "github.com/TykTechnologies/tyk/ctx" "github.com/TykTechnologies/tyk/request" "github.com/TykTechnologies/tyk/user" ) @@ -170,7 +171,7 @@ func (k *OrganizationMonitor) ProcessRequestLive(r *http.Request, orgSession use } // Lets keep a reference of the org - setCtxValue(r, ctx.OrgSessionContext, orgSession) + setCtxValue(r, OrgSessionContext, orgSession) // Request is valid, carry on return nil, http.StatusOK @@ -197,7 +198,7 @@ func (k *OrganizationMonitor) ProcessRequestOffThread(r *http.Request, orgSessio // Lets keep a reference of the org // session might be updated by go-routine AllowAccessNext and we loose those changes here // but it is OK as we need it in context for detailed org logging - setCtxValue(r, ctx.OrgSessionContext, orgSession) + setCtxValue(r, OrgSessionContext, orgSession) orgSessionCopy := orgSession go k.AllowAccessNext( diff --git a/gateway/mw_organization_activity_test.go b/mw_organization_activity_test.go similarity index 93% rename from gateway/mw_organization_activity_test.go rename to mw_organization_activity_test.go index 6c73b68102d4..4f6786fc39e5 100644 --- a/gateway/mw_organization_activity_test.go +++ b/mw_organization_activity_test.go @@ -1,22 +1,22 @@ // +build !race -package gateway +package main import ( "net/http" "testing" "time" - uuid 
"github.com/satori/go.uuid" + "github.com/satori/go.uuid" "github.com/TykTechnologies/tyk/config" "github.com/TykTechnologies/tyk/test" ) -func testPrepareProcessRequestQuotaLimit(tb testing.TB, ts Test, data map[string]interface{}) { +func testPrepareProcessRequestQuotaLimit(tb testing.TB, ts tykTestServer, data map[string]interface{}) { // load API orgID := "test-org-" + uuid.NewV4().String() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = true spec.OrgID = orgID spec.Proxy.ListenPath = "/" @@ -42,7 +42,7 @@ func TestProcessRequestLiveQuotaLimit(t *testing.T) { config.SetGlobal(globalConf) // run test server - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // load API @@ -88,10 +88,10 @@ func BenchmarkProcessRequestLiveQuotaLimit(b *testing.B) { globalConf.ExperimentalProcessOrgOffThread = false config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() // run test server - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // load API @@ -118,10 +118,10 @@ func TestProcessRequestOffThreadQuotaLimit(t *testing.T) { globalConf.EnforceOrgQuotas = true globalConf.ExperimentalProcessOrgOffThread = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() // run test server - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // load API @@ -190,10 +190,10 @@ func BenchmarkProcessRequestOffThreadQuotaLimit(b *testing.B) { globalConf.ExperimentalProcessOrgOffThread = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() // run test server - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // load API @@ -221,10 +221,10 @@ func TestProcessRequestLiveRedisRollingLimiter(t *testing.T) { globalConf.EnableRedisRollingLimiter = true globalConf.ExperimentalProcessOrgOffThread = false config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() // run test server - ts := 
StartTest() + ts := newTykTestServer() defer ts.Close() // load API @@ -278,10 +278,10 @@ func BenchmarkProcessRequestLiveRedisRollingLimiter(b *testing.B) { globalConf.ExperimentalProcessOrgOffThread = false config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() // run test server - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // load API @@ -309,10 +309,10 @@ func TestProcessRequestOffThreadRedisRollingLimiter(t *testing.T) { globalConf.EnableRedisRollingLimiter = true globalConf.ExperimentalProcessOrgOffThread = true config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() // run test server - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // load API @@ -367,7 +367,7 @@ func BenchmarkProcessRequestOffThreadRedisRollingLimiter(b *testing.B) { config.SetGlobal(globalConf) // run test server - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // load API diff --git a/gateway/mw_rate_check.go b/mw_rate_check.go similarity index 95% rename from gateway/mw_rate_check.go rename to mw_rate_check.go index 7509b6aee68d..d6bb310142c5 100644 --- a/gateway/mw_rate_check.go +++ b/mw_rate_check.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" diff --git a/gateway/mw_rate_limiting.go b/mw_rate_limiting.go similarity index 99% rename from gateway/mw_rate_limiting.go rename to mw_rate_limiting.go index 72ca52ee8b66..8ef09e5dadc1 100644 --- a/gateway/mw_rate_limiting.go +++ b/mw_rate_limiting.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_redis_cache.go b/mw_redis_cache.go similarity index 78% rename from gateway/mw_redis_cache.go rename to mw_redis_cache.go index c76bfdf0418b..66ad0bc5d3ca 100644 --- a/gateway/mw_redis_cache.go +++ b/mw_redis_cache.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bufio" @@ -8,16 +8,11 @@ import ( "encoding/hex" "errors" "io" - "io/ioutil" "net/http" "strconv" "strings" "time" - 
"golang.org/x/sync/singleflight" - - "github.com/TykTechnologies/murmur3" - "github.com/TykTechnologies/tyk/regexp" "github.com/TykTechnologies/tyk/request" "github.com/TykTechnologies/tyk/storage" ) @@ -30,9 +25,8 @@ const ( // RedisCacheMiddleware is a caching middleware that will pull data from Redis instead of the upstream proxy type RedisCacheMiddleware struct { BaseMiddleware - CacheStore storage.Handler - sh SuccessHandler - singleFlight singleflight.Group + CacheStore storage.Handler + sh SuccessHandler } func (m *RedisCacheMiddleware) Name() string { @@ -47,44 +41,13 @@ func (m *RedisCacheMiddleware) EnabledForSpec() bool { return m.Spec.CacheOptions.EnableCache } -func (m *RedisCacheMiddleware) CreateCheckSum(req *http.Request, keyName string, regex string) (string, error) { +func (m *RedisCacheMiddleware) CreateCheckSum(req *http.Request, keyName string) string { h := md5.New() io.WriteString(h, req.Method) io.WriteString(h, "-") io.WriteString(h, req.URL.String()) - if req.Method == http.MethodPost { - if req.Body != nil { - bodyBytes, err := ioutil.ReadAll(req.Body) - - if err != nil { - return "", err - } - - defer req.Body.Close() - req.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) - - m := murmur3.New128() - if regex == "" { - io.WriteString(h, "-") - m.Write(bodyBytes) - io.WriteString(h, hex.EncodeToString(m.Sum(nil))) - } else { - r, err := regexp.Compile(regex) - if err != nil { - return "", err - } - match := r.Find(bodyBytes) - if match != nil { - io.WriteString(h, "-") - m.Write(match) - io.WriteString(h, hex.EncodeToString(m.Sum(nil))) - } - } - } - } - reqChecksum := hex.EncodeToString(h.Sum(nil)) - return m.Spec.APIID + keyName + reqChecksum, nil + return m.Spec.APIID + keyName + reqChecksum } func (m *RedisCacheMiddleware) getTimeTTL(cacheTTL int64) string { @@ -136,28 +99,25 @@ func (m *RedisCacheMiddleware) decodePayload(payload string) (string, string, er // ProcessRequest will run any checks on the request on the way through the 
system, return an error to have the chain fail func (m *RedisCacheMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Request, _ interface{}) (error, int) { + // Only allow idempotent (safe) methods - if r.Method != "GET" && r.Method != "HEAD" && r.Method != "OPTIONS" && r.Method != "POST" { + if r.Method != "GET" && r.Method != "HEAD" && r.Method != "OPTIONS" { return nil, http.StatusOK } var stat RequestStatus - var cacheKeyRegex string _, versionPaths, _, _ := m.Spec.Version(r) isVirtual, _ := m.Spec.CheckSpecMatchesStatus(r, versionPaths, VirtualPath) // Lets see if we can throw a sledgehammer at this - if m.Spec.CacheOptions.CacheAllSafeRequests && r.Method != "POST" { + if m.Spec.CacheOptions.CacheAllSafeRequests { stat = StatusCached - } - if stat != StatusCached { + } else { // New request checker, more targeted, less likely to fail - found, meta := m.Spec.CheckSpecMatchesStatus(r, versionPaths, Cached) + found, _ := m.Spec.CheckSpecMatchesStatus(r, versionPaths, Cached) if found { - cacheMeta := meta.(*EndPointCacheMeta) stat = StatusCached - cacheKeyRegex = cacheMeta.CacheKeyRegex } } @@ -172,25 +132,10 @@ func (m *RedisCacheMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Req token = request.RealIP(r) } - var errCreatingChecksum bool - var retBlob string - key, err := m.CreateCheckSum(r, token, cacheKeyRegex) + key := m.CreateCheckSum(r, token) + retBlob, err := m.CacheStore.GetKey(key) if err != nil { - log.Debug("Error creating checksum. 
Skipping cache check") - errCreatingChecksum = true - } else { - retBlob, err = m.CacheStore.GetKey(key) - v, sfErr, _ := m.singleFlight.Do(key, func() (interface{}, error) { - return m.CacheStore.GetKey(key) - }) - retBlob = v.(string) - err = sfErr - } - - if err != nil { - if !errCreatingChecksum { - log.Debug("Cache enabled, but record not found") - } + log.Debug("Cache enabled, but record not found") // Pass through to proxy AND CACHE RESULT var resVal *http.Response @@ -254,7 +199,7 @@ func (m *RedisCacheMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Req } } - if cacheThisRequest && !errCreatingChecksum { + if cacheThisRequest { log.Debug("Caching request to redis") var wireFormatReq bytes.Buffer resVal.Write(&wireFormatReq) @@ -262,6 +207,7 @@ func (m *RedisCacheMiddleware) ProcessRequest(w http.ResponseWriter, r *http.Req ts := m.getTimeTTL(cacheTTL) toStore := m.encodePayload(wireFormatReq.String(), ts) go m.CacheStore.SetKey(key, toStore, cacheTTL) + } return nil, mwStatusRespond diff --git a/gateway/mw_redis_cache_test.go b/mw_redis_cache_test.go similarity index 88% rename from gateway/mw_redis_cache_test.go rename to mw_redis_cache_test.go index 34635a6f8db5..53b7d760a182 100644 --- a/gateway/mw_redis_cache_test.go +++ b/mw_redis_cache_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "testing" @@ -15,15 +15,15 @@ func TestRedisCacheMiddleware_WithCompressedResponse(t *testing.T) { globalConf.AnalyticsConfig.EnableDetailedRecording = true config.SetGlobal(globalConf) - ts := StartTest() + ts := newTykTestServer() defer ts.Close() createAPI := func(withCache bool) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.CacheOptions.CacheTimeout = 60 spec.CacheOptions.EnableCache = withCache - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.ExtendedPaths.Cached = []string{path} }) }) diff --git 
a/gateway/mw_request_size_limit.go b/mw_request_size_limit.go similarity index 99% rename from gateway/mw_request_size_limit.go rename to mw_request_size_limit.go index a12a52755f08..a12dd702cec9 100644 --- a/gateway/mw_request_size_limit.go +++ b/mw_request_size_limit.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_strip_auth.go b/mw_strip_auth.go similarity index 99% rename from gateway/mw_strip_auth.go rename to mw_strip_auth.go index 2b632be24a13..153eac6acd03 100644 --- a/gateway/mw_strip_auth.go +++ b/mw_strip_auth.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" diff --git a/gateway/mw_strip_auth_test.go b/mw_strip_auth_test.go similarity index 99% rename from gateway/mw_strip_auth_test.go rename to mw_strip_auth_test.go index 0d3b99e99b76..eac782219c34 100644 --- a/gateway/mw_strip_auth_test.go +++ b/mw_strip_auth_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "fmt" diff --git a/gateway/mw_track_endpoints.go b/mw_track_endpoints.go similarity index 98% rename from gateway/mw_track_endpoints.go rename to mw_track_endpoints.go index 4a7099b663d3..a4b9a25904fe 100644 --- a/gateway/mw_track_endpoints.go +++ b/mw_track_endpoints.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" diff --git a/gateway/mw_transform.go b/mw_transform.go similarity index 99% rename from gateway/mw_transform.go rename to mw_transform.go index 423f5c15b4e6..c7c9a7f44b7e 100644 --- a/gateway/mw_transform.go +++ b/mw_transform.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" diff --git a/gateway/mw_transform_jq.go b/mw_transform_jq.go similarity index 99% rename from gateway/mw_transform_jq.go rename to mw_transform_jq.go index d5a09b9f7c53..32f6d88c66c9 100644 --- a/gateway/mw_transform_jq.go +++ b/mw_transform_jq.go @@ -1,6 +1,6 @@ // +build jq -package gateway +package main import ( "bytes" diff --git a/gateway/mw_transform_jq_dummy.go b/mw_transform_jq_dummy.go 
similarity index 98% rename from gateway/mw_transform_jq_dummy.go rename to mw_transform_jq_dummy.go index 234789204ed3..f42f6b4ca8e1 100644 --- a/gateway/mw_transform_jq_dummy.go +++ b/mw_transform_jq_dummy.go @@ -1,6 +1,6 @@ // +build !jq -package gateway +package main import ( "net/http" diff --git a/gateway/mw_transform_jq_test.go b/mw_transform_jq_test.go similarity index 90% rename from gateway/mw_transform_jq_test.go rename to mw_transform_jq_test.go index 0e4862e22b65..1c9e06c10940 100644 --- a/gateway/mw_transform_jq_test.go +++ b/mw_transform_jq_test.go @@ -1,6 +1,6 @@ // +build jq -package gateway +package main import ( "testing" @@ -10,10 +10,10 @@ import ( ) func testPrepareJQMiddleware() { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.EnableContextVars = true - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.UseExtendedPaths = true v.ExtendedPaths.TransformJQ = []apidef.TransformJQMeta{{ Path: "/jq", @@ -25,7 +25,7 @@ func testPrepareJQMiddleware() { } func TestJQMiddleware(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() testPrepareJQMiddleware() @@ -43,7 +43,7 @@ func TestJQMiddleware(t *testing.T) { func BenchmarkJQMiddleware(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() testPrepareJQMiddleware() diff --git a/gateway/mw_transform_test.go b/mw_transform_test.go similarity index 76% rename from gateway/mw_transform_test.go rename to mw_transform_test.go index 7af4e65b19a2..fb1473f933c5 100644 --- a/gateway/mw_transform_test.go +++ b/mw_transform_test.go @@ -1,14 +1,11 @@ -package gateway +package main import ( - "encoding/base64" "io/ioutil" "strings" "testing" "text/template" - "github.com/TykTechnologies/tyk/test" - "github.com/TykTechnologies/tyk/apidef" ) @@ -29,7 +26,7 @@ func TestTransformNonAscii(t *testing.T) { tmeta, in := 
testPrepareTransformNonAscii() want := `["Jyväskylä", "Hyvinkää"]` - r := TestReq(t, "GET", "/", in) + r := testReq(t, "GET", "/", in) if err := transformBody(r, tmeta, false); err != nil { t.Fatalf("wanted nil error, got %v", err) } @@ -47,7 +44,7 @@ func BenchmarkTransformNonAscii(b *testing.B) { tmeta, in := testPrepareTransformNonAscii() for i := 0; i < b.N; i++ { - r := TestReq(b, "GET", "/", in) + r := testReq(b, "GET", "/", in) if err := transformBody(r, tmeta, false); err != nil { b.Fatalf("wanted nil error, got %v", err) } @@ -58,7 +55,7 @@ func TestTransformXMLCrash(t *testing.T) { // mxj.NewMapXmlReader used to take forever and crash the // process by eating up all the memory. in := strings.NewReader("not xml") - r := TestReq(t, "GET", "/", in) + r := testReq(t, "GET", "/", in) tmeta := &TransformSpec{} tmeta.TemplateData.Input = apidef.RequestXML tmeta.Template = template.Must(apidef.Template.New("").Parse("")) @@ -106,7 +103,7 @@ func TestTransformJSONMarshalXMLInput(t *testing.T) { tmeta, in := testPrepareTransformJSONMarshal("xml") want := `["Foo\"oo", "Bàr"]` - r := TestReq(t, "GET", "/", in) + r := testReq(t, "GET", "/", in) if err := transformBody(r, tmeta, false); err != nil { t.Fatalf("wanted nil error, got %v", err) } @@ -123,7 +120,7 @@ func TestTransformJSONMarshalJSONInput(t *testing.T) { tmeta, in := testPrepareTransformJSONMarshal("json") want := `["Foo\"oo", "Bàr"]` - r := TestReq(t, "GET", "/", in) + r := testReq(t, "GET", "/", in) if err := transformBody(r, tmeta, false); err != nil { t.Fatalf("wanted nil error, got %v", err) } @@ -152,7 +149,7 @@ func TestTransformJSONMarshalJSONArrayInput(t *testing.T) { tmeta, in := testPrepareTransformJSONMarshalArray(t) want := `[123,456]` - r := TestReq(t, "GET", "/", in) + r := testReq(t, "GET", "/", in) if err := transformBody(r, tmeta, false); err != nil { t.Fatalf("wanted nil error, got %v", err) } @@ -171,7 +168,7 @@ func BenchmarkTransformJSONMarshal(b *testing.B) { tmeta, in := 
testPrepareTransformJSONMarshal("xml") for i := 0; i < b.N; i++ { - r := TestReq(b, "GET", "/", in) + r := testReq(b, "GET", "/", in) if err := transformBody(r, tmeta, false); err != nil { b.Fatalf("wanted nil error, got %v", err) } @@ -181,7 +178,7 @@ func BenchmarkTransformJSONMarshal(b *testing.B) { func TestTransformXMLMarshal(t *testing.T) { assert := func(t *testing.T, input string, tmpl string, output string, inputType apidef.RequestInputType) { tmeta := testPrepareTransformXMLMarshal(tmpl, inputType) - r := TestReq(t, "GET", "/", input) + r := testReq(t, "GET", "/", input) if err := transformBody(r, tmeta, false); err != nil { t.Fatalf("wanted nil error, got %v", err) } @@ -219,53 +216,3 @@ func TestTransformXMLMarshal(t *testing.T) { assert(t, input, tmpl, output, apidef.RequestJSON) }) } - -func TestBodyTransformCaseSensitivity(t *testing.T) { - ts := StartTest() - defer ts.Close() - - assert := func(relativePath string, requestedPath string, bodyMatch string) { - transformResponseConf := apidef.TemplateMeta{ - Path: relativePath, - Method: "GET", - TemplateData: apidef.TemplateData{ - Mode: "blob", - TemplateSource: base64.StdEncoding.EncodeToString([]byte(`{"http_method":"{{.Method}}"}`)), - }, - } - - responseProcessorConf := []apidef.ResponseProcessor{{Name: "response_body_transform"}} - - BuildAndLoadAPI(func(spec *APISpec) { - spec.Proxy.ListenPath = "/" - spec.ResponseProcessors = responseProcessorConf - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { - v.ExtendedPaths.TransformResponse = []apidef.TemplateMeta{transformResponseConf} - }) - }) - - ts.Run(t, test.TestCase{ - Path: requestedPath, Code: 200, BodyMatch: bodyMatch, - }) - } - - // Matches and transforms - t.Run("Relative path lower, requested path lower", func(t *testing.T) { - assert("/get", "/get", `{"http_method":"GET"}`) - }) - - // Doesn't match and doesn't transform - t.Run("Relative path lower, requested path upper", func(t *testing.T) { - assert("/get", "/Get", 
`"Method":"GET"`) - }) - - // Doesn't match and doesn't transform - t.Run("Relative path upper, requested path lower", func(t *testing.T) { - assert("/Get", "/get", `"Method":"GET"`) - }) - - // Matches and transforms - t.Run("Relative path upper, requested path upper", func(t *testing.T) { - assert("/Get", "/Get", `{"http_method":"GET"}`) - }) -} diff --git a/gateway/mw_url_rewrite.go b/mw_url_rewrite.go similarity index 95% rename from gateway/mw_url_rewrite.go rename to mw_url_rewrite.go index 06b03deb33ee..561340d741cf 100644 --- a/gateway/mw_url_rewrite.go +++ b/mw_url_rewrite.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "fmt" @@ -13,7 +13,6 @@ import ( "github.com/Sirupsen/logrus" "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/ctx" "github.com/TykTechnologies/tyk/regexp" "github.com/TykTechnologies/tyk/user" ) @@ -335,7 +334,7 @@ func (m *URLRewriteMiddleware) CheckHostRewrite(oldPath, newTarget string, r *ht newAsURL, _ := url.Parse(newTarget) if newAsURL.Scheme != LoopScheme && oldAsURL.Host != newAsURL.Host { log.Debug("Detected a host rewrite in pattern!") - setCtxValue(r, ctx.RetainHost, true) + setCtxValue(r, RetainHost, true) } } @@ -397,8 +396,8 @@ func checkHeaderTrigger(r *http.Request, options map[string]apidef.StringRegexMa vals, ok := r.Header[mhCN] if ok { for i, v := range vals { - matched, match := mr.FindStringSubmatch(v) - if matched { + match := mr.FindStringSubmatch(v) + if len(match) > 0 { addMatchToContextData(contextData, match, triggernum, mhCN, i) fCount++ } @@ -426,8 +425,8 @@ func checkQueryString(r *http.Request, options map[string]apidef.StringRegexMap, vals, ok := qvals[mv] if ok { for i, v := range vals { - matched, match := mr.FindStringSubmatch(v) - if matched { + match := mr.FindStringSubmatch(v) + if len(match) > 0 { addMatchToContextData(contextData, match, triggernum, mv, i) fCount++ } @@ -454,8 +453,8 @@ func checkPathParts(r *http.Request, options map[string]apidef.StringRegexMap, 
a pathParts := strings.Split(r.URL.Path, "/") for _, part := range pathParts { - matched, match := mr.FindStringSubmatch(part) - if matched { + match := mr.FindStringSubmatch(part) + if len(match) > 0 { addMatchToContextData(contextData, match, triggernum, mv, fCount) fCount++ } @@ -482,8 +481,8 @@ func checkSessionTrigger(r *http.Request, sess *user.SessionState, options map[s if ok { val, valOk := rawVal.(string) if valOk { - matched, match := mr.FindStringSubmatch(val) - if matched { + match := mr.FindStringSubmatch(val) + if len(match) > 0 { addMatchToContextData(contextData, match, triggernum, mh) fCount++ } @@ -513,8 +512,8 @@ func checkContextTrigger(r *http.Request, options map[string]apidef.StringRegexM if ok { val, valOk := rawVal.(string) if valOk { - matched, match := mr.FindStringSubmatch(val) - if matched { + match := mr.FindStringSubmatch(val) + if len(match) > 0 { addMatchToContextData(contextData, match, triggernum, mh) fCount++ } @@ -538,13 +537,10 @@ func checkPayload(r *http.Request, options apidef.StringRegexMap, triggernum int contextData := ctxGetData(r) bodyBytes, _ := ioutil.ReadAll(r.Body) - matched, matches := options.FindAllStringSubmatch(string(bodyBytes), -1) + matches := options.FindAllStringSubmatch(string(bodyBytes), -1) - if matched { + if len(matches) > 0 { kn := buildTriggerKey(triggernum, "payload") - if len(matches) == 0 { - return true - } contextData[kn] = matches[0][0] for i, match := range matches { @@ -560,10 +556,6 @@ func checkPayload(r *http.Request, options apidef.StringRegexMap, triggernum int func addMatchToContextData(cd map[string]interface{}, match []string, trNum int, trName string, indices ...int) { kn := buildTriggerKey(trNum, trName, indices...) 
- if len(match) == 0 { - return - } - cd[kn] = match[0] if len(match) > 1 { diff --git a/gateway/mw_url_rewrite_test.go b/mw_url_rewrite_test.go similarity index 69% rename from gateway/mw_url_rewrite_test.go rename to mw_url_rewrite_test.go index 354c53522b46..c86a721fde1f 100644 --- a/gateway/mw_url_rewrite_test.go +++ b/mw_url_rewrite_test.go @@ -1,11 +1,9 @@ -package gateway +package main import ( "net/http/httptest" "testing" - "github.com/TykTechnologies/tyk/test" - "bytes" "net/http" @@ -60,38 +58,15 @@ var testRewriterData = []struct { }, } -type testRewriterCase struct { - name string - meta *apidef.URLRewriteMeta - reqMaker func() *http.Request - want string -} - -func prepareRewriterCases() []testRewriterCase { - tcs := make([]testRewriterCase, len(testRewriterData)) - for i, td := range testRewriterData { - reqTarget := td.in - tcs[i] = testRewriterCase{ - name: td.name, - meta: &apidef.URLRewriteMeta{ - MatchPattern: td.pattern, - RewriteTo: td.to, - }, - reqMaker: func() *http.Request { - return httptest.NewRequest("GET", reqTarget, nil) - }, - want: td.want, - } - } - return tcs -} - func TestRewriter(t *testing.T) { - cases := prepareRewriterCases() - for _, tc := range cases { + for _, tc := range testRewriterData { t.Run(tc.name, func(t *testing.T) { - r := tc.reqMaker() - got, err := urlRewrite(tc.meta, r) + testConf := apidef.URLRewriteMeta{ + MatchPattern: tc.pattern, + RewriteTo: tc.to, + } + r := httptest.NewRequest("GET", tc.in, nil) + got, err := urlRewrite(&testConf, r) if err != nil { t.Error("compile failed:", err) } @@ -102,21 +77,15 @@ func TestRewriter(t *testing.T) { } } func BenchmarkRewriter(b *testing.B) { - cases := prepareRewriterCases() - //warm-up regexp caches - for _, tc := range cases { - r := tc.reqMaker() - urlRewrite(tc.meta, r) - } - b.ReportAllocs() - b.ResetTimer() - for _, tc := range cases { + for _, tc := range testRewriterData { + testConf := apidef.URLRewriteMeta{ + MatchPattern: tc.pattern, + RewriteTo: tc.to, + 
} for i := 0; i < b.N; i++ { - b.StopTimer() - r := tc.reqMaker() - b.StartTimer() - urlRewrite(tc.meta, r) + r := httptest.NewRequest("GET", tc.in, nil) + urlRewrite(&testConf, r) } } } @@ -318,66 +287,6 @@ func TestRewriterTriggers(t *testing.T) { r, } }, - func() TestDef { - r, _ := http.NewRequest("GET", "/test/straight/rewrite", nil) - - r.Header.Set("x-test", "hello") - r.Header.Set("x-test-Two", "world") - - hOpt := apidef.StringRegexMap{MatchPattern: "hello"} - hOpt.Init() - hOpt2 := apidef.StringRegexMap{MatchPattern: "w.*", Reverse: true} - hOpt2.Init() - - return TestDef{ - "Header Reverse Logic Any Pass", - "/test/straight/rewrite", "/change/to/me/ignore", - "/test/straight/rewrite", "/change/to/me/hello", - []apidef.RoutingTrigger{ - { - On: apidef.Any, - Options: apidef.RoutingTriggerOptions{ - HeaderMatches: map[string]apidef.StringRegexMap{ - "x-test": hOpt, - "x-test-Two": hOpt2, - }, - }, - RewriteTo: "/change/to/me/$tyk_context.trigger-0-X-Test-0", - }, - }, - r, - } - }, - func() TestDef { - r, _ := http.NewRequest("GET", "/test/straight/rewrite", nil) - - r.Header.Set("x-test", "hello") - r.Header.Set("x-test-Two", "world") - - hOpt := apidef.StringRegexMap{MatchPattern: "hello"} - hOpt.Init() - hOpt2 := apidef.StringRegexMap{MatchPattern: "w.*", Reverse: true} - hOpt2.Init() - - return TestDef{ - "Header Reverse Logic All Fail", - "/test/straight/rewrite", "/change/to/me/ignore", - "/test/straight/rewrite", "/change/to/me/ignore", - []apidef.RoutingTrigger{ - { - On: apidef.All, - Options: apidef.RoutingTriggerOptions{ - HeaderMatches: map[string]apidef.StringRegexMap{ - "x-test": hOpt, - "x-test-Two": hOpt2, - }, - }, - RewriteTo: "/change/to/me/$tyk_context.trigger-0-X-Test-0", - }, - }, - r, - } - }, func() TestDef { r, _ := http.NewRequest("GET", "/test/query/rewrite?x_test=foo", nil) @@ -547,74 +456,6 @@ func TestRewriterTriggers(t *testing.T) { r, } }, - func() TestDef { - r, _ := http.NewRequest("GET", "/test/query/rewrite?x_test=foo", 
nil) - r.Header.Set("y-test", "bar") - r.Header.Set("z-test", "baz") - - hOpt := apidef.StringRegexMap{MatchPattern: "foo"} - hOpt.Init() - hOpt2 := apidef.StringRegexMap{MatchPattern: "bar"} - hOpt2.Init() - hOpt3 := apidef.StringRegexMap{MatchPattern: "baz", Reverse: true} - hOpt3.Init() - - return TestDef{ - "Multi Multi Type Reverse Logic All Fail", - "/test/query/rewrite", "/change/to/me/ignore", - "/test/query/rewrite", "/change/to/me/ignore", - []apidef.RoutingTrigger{ - { - On: apidef.All, - Options: apidef.RoutingTriggerOptions{ - QueryValMatches: map[string]apidef.StringRegexMap{ - "x_test": hOpt, - }, - HeaderMatches: map[string]apidef.StringRegexMap{ - "y-test": hOpt2, - "z-test": hOpt3, - }, - }, - RewriteTo: "/change/to/me/$tyk_context.trigger-0-Y-Test-0", - }, - }, - r, - } - }, - func() TestDef { - r, _ := http.NewRequest("GET", "/test/query/rewrite?x_test=foo", nil) - r.Header.Set("y-test", "bar") - r.Header.Set("z-test", "baz") - - hOpt := apidef.StringRegexMap{MatchPattern: "foo"} - hOpt.Init() - hOpt2 := apidef.StringRegexMap{MatchPattern: "bar"} - hOpt2.Init() - hOpt3 := apidef.StringRegexMap{MatchPattern: "baz", Reverse: true} - hOpt3.Init() - - return TestDef{ - "Multi Multi Type Reverse Logic All Fail", - "/test/query/rewrite", "/change/to/me/ignore", - "/test/query/rewrite", "/change/to/me/bar", - []apidef.RoutingTrigger{ - { - On: apidef.Any, - Options: apidef.RoutingTriggerOptions{ - QueryValMatches: map[string]apidef.StringRegexMap{ - "x_test": hOpt, - }, - HeaderMatches: map[string]apidef.StringRegexMap{ - "y-test": hOpt2, - "z-test": hOpt3, - }, - }, - RewriteTo: "/change/to/me/$tyk_context.trigger-0-Y-Test-0", - }, - }, - r, - } - }, func() TestDef { var jsonStr = []byte(`{"foo":"bar"}`) r, _ := http.NewRequest("POST", "/test/pl/rewrite", bytes.NewBuffer(jsonStr)) @@ -684,87 +525,6 @@ func TestRewriterTriggers(t *testing.T) { r, } }, - func() TestDef { - var jsonStr = []byte(`{"foo":"barxxx", "fooble":"baryyy"}`) - r, _ := 
http.NewRequest("POST", "/test/pl/rewrite", bytes.NewBuffer(jsonStr)) - - hOpt := apidef.StringRegexMap{MatchPattern: "bar(\\w*)"} - hOpt.Init() - - return TestDef{ - "Payload Multiple Match Groups", - "/test/pl/rewrite", "/change/to/me/ignore", - "/test/pl/rewrite", "/change/to/me/xxx/yyy", - []apidef.RoutingTrigger{ - { - On: apidef.Any, - Options: apidef.RoutingTriggerOptions{ - PayloadMatches: hOpt, - }, - RewriteTo: "/change/to/me/$tyk_context.trigger-0-payload-0-0/$tyk_context.trigger-0-payload-1-0", - }, - }, - r, - } - }, - func() TestDef { - var jsonStr = []byte(`{"foo":"bar"}`) - r, _ := http.NewRequest("POST", "/test/pl/rewrite", bytes.NewBuffer(jsonStr)) - r.Header.Set("x-test", "apple") - - hOpt := apidef.StringRegexMap{MatchPattern: "bar"} - hOpt.Init() - hOpt2 := apidef.StringRegexMap{MatchPattern: "apple"} - hOpt2.Init() - - return TestDef{ - "Multi Type All", - "/test/pl/rewrite", "/change/to/me/ignore", - "/test/pl/rewrite", "/change/to/me/bar/apple", - []apidef.RoutingTrigger{ - { - On: apidef.All, - Options: apidef.RoutingTriggerOptions{ - PayloadMatches: hOpt, - HeaderMatches: map[string]apidef.StringRegexMap{ - "x-test": hOpt2, - }, - }, - RewriteTo: "/change/to/me/$tyk_context.trigger-0-payload-0/$tyk_context.trigger-0-X-Test-0", - }, - }, - r, - } - }, - func() TestDef { - var jsonStr = []byte(`{"foo":"bar"}`) - r, _ := http.NewRequest("POST", "/test/pl/rewrite", bytes.NewBuffer(jsonStr)) - r.Header.Set("x-test", "apple") - - hOpt := apidef.StringRegexMap{MatchPattern: "bar"} - hOpt.Init() - hOpt2 := apidef.StringRegexMap{MatchPattern: "apple", Reverse: true} - hOpt2.Init() - - return TestDef{ - "Multi Multi Type Reverse Logic Any 1", - "/test/pl/rewrite", "/change/to/me/ignore", - "/test/pl/rewrite", "/change/to/me/bar/", - []apidef.RoutingTrigger{ - { - On: apidef.Any, - Options: apidef.RoutingTriggerOptions{ - PayloadMatches: hOpt, - HeaderMatches: map[string]apidef.StringRegexMap{ - "x-test": hOpt2, - }, - }, - RewriteTo: 
"/change/to/me/$tyk_context.trigger-0-payload-0/$tyk_context.trigger-0-X-Test-0", - }, - }, - r, - } - }, func() TestDef { r, _ := http.NewRequest("GET", "/test/foo/rewrite", nil) hOpt := apidef.StringRegexMap{MatchPattern: "foo"} @@ -788,64 +548,6 @@ func TestRewriterTriggers(t *testing.T) { r, } }, - func() TestDef { - var jsonStr = []byte(`{"foo":"bar"}`) - r, _ := http.NewRequest("POST", "/test/pl/rewrite", bytes.NewBuffer(jsonStr)) - r.Header.Set("x-test", "apple") - - hOpt := apidef.StringRegexMap{MatchPattern: "bar", Reverse: true} - hOpt.Init() - hOpt2 := apidef.StringRegexMap{MatchPattern: "apple"} - hOpt2.Init() - - return TestDef{ - "Multi Multi Type Reverse Logic Any 2", - "/test/pl/rewrite", "/change/to/me/ignore", - "/test/pl/rewrite", "/change/to/me//apple", - []apidef.RoutingTrigger{ - { - On: apidef.Any, - Options: apidef.RoutingTriggerOptions{ - PayloadMatches: hOpt, - HeaderMatches: map[string]apidef.StringRegexMap{ - "x-test": hOpt2, - }, - }, - RewriteTo: "/change/to/me/$tyk_context.trigger-0-payload-0/$tyk_context.trigger-0-X-Test-0", - }, - }, - r, - } - }, - func() TestDef { - var jsonStr = []byte(`{"foo":"bar"}`) - r, _ := http.NewRequest("POST", "/test/pl/rewrite", bytes.NewBuffer(jsonStr)) - r.Header.Set("x-test", "apple") - - hOpt := apidef.StringRegexMap{MatchPattern: "bar"} - hOpt.Init() - hOpt2 := apidef.StringRegexMap{MatchPattern: "apple", Reverse: true} - hOpt2.Init() - - return TestDef{ - "Multi Multi Type Reverse Logic Any 3", - "/test/pl/rewrite", "/change/to/me/ignore", - "/test/pl/rewrite", "/change/to/me/bar/", - []apidef.RoutingTrigger{ - { - On: apidef.Any, - Options: apidef.RoutingTriggerOptions{ - PayloadMatches: hOpt, - HeaderMatches: map[string]apidef.StringRegexMap{ - "x-test": hOpt2, - }, - }, - RewriteTo: "/change/to/me/$tyk_context.trigger-0-payload-0/$tyk_context.trigger-0-X-Test-0", - }, - }, - r, - } - }, func() TestDef { r, _ := http.NewRequest("GET", "/test/foobar/rewrite", nil) hOpt := 
apidef.StringRegexMap{MatchPattern: "foo(\\w+)"} @@ -1177,49 +879,6 @@ func TestInitTriggerRx(t *testing.T) { } } -func TestURLRewriteCaseSensitivity(t *testing.T) { - ts := StartTest() - defer ts.Close() - - assert := func(relativePath string, requestedPath string, bodyMatch string) { - BuildAndLoadAPI(func(spec *APISpec) { - spec.Proxy.ListenPath = "/" - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { - v.ExtendedPaths.URLRewrite = []apidef.URLRewriteMeta{{ - Path: relativePath, - Method: "GET", - MatchPattern: requestedPath, - RewriteTo: "/xyz", - }} - }) - }) - - ts.Run(t, test.TestCase{ - Path: requestedPath, Code: 200, BodyMatch: bodyMatch, - }) - } - - // Matches and rewrites - t.Run("Relative path lower, requested path lower", func(t *testing.T) { - assert("/get", "/get", `"Url":"/xyz"`) - }) - - // Doesn't match and doesn't rewrite - t.Run("Relative path lower, requested path upper", func(t *testing.T) { - assert("/get", "/Get", `"Url":"/Get"`) - }) - - // Doesn't match and doesn't rewrite - t.Run("Relative path upper, requested path lower", func(t *testing.T) { - assert("/Get", "/get", `"Url":"/get"`) - }) - - // Matches and rewrites - t.Run("Relative path upper, requested path upper", func(t *testing.T) { - assert("/Get", "/Get", `"Url":"/xyz"`) - }) -} - func TestValToStr(t *testing.T) { example := []interface{}{ diff --git a/gateway/mw_validate_json.go b/mw_validate_json.go similarity index 99% rename from gateway/mw_validate_json.go rename to mw_validate_json.go index e7e550727300..d5f47c4eeb71 100644 --- a/gateway/mw_validate_json.go +++ b/mw_validate_json.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_validate_json_test.go b/mw_validate_json_test.go similarity index 93% rename from gateway/mw_validate_json_test.go rename to mw_validate_json_test.go index 834e918e119f..bcee9ab632b1 100644 --- a/gateway/mw_validate_json_test.go +++ b/mw_validate_json_test.go @@ -1,4 +1,4 @@ -package gateway 
+package main import ( "encoding/json" @@ -29,8 +29,8 @@ var testJsonSchema = `{ }` func testPrepareValidateJSONSchema() { - BuildAndLoadAPI(func(spec *APISpec) { - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + buildAndLoadAPI(func(spec *APISpec) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { json.Unmarshal([]byte(`[ { "path": "/v", @@ -45,7 +45,7 @@ func testPrepareValidateJSONSchema() { } func TestValidateJSONSchema(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() testPrepareValidateJSONSchema() @@ -62,7 +62,7 @@ func TestValidateJSONSchema(t *testing.T) { func BenchmarkValidateJSONSchema(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() testPrepareValidateJSONSchema() diff --git a/gateway/mw_version_check.go b/mw_version_check.go similarity index 99% rename from gateway/mw_version_check.go rename to mw_version_check.go index 8f9ebe83b54f..8a753ff00f3d 100644 --- a/gateway/mw_version_check.go +++ b/mw_version_check.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "errors" diff --git a/gateway/mw_version_check_test.go b/mw_version_check_test.go similarity index 88% rename from gateway/mw_version_check_test.go rename to mw_version_check_test.go index 1b22ea67bfde..8f3cf6df9b20 100644 --- a/gateway/mw_version_check_test.go +++ b/mw_version_check_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" @@ -10,7 +10,7 @@ import ( ) func testPrepareVersioning() (string, string) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = false spec.VersionData.NotVersioned = false spec.VersionDefinition.Location = "header" @@ -37,20 +37,6 @@ func testPrepareVersioning() (string, string) { }, }, }, - URLRewrite: []apidef.URLRewriteMeta{ - { - Path: "/a", - Method: http.MethodGet, - MatchPattern: "/a(.*)", - RewriteTo: "/b", - }, - { - Path: "/c", - Method: http.MethodPost, - MatchPattern: 
"/c(.*)", - RewriteTo: "/d", - }, - }, Ignored: []apidef.EndPointMeta{ { Path: "/ignore", @@ -60,13 +46,13 @@ func testPrepareVersioning() (string, string) { } }) - keyWrongVersion := CreateSession(func(s *user.SessionState) { + keyWrongVersion := createSession(func(s *user.SessionState) { s.AccessRights = map[string]user.AccessDefinition{"test": { APIID: "test", Versions: []string{"v3"}, }} }) - keyKnownVersion := CreateSession(func(s *user.SessionState) { + keyKnownVersion := createSession(func(s *user.SessionState) { s.AccessRights = map[string]user.AccessDefinition{"test": { APIID: "test", Versions: []string{"v1", "v2", "expired"}, }} @@ -76,7 +62,7 @@ func testPrepareVersioning() (string, string) { } func TestVersioning(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() keyWrongVersion, keyKnownVersion := testPrepareVersioning() @@ -119,7 +105,7 @@ func TestVersioning(t *testing.T) { func BenchmarkVersioning(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() keyWrongVersion, keyKnownVersion := testPrepareVersioning() diff --git a/gateway/mw_virtual_endpoint.go b/mw_virtual_endpoint.go similarity index 98% rename from gateway/mw_virtual_endpoint.go rename to mw_virtual_endpoint.go index f25a3de3e78f..f5e5aba57777 100644 --- a/gateway/mw_virtual_endpoint.go +++ b/mw_virtual_endpoint.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -31,7 +31,6 @@ type RequestObject struct { Body string URL string Params map[string][]string - Scheme string } type ResponseObject struct { @@ -137,15 +136,10 @@ func (d *VirtualEndpoint) ServeHTTPForCache(w http.ResponseWriter, r *http.Reque } defer r.Body.Close() - scheme := "http" - if r.TLS != nil { - scheme = "https" - } requestData := RequestObject{ Headers: r.Header, Body: string(originalBody), - URL: r.URL.String(), - Scheme: scheme, + URL: r.URL.Path, } // We need to copy the body _back_ for the decode diff --git 
a/gateway/mw_virtual_endpoint_test.go b/mw_virtual_endpoint_test.go similarity index 94% rename from gateway/mw_virtual_endpoint_test.go rename to mw_virtual_endpoint_test.go index 7b1b9e8c9b6c..cb4ebcfb25ae 100644 --- a/gateway/mw_virtual_endpoint_test.go +++ b/mw_virtual_endpoint_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/base64" @@ -24,7 +24,7 @@ function testVirtData(request, session, config) { ` func testPrepareVirtualEndpoint(js string, method string, path string, proxyOnError bool) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" virtualMeta := apidef.VirtualMeta{ @@ -58,7 +58,7 @@ func testPrepareVirtualEndpoint(js string, method string, path string, proxyOnEr } func TestVirtualEndpoint(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() testPrepareVirtualEndpoint(virtTestJS, "GET", "/virt", true) @@ -75,7 +75,7 @@ func TestVirtualEndpoint(t *testing.T) { } func TestVirtualEndpoint500(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() testPrepareVirtualEndpoint("abc", "GET", "/abc", false) @@ -89,7 +89,7 @@ func TestVirtualEndpoint500(t *testing.T) { func BenchmarkVirtualEndpoint(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() testPrepareVirtualEndpoint(virtTestJS, "GET", "/virt", true) diff --git a/gateway/newrelic.go b/newrelic.go similarity index 98% rename from gateway/newrelic.go rename to newrelic.go index 306cffb44589..e4336f979524 100644 --- a/gateway/newrelic.go +++ b/newrelic.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "fmt" @@ -7,7 +7,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/gocraft/health" "github.com/gorilla/mux" - newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent" "github.com/newrelic/go-agent/_integrations/nrgorilla/v1" "github.com/TykTechnologies/tyk/config" diff --git a/gateway/oauth_manager.go 
b/oauth_manager.go similarity index 93% rename from gateway/oauth_manager.go rename to oauth_manager.go index b919dd13e0ae..da9d9251136c 100644 --- a/gateway/oauth_manager.go +++ b/oauth_manager.go @@ -1,16 +1,15 @@ -package gateway +package main import ( "bytes" "encoding/base64" "encoding/json" "errors" - "math" "net/http" "time" - "github.com/lonelycode/osin" - uuid "github.com/satori/go.uuid" + osin "github.com/lonelycode/osin" + "github.com/satori/go.uuid" "golang.org/x/crypto/bcrypt" "strconv" @@ -384,7 +383,6 @@ const ( prefixClientTokens = "oauth-client-tokens." ) -// swagger:model type OAuthClientToken struct { Token string `json:"code"` Expires int64 `json:"expires"` @@ -404,14 +402,13 @@ type ExtendedOsinStorageInterface interface { // Custom getter to handle prefixing issues in Redis GetClientNoPrefix(id string) (osin.Client, error) - GetClientTokens(id string) ([]OAuthClientToken, error) - GetPaginatedClientTokens(id string, page int) ([]OAuthClientToken, int, error) - GetExtendedClient(id string) (ExtendedOsinClientInterface, error) // Custom getter to handle prefixing issues in Redis GetExtendedClientNoPrefix(id string) (ExtendedOsinClientInterface, error) + GetClientTokens(id string) ([]OAuthClientToken, error) + GetClients(filter string, ignorePrefix bool) ([]ExtendedOsinClientInterface, error) DeleteClient(id string, ignorePrefix bool) error @@ -555,64 +552,6 @@ func (r *RedisOsinStorageInterface) GetClients(filter string, ignorePrefix bool) return theseClients, nil } -// GetPaginatedClientTokens returns all tokens associated with the given id. 
-// It returns the tokens, the total number of pages of the tokens after -// pagination and an error if any -func (r *RedisOsinStorageInterface) GetPaginatedClientTokens(id string, page int) ([]OAuthClientToken, int, error) { - key := prefixClientTokens + id - - // use current timestamp as a start score so all expired tokens won't be picked - nowTs := time.Now().Unix() - startScore := strconv.FormatInt(nowTs, 10) - - log.Info("Getting client tokens sorted list:", key) - - tokens, scores, err := r.store.GetSortedSetRange(key, startScore, "+inf") - if err != nil { - return nil, 0, err - } - - // clean up expired tokens in sorted set (remove all tokens with score up to current timestamp minus retention) - if config.Global().OauthTokenExpiredRetainPeriod > 0 { - cleanupStartScore := strconv.FormatInt(nowTs-int64(config.Global().OauthTokenExpiredRetainPeriod), 10) - go r.store.RemoveSortedSetRange(key, "-inf", cleanupStartScore) - } - - itemsPerPage := 100 - - tokenNumber := len(tokens) - - if tokenNumber == 0 { - return []OAuthClientToken{}, 0, nil - } - - startIdx := (page - 1) * itemsPerPage - endIdx := startIdx + itemsPerPage - - if tokenNumber < startIdx { - startIdx = tokenNumber - } - - if tokenNumber < endIdx { - endIdx = tokenNumber - } - - totalPages := int(math.Ceil(float64(len(tokens)) / float64(itemsPerPage))) - - tokens = tokens[startIdx:endIdx] - - // convert sorted set data and scores into reply struct - tokensData := make([]OAuthClientToken, len(tokens)) - for i := range tokens { - tokensData[i] = OAuthClientToken{ - Token: tokens[i], - Expires: int64(scores[i]), // we store expire timestamp as a score - } - } - - return tokensData, totalPages, nil -} - func (r *RedisOsinStorageInterface) GetClientTokens(id string) ([]OAuthClientToken, error) { key := prefixClientTokens + id @@ -633,13 +572,9 @@ func (r *RedisOsinStorageInterface) GetClientTokens(id string) ([]OAuthClientTok go r.store.RemoveSortedSetRange(key, "-inf", cleanupStartScore) } - if 
len(tokens) == 0 { - return []OAuthClientToken{}, nil - } - // convert sorted set data and scores into reply struct tokensData := make([]OAuthClientToken, len(tokens)) - for i := range tokens { + for i := 0; i < len(tokensData); i++ { tokensData[i] = OAuthClientToken{ Token: tokens[i], Expires: int64(scores[i]), // we store expire timestamp as a score diff --git a/gateway/oauth_manager_test.go b/oauth_manager_test.go similarity index 84% rename from gateway/oauth_manager_test.go rename to oauth_manager_test.go index 594db87eda58..a1d4b176884f 100644 --- a/gateway/oauth_manager_test.go +++ b/oauth_manager_test.go @@ -1,4 +1,4 @@ -package gateway +package main /* NOTE: Requires the test tyk.conf to be in place and the settings to b correct - ugly, I know, but necessary for the end to end to work correctly. @@ -17,8 +17,9 @@ import ( "time" + "github.com/satori/go.uuid" + "github.com/lonelycode/osin" - uuid "github.com/satori/go.uuid" "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" @@ -46,7 +47,7 @@ const keyRules = `{ }` func buildTestOAuthSpec(apiGens ...func(spec *APISpec)) *APISpec { - return BuildAPI(func(spec *APISpec) { + return buildAPI(func(spec *APISpec) { spec.APIID = "999999" spec.OrgID = "default" spec.Auth = apidef.Auth{ @@ -96,11 +97,11 @@ func buildTestOAuthSpec(apiGens ...func(spec *APISpec)) *APISpec { } func loadTestOAuthSpec() *APISpec { - return LoadAPI(buildTestOAuthSpec())[0] + return loadAPI(buildTestOAuthSpec())[0] } func createTestOAuthClient(spec *APISpec, clientID string) { - pID := CreatePolicy(func(p *user.Policy) { + pID := createPolicy(func(p *user.Policy) { p.ID = "TEST-4321" p.AccessRights = map[string]user.AccessDefinition{ "test": { @@ -131,7 +132,7 @@ func createTestOAuthClient(spec *APISpec, clientID string) { } func TestOauthMultipleAPIs(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := buildTestOAuthSpec(func(spec *APISpec) { @@ -147,11 +148,11 @@ func 
TestOauthMultipleAPIs(t *testing.T) { spec.Proxy.ListenPath = "/api2/" }) - apis := LoadAPI(spec, spec2) + apis := loadAPI(spec, spec2) spec = apis[0] spec2 = apis[1] - pID := CreatePolicy(func(p *user.Policy) { + pID := createPolicy(func(p *user.Policy) { p.AccessRights = map[string]user.AccessDefinition{ "oauth2": { APIID: "oauth2", @@ -218,19 +219,13 @@ func TestOauthMultipleAPIs(t *testing.T) { } func TestAuthCodeRedirect(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() createTestOAuthClient(spec, authClientID) - client := &http.Client{ - CheckRedirect: func(req *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - }, - } - t.Run("Authorize request with redirect", func(t *testing.T) { param := make(url.Values) param.Set("response_type", "code") @@ -246,7 +241,6 @@ func TestAuthCodeRedirect(t *testing.T) { Data: param.Encode(), Headers: headers, Method: http.MethodPost, - Client: client, Code: http.StatusTemporaryRedirect, }) }) @@ -257,21 +251,15 @@ func TestAuthCodeRedirectMultipleURL(t *testing.T) { globalConf := config.Global() globalConf.OauthRedirectUriSeparator = "," config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() createTestOAuthClient(spec, authClientID) - client := &http.Client{ - CheckRedirect: func(req *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - }, - } - t.Run("Client authorize request with multiple redirect URI", func(t *testing.T) { param := make(url.Values) param.Set("response_type", "code") @@ -288,7 +276,6 @@ func TestAuthCodeRedirectMultipleURL(t *testing.T) { Headers: headers, Method: http.MethodPost, Code: http.StatusTemporaryRedirect, - Client: client, }) }) } @@ -298,9 +285,9 @@ func TestAuthCodeRedirectInvalidMultipleURL(t *testing.T) { globalConf := config.Global() 
globalConf.OauthRedirectUriSeparator = "" config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() @@ -328,7 +315,7 @@ func TestAuthCodeRedirectInvalidMultipleURL(t *testing.T) { } func TestAPIClientAuthorizeAuthCode(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() @@ -359,7 +346,7 @@ func TestAPIClientAuthorizeAuthCode(t *testing.T) { } func TestAPIClientAuthorizeToken(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() @@ -390,7 +377,7 @@ func TestAPIClientAuthorizeToken(t *testing.T) { } func TestDeleteOauthClient(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() @@ -483,7 +470,7 @@ func TestDeleteOauthClient(t *testing.T) { } func TestAPIClientAuthorizeTokenWithPolicy(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() @@ -534,7 +521,7 @@ func TestAPIClientAuthorizeTokenWithPolicy(t *testing.T) { }) } -func getAuthCode(t *testing.T, ts *Test) map[string]string { +func getAuthCode(t *testing.T, ts *tykTestServer) map[string]string { param := make(url.Values) param.Set("response_type", "code") param.Set("redirect_uri", authRedirectUri) @@ -561,115 +548,6 @@ func getAuthCode(t *testing.T, ts *Test) map[string]string { return response } -func TestGetPaginatedClientTokens(t *testing.T) { - testPagination := func(pageParam int, expectedPageNumber int, tokenRequestCount int, expectedRes int) { - globalConf := config.Global() - // set tokens to be expired after 100 seconds - globalConf.OauthTokenExpire = 100 - // cleanup tokens older than 300 seconds - globalConf.OauthTokenExpiredRetainPeriod = 300 - - config.SetGlobal(globalConf) - - defer ResetTestConfig() - - ts := StartTest() - defer ts.Close() - - spec := loadTestOAuthSpec() 
- - clientID := uuid.NewV4().String() - createTestOAuthClient(spec, clientID) - - tokensID := map[string]bool{} - param := make(url.Values) - param.Set("response_type", "token") - param.Set("redirect_uri", authRedirectUri) - param.Set("client_id", clientID) - param.Set("client_secret", authClientSecret) - param.Set("key_rules", keyRules) - - headers := map[string]string{ - "Content-Type": "application/x-www-form-urlencoded", - } - - for i := 0; i < tokenRequestCount; i++ { - resp, err := ts.Run(t, test.TestCase{ - Path: "/APIID/tyk/oauth/authorize-client/", - Data: param.Encode(), - AdminAuth: true, - Headers: headers, - Method: http.MethodPost, - Code: http.StatusOK, - }) - if err != nil { - t.Error(err) - } - - response := map[string]interface{}{} - if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { - t.Fatal(err) - } - - // save tokens for future check - tokensID[response["access_token"].(string)] = true - } - - resp, err := ts.Run(t, test.TestCase{ - // strconv#Atoi successfully parses a negative integer - // so make sure it is being reset to the first page - Path: fmt.Sprintf("/tyk/oauth/clients/999999/%s/tokens?page=%d", clientID, pageParam), - AdminAuth: true, - Method: http.MethodGet, - Code: http.StatusOK, - }) - if err != nil { - t.Error(err) - } - - tokensResp := paginatedOAuthClientTokens{} - if err := json.NewDecoder(resp.Body).Decode(&tokensResp); err != nil { - t.Fatal(err) - } - - // check response - if len(tokensResp.Tokens) != expectedRes { - t.Errorf("Wrong number of tokens received. Expected: %d. Got: %d", expectedRes, len(tokensResp.Tokens)) - } - - for _, token := range tokensResp.Tokens { - if !tokensID[token.Token] { - t.Errorf("Token %s is not found in expected result. 
Expecting: %v", token.Token, tokensID) - } - } - - // Also inspect the pagination data information - if expectedPageNumber != tokensResp.Pagination.PageNum { - t.Errorf("Page number, expected %d, got %d", expectedPageNumber, tokensResp.Pagination.PageNum) - } - } - - t.Run("Negative value should return first page", func(t *testing.T) { - testPagination(-3, 1, 110, 100) - }) - - t.Run("First page, less than items per page", func(t *testing.T) { - testPagination(1, 1, 85, 85) - }) - - t.Run("First page, greater than items per page", func(t *testing.T) { - testPagination(1, 1, 110, 100) - }) - - t.Run("Second page, greater than items per page", func(t *testing.T) { - testPagination(2, 2, 110, 10) - }) - - t.Run("Second page, multiple of items per page", func(t *testing.T) { - testPagination(2, 2, 200, 100) - }) -} - func TestGetClientTokens(t *testing.T) { t.Run("Without hashing", func(t *testing.T) { testGetClientTokens(t, false) @@ -690,9 +568,9 @@ func testGetClientTokens(t *testing.T, hashed bool) { config.SetGlobal(globalConf) - defer ResetTestConfig() + defer resetTestConfig() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() @@ -759,10 +637,9 @@ func testGetClientTokens(t *testing.T, hashed bool) { } // check response - if n := len(tokensID); len(tokensResp) != n { - t.Errorf("Wrong number of tokens received. Expected: %d. Got: %d", n, len(tokensResp)) + if len(tokensResp) != len(tokensID) { + t.Errorf("Wrong number of tokens received. Expected: %d. Got: %d", len(tokensID), len(tokensResp)) } - for _, token := range tokensResp { if !tokensID[token.Token] { t.Errorf("Token %s is not found in expected result. 
Expecting: %v", token.Token, tokensID) @@ -800,7 +677,7 @@ type tokenData struct { RefreshToken string `json:"refresh_token"` } -func getToken(t *testing.T, ts *Test) tokenData { +func getToken(t *testing.T, ts *tykTestServer) tokenData { authData := getAuthCode(t, ts) param := make(url.Values) @@ -832,7 +709,7 @@ func getToken(t *testing.T, ts *Test) tokenData { } func TestOAuthClientCredsGrant(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() @@ -871,7 +748,7 @@ func TestOAuthClientCredsGrant(t *testing.T) { } func TestClientAccessRequest(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() @@ -903,7 +780,7 @@ func TestClientAccessRequest(t *testing.T) { } func TestOAuthAPIRefreshInvalidate(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() @@ -960,7 +837,7 @@ func TestOAuthAPIRefreshInvalidate(t *testing.T) { } func TestClientRefreshRequest(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() @@ -992,7 +869,7 @@ func TestClientRefreshRequest(t *testing.T) { } func TestClientRefreshRequestDouble(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() @@ -1053,7 +930,7 @@ func TestClientRefreshRequestDouble(t *testing.T) { } func TestTokenEndpointHeaders(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() spec := loadTestOAuthSpec() diff --git a/gateway/policy.go b/policy.go similarity index 95% rename from gateway/policy.go rename to policy.go index e032b543f33d..5e518418562d 100644 --- a/gateway/policy.go +++ b/policy.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/json" @@ -8,10 +8,9 @@ import ( "os" "time" - "github.com/TykTechnologies/tyk/rpc" - "github.com/Sirupsen/logrus" + "github.com/TykTechnologies/tyk/config" 
"github.com/TykTechnologies/tyk/user" ) @@ -159,11 +158,11 @@ func parsePoliciesFromRPC(list string) (map[string]user.Policy, error) { } func LoadPoliciesFromRPC(orgId string) (map[string]user.Policy, error) { - if rpc.IsEmergencyMode() { + if rpcEmergencyMode { return LoadPoliciesFromRPCBackup() } - store := &RPCStorageHandler{} + store := &RPCStorageHandler{UserKey: config.Global().SlaveOptions.APIKey, Address: config.Global().SlaveOptions.ConnectionString} if !store.Connect() { return nil, errors.New("Policies backup: Failed connecting to database") } diff --git a/gateway/policy_test.go b/policy_test.go similarity index 96% rename from gateway/policy_test.go rename to policy_test.go index 24cf5c08ba8e..819311192c60 100644 --- a/gateway/policy_test.go +++ b/policy_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/json" @@ -19,7 +19,7 @@ import ( ) func TestLoadPoliciesFromDashboardReLogin(t *testing.T) { - // Test Dashboard + // Mock Dashboard ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(403) })) @@ -487,11 +487,11 @@ func TestApplyPoliciesQuotaAPILimit(t *testing.T) { } policiesMu.RUnlock() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // load APIs - BuildAndLoadAPI( + buildAndLoadAPI( func(spec *APISpec) { spec.Name = "api 1" spec.APIID = "api1" @@ -643,7 +643,7 @@ func TestApplyPoliciesQuotaAPILimit(t *testing.T) { ts.Run(t, []test.TestCase{ { Method: http.MethodPut, - Path: "/tyk/keys/" + key, + Path: "/tyk/keys/" + key + "?suppress_reset=1", AdminAuth: true, Code: http.StatusOK, Data: session, @@ -659,13 +659,37 @@ func TestApplyPoliciesQuotaAPILimit(t *testing.T) { t.Log(err.Error()) return false } - api1Limit := sessionData.AccessRights["api1"].Limit - if api1Limit == nil { - t.Error("api1 limit is not set") + + api1Limit := sessionData.AccessRights["api3"].Limit + if api1Limit.QuotaRemaining == 0 { + t.Error("Should not reset quota") + return false + } + + 
return true + }, + }, + { + Method: http.MethodPut, + Path: "/tyk/keys/" + key, + AdminAuth: true, + Code: http.StatusOK, + Data: session, + }, + { + Method: http.MethodGet, + Path: "/tyk/keys/" + key, + AdminAuth: true, + Code: http.StatusOK, + BodyMatchFunc: func(data []byte) bool { + sessionData := user.SessionState{} + if err := json.Unmarshal(data, &sessionData); err != nil { + t.Log(err.Error()) return false } - if api1Limit.QuotaRemaining != 100 { + api1Limit := sessionData.AccessRights["api3"].Limit + if api1Limit.QuotaRemaining != 50 { t.Error("Should reset quota:", api1Limit.QuotaRemaining) return false } @@ -701,11 +725,11 @@ func TestPerAPIPolicyUpdate(t *testing.T) { } policiesMu.RUnlock() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // load APIs - BuildAndLoadAPI( + buildAndLoadAPI( func(spec *APISpec) { spec.Name = "api 1" spec.APIID = "api1" diff --git a/gateway/redis_logrus_hook.go b/redis_logrus_hook.go similarity index 91% rename from gateway/redis_logrus_hook.go rename to redis_logrus_hook.go index df6cf83b8d6e..93d620175a91 100644 --- a/gateway/redis_logrus_hook.go +++ b/redis_logrus_hook.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "time" @@ -16,7 +16,7 @@ type redisChannelHook struct { func newRedisHook() *redisChannelHook { hook := &redisChannelHook{} hook.formatter = new(logrus.JSONFormatter) - hook.notifier.store = &storage.RedisCluster{KeyPrefix: "gateway-notifications:"} + hook.notifier.store = storage.RedisCluster{KeyPrefix: "gateway-notifications:"} hook.notifier.channel = "dashboard.ui.messages" return hook } diff --git a/gateway/redis_signal_handle_config.go b/redis_signal_handle_config.go similarity index 99% rename from gateway/redis_signal_handle_config.go rename to redis_signal_handle_config.go index df7a6863b774..ed8d5ae7bde8 100644 --- a/gateway/redis_signal_handle_config.go +++ b/redis_signal_handle_config.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/json" diff --git 
a/gateway/redis_signals.go b/redis_signals.go similarity index 97% rename from gateway/redis_signals.go rename to redis_signals.go index c6510212f4f7..2875e14c9e7c 100644 --- a/gateway/redis_signals.go +++ b/redis_signals.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/base64" @@ -140,6 +140,10 @@ func isPayloadSignatureValid(notification Notification) bool { } if notification.Signature == "" && config.Global().AllowInsecureConfigs { + redisInsecureWarn.Do(func() { + pubSubLog.Warning("Insecure configuration detected (allowing)!") + }) + return true } @@ -178,7 +182,7 @@ func isPayloadSignatureValid(notification Notification) bool { // RedisNotifier will use redis pub/sub channels to send notifications type RedisNotifier struct { - store *storage.RedisCluster + store storage.RedisCluster channel string } diff --git a/regexp/cache_regexp_byte_ret_bool.go b/regexp/cache_regexp_byte_ret_bool.go index 52004de7ede3..f3ccbc281f91 100644 --- a/regexp/cache_regexp_byte_ret_bool.go +++ b/regexp/cache_regexp_byte_ret_bool.go @@ -24,24 +24,20 @@ func (c *regexpByteRetBoolCache) do(r *regexp.Regexp, b []byte, noCacheFn func([ return noCacheFn(b) } - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendBytes(b).UnsafeKey() - if len(nsKey) > maxKeySize { + key := r.String() + string(b) + if len(key) > maxKeySize { return noCacheFn(b) } // cache hit - if res, found := c.getBool(nsKey); found { + if res, found := c.getBool(key); found { return res } // cache miss, add to cache res := noCacheFn(b) - c.add(kb.Key(), res) + c.add(key, res) return res } diff --git a/regexp/cache_regexp_str_func_ret_str.go b/regexp/cache_regexp_str_func_ret_str.go index fb4519c663b6..8f99fa11c343 100644 --- a/regexp/cache_regexp_str_func_ret_str.go +++ b/regexp/cache_regexp_str_func_ret_str.go @@ -1,6 +1,7 @@ package regexp import ( + "fmt" "regexp" "time" ) @@ -24,18 
+25,14 @@ func (c *regexpStrFuncRetStrCache) do(r *regexp.Regexp, src string, repl func(st return noCacheFn(src, repl) } - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(src).Appendf("%p", repl).UnsafeKey() - if len(nsKey) > maxKeySize { + key := r.String() + src + fmt.Sprintf("%p", repl) + if len(key) > maxKeySize { return noCacheFn(src, repl) } // cache hit - if res, found := c.getString(nsKey); found { + if res, found := c.getString(key); found { return res } @@ -45,7 +42,7 @@ func (c *regexpStrFuncRetStrCache) do(r *regexp.Regexp, src string, repl func(st return res } - c.add(kb.Key(), res) + c.add(key, res) return res } diff --git a/regexp/cache_regexp_str_int_ret_slice_slice_str.go b/regexp/cache_regexp_str_int_ret_slice_slice_str.go index b8dfb5523202..84b24fb60971 100644 --- a/regexp/cache_regexp_str_int_ret_slice_slice_str.go +++ b/regexp/cache_regexp_str_int_ret_slice_slice_str.go @@ -2,6 +2,7 @@ package regexp import ( "regexp" + "strconv" "time" ) @@ -24,18 +25,14 @@ func (c *regexpStrIntRetSliceSliceStrCache) do(r *regexp.Regexp, s string, n int return noCacheFn(s, n) } - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(s).AppendInt(n).UnsafeKey() - if len(nsKey) > maxKeySize { + key := r.String() + s + strconv.Itoa(n) + if len(key) > maxKeySize { return noCacheFn(s, n) } // cache hit - if res, found := c.getStrSliceOfSlices(nsKey); found { + if res, found := c.getStrSliceOfSlices(key); found { return res } @@ -45,7 +42,7 @@ func (c *regexpStrIntRetSliceSliceStrCache) do(r *regexp.Regexp, s string, n int return res } - c.add(kb.Key(), res) + c.add(key, res) return res } diff --git a/regexp/cache_regexp_str_int_ret_slice_str.go b/regexp/cache_regexp_str_int_ret_slice_str.go index ad3c0723d9ea..d1cbd8942a80 
100644 --- a/regexp/cache_regexp_str_int_ret_slice_str.go +++ b/regexp/cache_regexp_str_int_ret_slice_str.go @@ -2,6 +2,7 @@ package regexp import ( "regexp" + "strconv" "time" ) @@ -24,18 +25,14 @@ func (c *regexpStrIntRetSliceStrCache) do(r *regexp.Regexp, s string, n int, noC return noCacheFn(s, n) } - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(s).AppendInt(n).UnsafeKey() - if len(nsKey) > maxKeySize { + key := r.String() + s + strconv.Itoa(n) + if len(key) > maxKeySize { return noCacheFn(s, n) } // cache hit - if res, found := c.getStrSlice(nsKey); found { + if res, found := c.getStrSlice(key); found { return res } @@ -45,7 +42,7 @@ func (c *regexpStrIntRetSliceStrCache) do(r *regexp.Regexp, s string, n int, noC return res } - c.add(kb.Key(), res) + c.add(key, res) return res } diff --git a/regexp/cache_regexp_str_ret_bool.go b/regexp/cache_regexp_str_ret_bool.go index 6544b279e201..df87118ec692 100644 --- a/regexp/cache_regexp_str_ret_bool.go +++ b/regexp/cache_regexp_str_ret_bool.go @@ -24,24 +24,20 @@ func (c *regexpStrRetBoolCache) do(r *regexp.Regexp, s string, noCacheFn func(st return noCacheFn(s) } - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(s).UnsafeKey() - if len(nsKey) > maxKeySize { + key := r.String() + s + if len(key) > maxKeySize { return noCacheFn(s) } // cache hit - if res, found := c.getBool(nsKey); found { + if res, found := c.getBool(key); found { return res } // cache miss, add to cache res := noCacheFn(s) - c.add(kb.Key(), res) + c.add(key, res) return res } diff --git a/regexp/cache_regexp_str_ret_slice_str.go b/regexp/cache_regexp_str_ret_slice_str.go index ec1e7884a1ed..9c3e9cc7889f 100644 --- a/regexp/cache_regexp_str_ret_slice_str.go +++ b/regexp/cache_regexp_str_ret_slice_str.go @@ 
-24,25 +24,21 @@ func (c *regexpStrRetSliceStrCache) do(r *regexp.Regexp, s string, noCacheFn fun return noCacheFn(s) } - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(s).UnsafeKey() - if len(nsKey) > maxKeySize { + key := r.String() + s + if len(key) > maxKeySize { return noCacheFn(s) } // cache hit - if res, found := c.getStrSlice(nsKey); found { + if res, found := c.getStrSlice(key); found { return res } // cache miss, add to cache if value is not too big res := noCacheFn(s) if len(res) <= maxValueSize { - c.add(kb.Key(), res) + c.add(key, res) } return res diff --git a/regexp/cache_regexp_str_str_ret_str.go b/regexp/cache_regexp_str_str_ret_str.go index 788136d44065..8ff0ea455ad2 100644 --- a/regexp/cache_regexp_str_str_ret_str.go +++ b/regexp/cache_regexp_str_str_ret_str.go @@ -24,18 +24,14 @@ func (c *regexpStrStrRetStrCache) do(r *regexp.Regexp, src string, repl string, return noCacheFn(src, repl) } - kb := keyBuilderPool.Get().(*keyBuilder) - defer keyBuilderPool.Put(kb) - kb.Reset() - // generate key, check key size - nsKey := kb.AppendString(r.String()).AppendString(src).AppendString(repl).UnsafeKey() - if len(nsKey) > maxKeySize { + key := r.String() + src + repl + if len(key) > maxKeySize { return noCacheFn(src, repl) } // cache hit - if res, found := c.getString(nsKey); found { + if res, found := c.getString(key); found { return res } @@ -45,7 +41,7 @@ func (c *regexpStrStrRetStrCache) do(r *regexp.Regexp, src string, repl string, return res } - c.add(kb.Key(), res) + c.add(key, res) return res } diff --git a/regexp/keybuilder.go b/regexp/keybuilder.go deleted file mode 100644 index bc528aff40c7..000000000000 --- a/regexp/keybuilder.go +++ /dev/null @@ -1,66 +0,0 @@ -package regexp - -import ( - "fmt" - "strconv" - "sync" - "unsafe" -) - -var keyBuilderPool = sync.Pool{ - New: func() interface{} { return new(keyBuilder) }, -} - 
-// Combine logic of strings.Builder and bytes.Buffer. -// Allow to reuse builder with 0 allocs, as bytes.Buffer -// and also allow to get 0 alloc string representation -// as strings.Builder -type keyBuilder struct { - buf []byte -} - -// Reset resets the keyBuilder to be empty. -func (kb *keyBuilder) Reset() *keyBuilder { - kb.buf = kb.buf[:0] - return kb -} - -// Returns content of internal buffer, converted to string. -// Safe for using as key for storing item, immutable -func (kb *keyBuilder) Key() string { - return string(kb.buf) -} - -// Returns string representation of internal buffer. -// Mutable, sequential writes to keyBuilder will -// also mutate returned representation. -// Safe for lookups by key. -// Should not be used as key for storing items. -func (kb *keyBuilder) UnsafeKey() string { - return *(*string)(unsafe.Pointer(&kb.buf)) -} - -func (kb *keyBuilder) Write(p []byte) (int, error) { - kb.buf = append(kb.buf, p...) - return len(p), nil -} - -func (kb *keyBuilder) AppendString(s string) *keyBuilder { - kb.buf = append(kb.buf, s...) - return kb -} - -func (kb *keyBuilder) AppendBytes(b []byte) *keyBuilder { - kb.buf = append(kb.buf, b...) - return kb -} - -func (kb *keyBuilder) Appendf(format string, a ...interface{}) *keyBuilder { - fmt.Fprintf(kb, format, a...) 
- return kb -} - -func (kb *keyBuilder) AppendInt(n int) *keyBuilder { - kb.buf = strconv.AppendInt(kb.buf, int64(n), 10) - return kb -} diff --git a/regexp/keybuilder_test.go b/regexp/keybuilder_test.go deleted file mode 100644 index 7cf88694c774..000000000000 --- a/regexp/keybuilder_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package regexp - -import ( - "fmt" - "testing" -) - -var tStr1 = "aαa⏰𐌈" -var tStr2 = "bβb⏳𐌏" - -func TestKeyImmutabilityReset(t *testing.T) { - kb := keyBuilder{} - - kb.AppendString(tStr1) - k := kb.Key() - - kb.Reset() - if k != tStr1 { - t.Errorf("key should remains %v, got %v", tStr1, k) - } -} - -func TestKeyImmutabilityChangeBuilderState(t *testing.T) { - kb := keyBuilder{} - - kb.AppendString(tStr1) - k := kb.Key() - - kb.AppendString(tStr2) - if k != tStr1 { - t.Errorf("key should remains %v, got %v", tStr1, k) - } -} - -func TestAppendString(t *testing.T) { - kb := keyBuilder{} - - kb.AppendString(tStr1).AppendString(tStr2) - nsKey := kb.UnsafeKey() - key := kb.Key() - - exp := tStr1 + tStr2 - if key != exp || nsKey != exp { - t.Errorf("expect to got %v, got %v and %v", exp, key, nsKey) - } -} - -func TestAppendBytes(t *testing.T) { - kb := keyBuilder{} - - kb.AppendString(tStr1).AppendBytes([]byte(tStr2)) - nsKey := kb.UnsafeKey() - key := kb.Key() - - exp := tStr1 + tStr2 - if key != exp || nsKey != exp { - t.Errorf("expect to got %v, got %v and %v", exp, key, nsKey) - } -} - -func TestAppendInt(t *testing.T) { - kb := keyBuilder{} - - kb.AppendString(tStr1).AppendInt(123) - nsKey := kb.UnsafeKey() - key := kb.Key() - - exp := tStr1 + "123" - if key != exp || nsKey != exp { - t.Errorf("expect to got %v, got %v and %v", exp, key, nsKey) - } -} - -func TestWrite(t *testing.T) { - kb := keyBuilder{} - - b := []byte(tStr2) - n, err := kb.AppendString(tStr1).Write(b) - - if err != nil { - t.Errorf("Write should always pass without error, got %v", err) - } - - if n != len(b) { - t.Errorf("Write should always return length of byte slice 
argument. Expected %v, got %v", len(b), n) - } - - nsKey := kb.UnsafeKey() - key := kb.Key() - - exp := tStr1 + tStr2 - if key != exp || nsKey != exp { - t.Errorf("expect to got %v, got %v and %v", exp, key, nsKey) - } -} - -func TestAppendf(t *testing.T) { - kb := keyBuilder{} - - f := func(s string) string { return s } - - kb.AppendString(tStr1).Appendf("%p", f) - nsKey := kb.UnsafeKey() - key := kb.Key() - - exp := tStr1 + fmt.Sprintf("%p", f) - if key != exp || nsKey != exp { - t.Errorf("expect to got %v, got %v and %v", exp, key, nsKey) - } -} diff --git a/gateway/res_handler_header_injector.go b/res_handler_header_injector.go similarity index 94% rename from gateway/res_handler_header_injector.go rename to res_handler_header_injector.go index 197ec5048e00..25723da02576 100644 --- a/gateway/res_handler_header_injector.go +++ b/res_handler_header_injector.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" @@ -19,9 +19,6 @@ type HeaderInjector struct { config HeaderInjectorOptions } -func (HeaderInjector) Name() string { - return "HeaderInjector" -} func (h *HeaderInjector) Init(c interface{}, spec *APISpec) error { h.Spec = spec return mapstructure.Decode(c, &h.config) diff --git a/gateway/res_handler_header_injector_test.go b/res_handler_header_injector_test.go similarity index 88% rename from gateway/res_handler_header_injector_test.go rename to res_handler_header_injector_test.go index 5eb7394a2f17..1dfb2c342fe8 100644 --- a/gateway/res_handler_header_injector_test.go +++ b/res_handler_header_injector_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/json" @@ -10,29 +10,29 @@ import ( ) func testPrepareResponseHeaderInjection() { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.UseKeylessAccess = true spec.Proxy.ListenPath = "/" spec.OrgID = "default" - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { 
v.UseExtendedPaths = true json.Unmarshal([]byte(`[ { - "delete_headers": ["X-Tyk-Test"], + "delete_headers": ["X-Tyk-Mock"], "add_headers": {"X-Test": "test"}, "path": "/test-with-slash", "method": "GET", "act_on": false }, { - "delete_headers": ["X-Tyk-Test"], + "delete_headers": ["X-Tyk-Mock"], "add_headers": {"X-Test": "test"}, "path": "test-no-slash", "method": "GET", "act_on": false }, { - "delete_headers": ["X-Tyk-Test"], + "delete_headers": ["X-Tyk-Mock"], "add_headers": {"X-Test": "test"}, "path": "/rewrite-test", "method": "GET", @@ -61,13 +61,13 @@ func testPrepareResponseHeaderInjection() { } func TestResponseHeaderInjection(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() testPrepareResponseHeaderInjection() addHeaders := map[string]string{"X-Test": "test"} - deleteHeaders := map[string]string{"X-Tyk-Test": "1"} + deleteHeaders := map[string]string{"X-Tyk-Mock": "1"} userAgent := fmt.Sprintf("\"User-Agent\":\"Tyk/%v\"", VERSION) ts.Run(t, []test.TestCase{ @@ -83,13 +83,13 @@ func TestResponseHeaderInjection(t *testing.T) { func BenchmarkResponseHeaderInjection(b *testing.B) { b.ReportAllocs() - ts := StartTest() + ts := newTykTestServer() defer ts.Close() testPrepareResponseHeaderInjection() addHeaders := map[string]string{"X-Test": "test"} - deleteHeaders := map[string]string{"X-Tyk-Test": "1"} + deleteHeaders := map[string]string{"X-Tyk-Mock": "1"} userAgent := fmt.Sprintf("\"User-Agent\":\"Tyk/%v\"", VERSION) for i := 0; i < b.N; i++ { diff --git a/gateway/res_handler_header_transform.go b/res_handler_header_transform.go similarity index 95% rename from gateway/res_handler_header_transform.go rename to res_handler_header_transform.go index 6283fc4e587d..3d27e5f411f0 100644 --- a/gateway/res_handler_header_transform.go +++ b/res_handler_header_transform.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" @@ -24,10 +24,6 @@ type HeaderTransform struct { config HeaderTransformOptions } -func 
(HeaderTransform) Name() string { - return "HeaderTransform" -} - func (h *HeaderTransform) Init(c interface{}, spec *APISpec) error { if err := mapstructure.Decode(c, &h.config); err != nil { return err diff --git a/gateway/res_handler_jq_transform.go b/res_handler_jq_transform.go similarity index 98% rename from gateway/res_handler_jq_transform.go rename to res_handler_jq_transform.go index ec62bc3cbbab..ce98992c1634 100644 --- a/gateway/res_handler_jq_transform.go +++ b/res_handler_jq_transform.go @@ -1,6 +1,6 @@ //+build jq -package gateway +package main import ( "bytes" diff --git a/gateway/res_handler_jq_transform_dummy.go b/res_handler_jq_transform_dummy.go similarity index 80% rename from gateway/res_handler_jq_transform_dummy.go rename to res_handler_jq_transform_dummy.go index 9fe8f8cab714..95d6c851f893 100644 --- a/gateway/res_handler_jq_transform_dummy.go +++ b/res_handler_jq_transform_dummy.go @@ -1,6 +1,6 @@ //+build !jq -package gateway +package main import ( "net/http" @@ -12,10 +12,6 @@ type ResponseTransformJQMiddleware struct { Spec *APISpec } -func (ResponseTransformJQMiddleware) Name() string { - return "ResponseTransformJQMiddleware" -} - func (h *ResponseTransformJQMiddleware) Init(c interface{}, spec *APISpec) error { h.Spec = spec diff --git a/gateway/res_handler_transform.go b/res_handler_transform.go similarity index 97% rename from gateway/res_handler_transform.go rename to res_handler_transform.go index 6c4636fe25f0..05775f408cea 100644 --- a/gateway/res_handler_transform.go +++ b/res_handler_transform.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -21,10 +21,6 @@ type ResponseTransformMiddleware struct { Spec *APISpec } -func (ResponseTransformMiddleware) Name() string { - return "ResponseTransformMiddleware" -} - func (h *ResponseTransformMiddleware) Init(c interface{}, spec *APISpec) error { h.Spec = spec return nil diff --git a/gateway/res_handler_transform_test.go b/res_handler_transform_test.go similarity 
index 85% rename from gateway/res_handler_transform_test.go rename to res_handler_transform_test.go index bc2c3e7607a8..95d2d8a0ab9a 100644 --- a/gateway/res_handler_transform_test.go +++ b/res_handler_transform_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "encoding/base64" @@ -28,13 +28,13 @@ func TestTransformResponseWithURLRewrite(t *testing.T) { responseProcessorConf := []apidef.ResponseProcessor{{Name: "response_body_transform"}} t.Run("Transform without rewrite", func(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.ResponseProcessors = responseProcessorConf - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.ExtendedPaths.TransformResponse = []apidef.TemplateMeta{transformResponseConf} }) }) @@ -45,14 +45,14 @@ func TestTransformResponseWithURLRewrite(t *testing.T) { }) t.Run("Transform path equals rewrite to ", func(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.ResponseProcessors = responseProcessorConf - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.ExtendedPaths.TransformResponse = []apidef.TemplateMeta{transformResponseConf} v.ExtendedPaths.URLRewrite = []apidef.URLRewriteMeta{urlRewriteConf} }) @@ -64,16 +64,16 @@ func TestTransformResponseWithURLRewrite(t *testing.T) { }) t.Run("Transform path equals rewrite path", func(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.ResponseProcessors = responseProcessorConf transformResponseConf.Path = "abc" - 
UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.ExtendedPaths.TransformResponse = []apidef.TemplateMeta{transformResponseConf} v.ExtendedPaths.URLRewrite = []apidef.URLRewriteMeta{urlRewriteConf} }) @@ -86,7 +86,7 @@ func TestTransformResponseWithURLRewrite(t *testing.T) { } func TestTransformResponse_ContextVars(t *testing.T) { - ts := StartTest() + ts := newTykTestServer() defer ts.Close() transformResponseConf := apidef.TemplateMeta{ @@ -101,10 +101,10 @@ func TestTransformResponse_ContextVars(t *testing.T) { responseProcessorConf := []apidef.ResponseProcessor{{Name: "response_body_transform"}} // When Context Vars are disabled - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.ResponseProcessors = responseProcessorConf - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.ExtendedPaths.TransformResponse = []apidef.TemplateMeta{transformResponseConf} }) }) @@ -114,11 +114,11 @@ func TestTransformResponse_ContextVars(t *testing.T) { }) // When Context Vars are enabled - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.EnableContextVars = true spec.ResponseProcessors = responseProcessorConf - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.ExtendedPaths.TransformResponse = []apidef.TemplateMeta{transformResponseConf} }) }) @@ -131,7 +131,7 @@ func TestTransformResponse_ContextVars(t *testing.T) { func TestTransformResponse_WithCache(t *testing.T) { const path = "/get" - ts := StartTest() + ts := newTykTestServer() defer ts.Close() transformResponseConf := apidef.TemplateMeta{ @@ -145,13 +145,13 @@ func TestTransformResponse_WithCache(t *testing.T) { responseProcessorConf := []apidef.ResponseProcessor{{Name: 
"response_body_transform"}} createAPI := func(withCache bool) { - BuildAndLoadAPI(func(spec *APISpec) { + buildAndLoadAPI(func(spec *APISpec) { spec.Proxy.ListenPath = "/" spec.CacheOptions.CacheTimeout = 60 spec.EnableContextVars = true spec.CacheOptions.EnableCache = withCache spec.ResponseProcessors = responseProcessorConf - UpdateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { + updateAPIVersion(spec, "v1", func(v *apidef.VersionInfo) { v.ExtendedPaths.TransformResponse = []apidef.TemplateMeta{transformResponseConf} v.ExtendedPaths.Cached = []string{path} }) diff --git a/gateway/reverse_proxy.go b/reverse_proxy.go similarity index 89% rename from gateway/reverse_proxy.go rename to reverse_proxy.go index a9fcd17d9f33..496d5ec283ec 100644 --- a/gateway/reverse_proxy.go +++ b/reverse_proxy.go @@ -9,7 +9,7 @@ // * load balancing // * service discovery -package gateway +package main import ( "bytes" @@ -26,18 +26,12 @@ import ( "sync" "time" - "golang.org/x/net/http2" - "github.com/Sirupsen/logrus" - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - cache "github.com/pmylund/go-cache" + "github.com/pmylund/go-cache" "github.com/TykTechnologies/tyk/apidef" "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/ctx" "github.com/TykTechnologies/tyk/regexp" - "github.com/TykTechnologies/tyk/trace" "github.com/TykTechnologies/tyk/user" ) @@ -228,7 +222,7 @@ func TykNewSingleHostReverseProxy(target *url.URL, spec *APISpec) *ReverseProxy targetToUse := target - if spec.URLRewriteEnabled && req.Context().Value(ctx.RetainHost) == true { + if spec.URLRewriteEnabled && req.Context().Value(RetainHost) == true { log.Debug("Detected host rewrite, overriding target") tmpTarget, err := url.Parse(req.URL.String()) if err != nil { @@ -315,29 +309,18 @@ type ReverseProxy struct { ErrorHandler ErrorHandler } -func defaultTransport(dialerTimeout float64) *http.Transport { - timeout := 30.0 - if dialerTimeout > 0 { - 
log.Debug("Setting timeout for outbound request to: ", dialerTimeout) - timeout = dialerTimeout - } - - dialer := &net.Dialer{ - Timeout: time.Duration(float64(timeout) * float64(time.Second)), - KeepAlive: 30 * time.Second, - DualStack: true, - } - dialContextFunc := dialer.DialContext - if dnsCacheManager.IsCacheEnabled() { - dialContextFunc = dnsCacheManager.WrapDialer(dialer) - } - +func defaultTransport() *http.Transport { + // allocate a new one every time for now, to avoid modifying a + // global variable for each request's needs (e.g. timeouts). return &http.Transport{ - DialContext: dialContextFunc, - MaxIdleConns: config.Global().MaxIdleConns, - MaxIdleConnsPerHost: config.Global().MaxIdleConnsPerHost, // default is 100 - ResponseHeaderTimeout: time.Duration(dialerTimeout) * time.Second, - TLSHandshakeTimeout: 10 * time.Second, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: config.Global().MaxIdleConns, + MaxIdleConnsPerHost: config.Global().MaxIdleConnsPerHost, // default is 100 + TLSHandshakeTimeout: 10 * time.Second, } } @@ -406,7 +389,7 @@ func (p *ReverseProxy) ServeHTTPForCache(rw http.ResponseWriter, req *http.Reque return resp } -func (p *ReverseProxy) CheckHardTimeoutEnforced(spec *APISpec, req *http.Request) (bool, float64) { +func (p *ReverseProxy) CheckHardTimeoutEnforced(spec *APISpec, req *http.Request) (bool, int) { if !spec.EnforcedTimeoutEnabled { return false, spec.GlobalConfig.ProxyDefaultTimeout } @@ -414,7 +397,7 @@ func (p *ReverseProxy) CheckHardTimeoutEnforced(spec *APISpec, req *http.Request _, versionPaths, _, _ := spec.Version(req) found, meta := spec.CheckSpecMatchesStatus(req, versionPaths, HardTimeout) if found { - intMeta := meta.(*float64) + intMeta := meta.(*int) log.Debug("HARD TIMEOUT ENFORCED: ", *intMeta) return true, *intMeta } @@ -468,8 +451,8 @@ func proxyFromAPI(api *APISpec) func(*http.Request) (*url.URL, error) { } } 
-func httpTransport(timeOut float64, rw http.ResponseWriter, req *http.Request, p *ReverseProxy) http.RoundTripper { - transport := defaultTransport(timeOut) // modifies a newly created transport +func httpTransport(timeOut int, rw http.ResponseWriter, req *http.Request, p *ReverseProxy) http.RoundTripper { + transport := defaultTransport() // modifies a newly created transport transport.TLSClientConfig = &tls.Config{} transport.Proxy = proxyFromAPI(p.TykAPISpec) @@ -509,6 +492,16 @@ func httpTransport(timeOut float64, rw http.ResponseWriter, req *http.Request, p transport.TLSClientConfig.Renegotiation = tls.RenegotiateFreelyAsClient } + // Use the default unless we've modified the timout + if timeOut > 0 { + log.Debug("Setting timeout for outbound request to: ", timeOut) + transport.DialContext = (&net.Dialer{ + Timeout: time.Duration(timeOut) * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext + transport.ResponseHeaderTimeout = time.Duration(timeOut) * time.Second + } + transport.DisableKeepAlives = p.TykAPISpec.GlobalConfig.ProxyCloseConnections if IsWebsocket(req) { @@ -516,20 +509,10 @@ func httpTransport(timeOut float64, rw http.ResponseWriter, req *http.Request, p return wsTransport } - if config.Global().ProxyEnableHttp2 { - http2.ConfigureTransport(transport) - } - return transport } func (p *ReverseProxy) WrappedServeHTTP(rw http.ResponseWriter, req *http.Request, withCache bool) *http.Response { - if trace.IsEnabled() { - span, ctx := trace.Span(req.Context(), req.URL.Path) - defer span.Finish() - ext.SpanKindRPCClient.Set(span) - req = req.WithContext(ctx) - } outReqIsWebsocket := IsWebsocket(req) var roundTripper http.RoundTripper @@ -573,17 +556,17 @@ func (p *ReverseProxy) WrappedServeHTTP(rw http.ResponseWriter, req *http.Reques } p.TykAPISpec.Unlock() - reqCtx := req.Context() + ctx := req.Context() if cn, ok := rw.(http.CloseNotifier); ok { var cancel context.CancelFunc - reqCtx, cancel = context.WithCancel(reqCtx) + ctx, cancel = 
context.WithCancel(ctx) defer cancel() notifyChan := cn.CloseNotify() go func() { select { case <-notifyChan: cancel() - case <-reqCtx.Done(): + case <-ctx.Done(): } }() } @@ -603,21 +586,18 @@ func (p *ReverseProxy) WrappedServeHTTP(rw http.ResponseWriter, req *http.Reques log.Debug("UPSTREAM REQUEST URL: ", req.URL) // We need to double set the context for the outbound request to reprocess the target - if p.TykAPISpec.URLRewriteEnabled && req.Context().Value(ctx.RetainHost) == true { + if p.TykAPISpec.URLRewriteEnabled && req.Context().Value(RetainHost) == true { log.Debug("Detected host rewrite, notifying director") - setCtxValue(outreq, ctx.RetainHost, true) + setCtxValue(outreq, RetainHost, true) } if req.ContentLength == 0 { outreq.Body = nil // Issue 16036: nil Body for http.Transport retries } - outreq = outreq.WithContext(reqCtx) + outreq = outreq.WithContext(ctx) outreq.Header = cloneHeader(req.Header) - if trace.IsEnabled() { - span := opentracing.SpanFromContext(req.Context()) - trace.Inject(p.TykAPISpec.Name, span, outreq.Header) - } + p.Director(outreq) outreq.Close = false @@ -638,15 +618,10 @@ func (p *ReverseProxy) WrappedServeHTTP(rw http.ResponseWriter, req *http.Reques // important is "Connection" because we want a persistent // connection, regardless of what the client sent to us. 
for _, h := range hopHeaders { - hv := outreq.Header.Get(h) - if hv == "" { - continue - } - if h == "Te" && hv == "trailers" { - continue + if outreq.Header.Get(h) != "" { + outreq.Header.Del(h) + logreq.Header.Del(h) } - outreq.Header.Del(h) - logreq.Header.Del(h) } } @@ -678,7 +653,7 @@ func (p *ReverseProxy) WrappedServeHTTP(rw http.ResponseWriter, req *http.Reques if breakerEnforced { if !breakerConf.CB.Ready() { log.Debug("ON REQUEST: Circuit Breaker is in OPEN state") - p.ErrorHandler.HandleError(rw, logreq, "Service temporarily unavailable.", 503, true) + p.ErrorHandler.HandleError(rw, logreq, "Service temporarily unavailable.", 503) return nil } log.Debug("ON REQUEST: Circuit Breaker is in CLOSED or HALF-OPEN state") @@ -712,7 +687,7 @@ func (p *ReverseProxy) WrappedServeHTTP(rw http.ResponseWriter, req *http.Reques }).Error("http: proxy error: ", err) if strings.Contains(err.Error(), "timeout awaiting response headers") { - p.ErrorHandler.HandleError(rw, logreq, "Upstream service reached hard timeout.", http.StatusGatewayTimeout, true) + p.ErrorHandler.HandleError(rw, logreq, "Upstream service reached hard timeout.", http.StatusGatewayTimeout) if p.TykAPISpec.Proxy.ServiceDiscovery.UseDiscoveryService { if ServiceCache != nil { @@ -724,16 +699,16 @@ func (p *ReverseProxy) WrappedServeHTTP(rw http.ResponseWriter, req *http.Reques } if strings.Contains(err.Error(), "context canceled") { - p.ErrorHandler.HandleError(rw, logreq, "Client closed request", 499, true) + p.ErrorHandler.HandleError(rw, logreq, "Client closed request", 499) return nil } if strings.Contains(err.Error(), "no such host") { - p.ErrorHandler.HandleError(rw, logreq, "Upstream host lookup failed", http.StatusInternalServerError, true) + p.ErrorHandler.HandleError(rw, logreq, "Upstream host lookup failed", http.StatusInternalServerError) return nil } - p.ErrorHandler.HandleError(rw, logreq, "There was a problem proxying the request", http.StatusInternalServerError, true) + 
p.ErrorHandler.HandleError(rw, logreq, "There was a problem proxying the request", http.StatusInternalServerError) return nil } @@ -811,39 +786,8 @@ func (p *ReverseProxy) HandleResponse(rw http.ResponseWriter, res *http.Response copyHeader(rw.Header(), res.Header) - announcedTrailers := len(res.Trailer) - if announcedTrailers > 0 { - trailerKeys := make([]string, 0, len(res.Trailer)) - for k := range res.Trailer { - trailerKeys = append(trailerKeys, k) - } - rw.Header().Add("Trailer", strings.Join(trailerKeys, ", ")) - } - rw.WriteHeader(res.StatusCode) - - if len(res.Trailer) > 0 { - // Force chunking if we saw a response trailer. - // This prevents net/http from calculating the length for short - // bodies and adding a Content-Length. - if fl, ok := rw.(http.Flusher); ok { - fl.Flush() - } - } - p.CopyResponse(rw, res.Body) - - if len(res.Trailer) == announcedTrailers { - copyHeader(rw.Header(), res.Trailer) - return nil - } - - for k, vv := range res.Trailer { - k = http.TrailerPrefix + k - for _, v := range vv { - rw.Header().Add(k, v) - } - } return nil } diff --git a/gateway/reverse_proxy_test.go b/reverse_proxy_test.go similarity index 68% rename from gateway/reverse_proxy_test.go rename to reverse_proxy_test.go index e50dd0bc973f..940b562f0204 100644 --- a/gateway/reverse_proxy_test.go +++ b/reverse_proxy_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -10,14 +10,9 @@ import ( "strings" "testing" "text/template" - "time" "github.com/TykTechnologies/tyk/apidef" - "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/ctx" - "github.com/TykTechnologies/tyk/dnscache" "github.com/TykTechnologies/tyk/request" - "github.com/TykTechnologies/tyk/test" ) func TestCopyHeader_NoDuplicateCORSHeaders(t *testing.T) { @@ -89,15 +84,14 @@ func TestReverseProxyRetainHost(t *testing.T) { spec := &APISpec{APIDefinition: &apidef.APIDefinition{}, URLRewriteEnabled: true} spec.URLRewriteEnabled = true - req := TestReq(t, 
http.MethodGet, tc.inURL, nil) + req := testReq(t, http.MethodGet, tc.inURL, nil) req.URL.Path = tc.inPath if tc.retainHost { - setCtxValue(req, ctx.RetainHost, true) + setCtxValue(req, RetainHost, true) } proxy := TykNewSingleHostReverseProxy(target, spec) proxy.Director(req) - if got := req.URL.String(); got != tc.wantURL { t.Fatalf("wanted url %q, got %q", tc.wantURL, got) } @@ -105,206 +99,8 @@ func TestReverseProxyRetainHost(t *testing.T) { } } -type configTestReverseProxyDnsCache struct { - *testing.T - - etcHostsMap map[string][]string - dnsConfig config.DnsCacheConfig -} - -func setupTestReverseProxyDnsCache(cfg *configTestReverseProxyDnsCache) func() { - pullDomains := mockHandle.PushDomains(cfg.etcHostsMap, nil) - dnsCacheManager.InitDNSCaching( - time.Duration(cfg.dnsConfig.TTL)*time.Second, time.Duration(cfg.dnsConfig.CheckInterval)*time.Second) - - globalConf := config.Global() - enableWebSockets := globalConf.HttpServerOptions.EnableWebSockets - - globalConf.HttpServerOptions.EnableWebSockets = true - config.SetGlobal(globalConf) - - return func() { - pullDomains() - dnsCacheManager.DisposeCache() - globalConf.HttpServerOptions.EnableWebSockets = enableWebSockets - config.SetGlobal(globalConf) - } -} - -func TestReverseProxyDnsCache(t *testing.T) { - const ( - host = "orig-host.com." - host2 = "orig-host2.com." - host3 = "orig-host3.com." - wsHost = "ws.orig-host.com." 
- - hostApiUrl = "http://orig-host.com/origpath" - host2HttpApiUrl = "http://orig-host2.com/origpath" - host2HttpsApiUrl = "https://orig-host2.com/origpath" - host3ApiUrl = "https://orig-host3.com/origpath" - wsHostWsApiUrl = "ws://ws.orig-host.com/connect" - wsHostWssApiUrl = "wss://ws.orig-host.com/connect" - - cacheTTL = 5 - cacheUpdateInterval = 10 - ) - - var ( - etcHostsMap = map[string][]string{ - host: {"127.0.0.10", "127.0.0.20"}, - host2: {"10.0.20.0", "10.0.20.1", "10.0.20.2"}, - host3: {"10.0.20.15", "10.0.20.16"}, - wsHost: {"127.0.0.10", "127.0.0.10"}, - } - ) - - tearDown := setupTestReverseProxyDnsCache(&configTestReverseProxyDnsCache{t, etcHostsMap, - config.DnsCacheConfig{ - Enabled: true, TTL: cacheTTL, CheckInterval: cacheUpdateInterval, - MultipleIPsHandleStrategy: config.NoCacheStrategy}}) - - currentStorage := dnsCacheManager.CacheStorage() - fakeDeleteStorage := &dnscache.MockStorage{ - MockFetchItem: currentStorage.FetchItem, - MockGet: currentStorage.Get, - MockSet: currentStorage.Set, - MockDelete: func(key string) { - //prevent deletion - }, - MockClear: currentStorage.Clear} - dnsCacheManager.SetCacheStorage(fakeDeleteStorage) - - defer tearDown() - - cases := []struct { - name string - - URL string - Method string - Body []byte - Headers http.Header - - isWebsocket bool - - expectedIPs []string - shouldBeCached bool - isCacheEnabled bool - }{ - { - "Should cache first request to Host1", - hostApiUrl, - http.MethodGet, nil, nil, - false, - etcHostsMap[host], - true, true, - }, - { - "Should cache first request to Host2", - host2HttpsApiUrl, - http.MethodPost, []byte("{ \"param\": \"value\" }"), nil, - false, - etcHostsMap[host2], - true, true, - }, - { - "Should populate from cache second request to Host1", - hostApiUrl, - http.MethodGet, nil, nil, - false, - etcHostsMap[host], - false, true, - }, - { - "Should populate from cache second request to Host2 with different protocol", - host2HttpApiUrl, - http.MethodPost, []byte("{ 
\"param\": \"value2\" }"), nil, - false, - etcHostsMap[host2], - false, true, - }, - { - "Shouldn't cache request with different http verb to same host", - hostApiUrl, - http.MethodPatch, []byte("{ \"param2\": \"value3\" }"), nil, - false, - etcHostsMap[host], - false, true, - }, - { - "Shouldn't cache dns record when cache is disabled", - host3ApiUrl, - http.MethodGet, nil, nil, - false, etcHostsMap[host3], - false, false, - }, - { - "Should cache ws protocol host dns records", - wsHostWsApiUrl, - http.MethodGet, nil, - map[string][]string{ - "Upgrade": {"websocket"}, - "Connection": {"Upgrade"}, - }, - true, - etcHostsMap[wsHost], - true, true, - }, - // { - // "Should cache wss protocol host dns records", - // wsHostWssApiUrl, - // http.MethodGet, nil, - // map[string][]string{ - // "Upgrade": {"websocket"}, - // "Connection": {"Upgrade"}, - // }, - // true, - // etcHostsMap[wsHost], - // true, true, - // }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - storage := dnsCacheManager.CacheStorage() - if !tc.isCacheEnabled { - dnsCacheManager.SetCacheStorage(nil) - } - - spec := &APISpec{APIDefinition: &apidef.APIDefinition{}, - EnforcedTimeoutEnabled: true, - GlobalConfig: config.Config{ProxyCloseConnections: true, ProxyDefaultTimeout: 0.1}} - - req := TestReq(t, tc.Method, tc.URL, tc.Body) - for name, value := range tc.Headers { - req.Header.Add(name, strings.Join(value, ";")) - } - - Url, _ := url.Parse(tc.URL) - proxy := TykNewSingleHostReverseProxy(Url, spec) - recorder := httptest.NewRecorder() - proxy.WrappedServeHTTP(recorder, req, false) - - host := Url.Hostname() - if tc.isCacheEnabled { - item, ok := storage.Get(host) - if !ok || !test.IsDnsRecordsAddrsEqualsTo(item.Addrs, tc.expectedIPs) { - t.Fatalf("got %q, but wanted %q. ok=%t", item, tc.expectedIPs, ok) - } - } else { - item, ok := storage.Get(host) - if ok { - t.Fatalf("got %t, but wanted %t. 
item=%#v", ok, false, item) - } - } - - if !tc.isCacheEnabled { - dnsCacheManager.SetCacheStorage(storage) - } - }) - } -} - func testNewWrappedServeHTTP() *ReverseProxy { - target, _ := url.Parse(TestHttpGet) + target, _ := url.Parse(testHttpGet) def := apidef.APIDefinition{} def.VersionData.DefaultVersion = "Default" def.VersionData.Versions = map[string]apidef.VersionInfo{ @@ -523,7 +319,7 @@ func TestCheckHeaderInRemoveList(t *testing.T) { t.Fatal(err) } - spec := CreateSpecTest(t, specOutput.String()) + spec := createSpecTest(t, specOutput.String()) actual := rp.CheckHeaderInRemoveList(tc.header, spec, r) if actual != tc.expected { t.Fatalf("want %t, got %t", tc.expected, actual) @@ -655,7 +451,6 @@ func BenchmarkWrappedServeHTTP(b *testing.B) { proxy.WrappedServeHTTP(recorder, req, false) } } - func BenchmarkCopyRequestResponse(b *testing.B) { b.ReportAllocs() diff --git a/rpc/rpc_analytics_purger.go b/rpc/rpc_analytics_purger.go deleted file mode 100644 index dbb85b014d92..000000000000 --- a/rpc/rpc_analytics_purger.go +++ /dev/null @@ -1,135 +0,0 @@ -package rpc - -import ( - "encoding/json" - "time" - - msgpack "gopkg.in/vmihailenco/msgpack.v2" - - "github.com/TykTechnologies/tyk/storage" -) - -type AnalyticsRecord struct { - Method string - Path string - RawPath string - ContentLength int64 - UserAgent string - Day int - Month time.Month - Year int - Hour int - ResponseCode int - APIKey string - TimeStamp time.Time - APIVersion string - APIName string - APIID string - OrgID string - OauthID string - RequestTime int64 - RawRequest string - RawResponse string - IPAddress string - Geo GeoData - Tags []string - Alias string - TrackPath bool - ExpireAt time.Time `bson:"expireAt" json:"expireAt"` -} -type GeoData struct { - Country struct { - ISOCode string `maxminddb:"iso_code"` - } `maxminddb:"country"` - - City struct { - GeoNameID uint `maxminddb:"geoname_id"` - Names map[string]string `maxminddb:"names"` - } `maxminddb:"city"` - - Location struct { - 
Latitude float64 `maxminddb:"latitude"` - Longitude float64 `maxminddb:"longitude"` - TimeZone string `maxminddb:"time_zone"` - } `maxminddb:"location"` -} - -const analyticsKeyName = "tyk-system-analytics" - -// RPCPurger will purge analytics data into a Mongo database, requires that the Mongo DB string is specified -// in the Config object -type Purger struct { - Store storage.Handler -} - -// Connect Connects to RPC -func (r *Purger) Connect() { - if !clientIsConnected { - Log.Error("RPC client is not connected, use Connect method 1st") - } - - // setup RPC func if needed - if !addedFuncs["Ping"] { - dispatcher.AddFunc("Ping", func() bool { - return false - }) - addedFuncs["Ping"] = true - } - if !addedFuncs["PurgeAnalyticsData"] { - dispatcher.AddFunc("PurgeAnalyticsData", func(data string) error { - return nil - }) - addedFuncs["PurgeAnalyticsData"] = true - } - - Log.Info("RPC Analytics client using singleton") -} - -// PurgeLoop starts the loop that will pull data out of the in-memory -// store and into RPC. 
-func (r Purger) PurgeLoop(ticker <-chan time.Time) { - for { - <-ticker - r.PurgeCache() - } -} - -// PurgeCache will pull the data from the in-memory store and drop it into the specified MongoDB collection -func (r *Purger) PurgeCache() { - if !clientIsConnected { - Log.Error("RPC client is not connected, use Connect method 1st") - } - - if _, err := FuncClientSingleton("Ping", nil); err != nil { - Log.WithError(err).Error("Can't purge cache, failed to ping RPC") - return - } - - analyticsValues := r.Store.GetAndDeleteSet(analyticsKeyName) - if len(analyticsValues) == 0 { - return - } - keys := make([]interface{}, len(analyticsValues)) - - for i, v := range analyticsValues { - decoded := AnalyticsRecord{} - if err := msgpack.Unmarshal(v.([]byte), &decoded); err != nil { - Log.WithError(err).Error("Couldn't unmarshal analytics data") - } else { - Log.WithField("decoded", decoded).Debug("Decoded Record") - keys[i] = decoded - } - } - - data, err := json.Marshal(keys) - if err != nil { - Log.WithError(err).Error("Failed to marshal analytics data") - return - } - - // Send keys to RPC - if _, err := FuncClientSingleton("PurgeAnalyticsData", string(data)); err != nil { - EmitErrorEvent(FuncClientSingletonCall, "PurgeAnalyticsData", err) - Log.Warn("Failed to call purge, retrying: ", err) - } -} diff --git a/rpc/rpc_client.go b/rpc/rpc_client.go deleted file mode 100644 index c5bb1b42bc28..000000000000 --- a/rpc/rpc_client.go +++ /dev/null @@ -1,393 +0,0 @@ -package rpc - -import ( - "crypto/tls" - "errors" - "net" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/Sirupsen/logrus" - - "github.com/gocraft/health" - uuid "github.com/satori/go.uuid" - - "github.com/TykTechnologies/gorpc" -) - -var ( - GlobalRPCCallTimeout = 30 * time.Second - GlobalRPCPingTimeout = 60 * time.Second - Log = &logrus.Logger{} - Instrument *health.Stream - - clientSingleton *gorpc.Client - clientSingletonMu sync.Mutex - funcClientSingleton *gorpc.DispatcherClient - 
clientIsConnected bool - - dispatcher = gorpc.NewDispatcher() - addedFuncs = make(map[string]bool) - - config Config - getGroupLoginCallback func(string, string) interface{} - emergencyModeCallback func() - emergencyModeLoadedCallback func() - - // rpcLoadCount is a counter to check if this is a cold boot - rpcLoadCount int - rpcEmergencyMode bool - rpcEmergencyModeLoaded bool - - killChan = make(chan int) - killed bool - id string - - rpcLoginMu sync.Mutex - reLoginRunning uint32 - - rpcConnectMu sync.Mutex -) - -const ( - ClientSingletonCall = "gorpcClientCall" - FuncClientSingletonCall = "gorpcDispatcherClientCall" -) - -type Config struct { - UseSSL bool `json:"use_ssl"` - SSLInsecureSkipVerify bool `json:"ssl_insecure_skip_verify"` - ConnectionString string `json:"connection_string"` - RPCKey string `json:"rpc_key"` - APIKey string `json:"api_key"` - GroupID string `json:"group_id"` - CallTimeout int `json:"call_timeout"` - PingTimeout int `json:"ping_timeout"` - RPCPoolSize int `json:"rpc_pool_size"` -} - -func IsEmergencyMode() bool { - return rpcEmergencyMode -} - -func LoadCount() int { - return rpcLoadCount -} - -func Reset() { - clientSingleton.Stop() - clientIsConnected = false - clientSingleton = nil - funcClientSingleton = nil - rpcLoadCount = 0 - rpcEmergencyMode = false - rpcEmergencyModeLoaded = false -} - -func ResetEmergencyMode() { - rpcEmergencyModeLoaded = false - rpcEmergencyMode = false -} - -func EmitErrorEvent(jobName string, funcName string, err error) { - if Instrument == nil { - return - } - - job := Instrument.NewJob(jobName) - if emitErr := job.EventErr(funcName, err); emitErr != nil { - Log.WithError(emitErr).WithFields(logrus.Fields{ - "jobName": jobName, - "funcName": funcName, - }) - } -} - -func EmitErrorEventKv(jobName string, funcName string, err error, kv map[string]string) { - if Instrument == nil { - return - } - - job := Instrument.NewJob(jobName) - if emitErr := job.EventErrKv(funcName, err, kv); emitErr != nil { - 
Log.WithError(emitErr).WithFields(logrus.Fields{ - "jobName": jobName, - "funcName": funcName, - "kv": kv, - }) - } -} - -// Connect will establish a connection to the RPC server specified in connection options -func Connect(connConfig Config, suppressRegister bool, dispatcherFuncs map[string]interface{}, - getGroupLoginFunc func(string, string) interface{}, - emergencyModeFunc func(), - emergencyModeLoadedFunc func()) bool { - rpcConnectMu.Lock() - defer rpcConnectMu.Unlock() - - config = connConfig - getGroupLoginCallback = getGroupLoginFunc - emergencyModeCallback = emergencyModeFunc - emergencyModeLoadedCallback = emergencyModeLoadedFunc - - if clientIsConnected { - Log.Debug("Using RPC singleton for connection") - return true - } - - if clientSingleton != nil { - return rpcEmergencyMode != true - } - - // RPC Client is unset - // Set up the cache - Log.Info("Setting new RPC connection!") - - connID := uuid.NewV4().String() - - // Length should fit into 1 byte. Protection if we decide change uuid in future. 
- if len(connID) > 255 { - panic("connID is too long") - } - - if config.UseSSL { - clientCfg := &tls.Config{ - InsecureSkipVerify: config.SSLInsecureSkipVerify, - } - - clientSingleton = gorpc.NewTLSClient(config.ConnectionString, clientCfg) - } else { - clientSingleton = gorpc.NewTCPClient(config.ConnectionString) - } - - if Log.Level != logrus.DebugLevel { - clientSingleton.LogError = gorpc.NilErrorLogger - } - - clientSingleton.OnConnect = onConnectFunc - - clientSingleton.Conns = config.RPCPoolSize - if clientSingleton.Conns == 0 { - clientSingleton.Conns = 20 - } - - clientSingleton.Dial = func(addr string) (conn net.Conn, err error) { - dialer := &net.Dialer{ - Timeout: 10 * time.Second, - KeepAlive: 30 * time.Second, - } - - useSSL := config.UseSSL - - if useSSL { - cfg := &tls.Config{ - InsecureSkipVerify: config.SSLInsecureSkipVerify, - } - - conn, err = tls.DialWithDialer(dialer, "tcp", addr, cfg) - } else { - conn, err = dialer.Dial("tcp", addr) - } - - if err != nil { - EmitErrorEventKv( - ClientSingletonCall, - "dial", - err, - map[string]string{ - "addr": addr, - "useSSL": strconv.FormatBool(useSSL), - }, - ) - return - } - - conn.Write([]byte("proto2")) - conn.Write([]byte{byte(len(connID))}) - conn.Write([]byte(connID)) - return conn, nil - } - clientSingleton.Start() - - loadDispatcher(dispatcherFuncs) - - if funcClientSingleton == nil { - funcClientSingleton = dispatcher.NewFuncClient(clientSingleton) - } - - if !Login() { - return false - } - - if !suppressRegister { - register() - go checkDisconnect() - } - - return true -} - -func reAttemptLogin(err error) bool { - if atomic.LoadUint32(&reLoginRunning) == 1 { - return false - } - atomic.StoreUint32(&reLoginRunning, 1) - - rpcLoginMu.Lock() - if rpcLoadCount == 0 && !rpcEmergencyModeLoaded { - Log.Warning("[RPC Store] --> Detected cold start, attempting to load from cache") - Log.Warning("[RPC Store] ----> Found APIs... 
beginning emergency load") - rpcEmergencyModeLoaded = true - if emergencyModeLoadedCallback != nil { - go emergencyModeLoadedCallback() - } - } - rpcLoginMu.Unlock() - - time.Sleep(time.Second * 3) - atomic.StoreUint32(&reLoginRunning, 0) - - if strings.Contains(err.Error(), "Cannot obtain response during timeout") { - reConnect() - return false - } - - Log.Warning("[RPC Store] Login failed, waiting 3s to re-attempt") - - return Login() -} - -func GroupLogin() bool { - if getGroupLoginCallback == nil { - Log.Error("GroupLogin call back is not set") - return false - } - - groupLoginData := getGroupLoginCallback(config.APIKey, config.GroupID) - ok, err := FuncClientSingleton("LoginWithGroup", groupLoginData) - if err != nil { - Log.WithError(err).Error("RPC Login failed") - EmitErrorEventKv( - FuncClientSingletonCall, - "LoginWithGroup", - err, - map[string]string{ - "GroupID": config.GroupID, - }, - ) - rpcEmergencyMode = true - go reAttemptLogin(err) - return false - } - - if ok == false { - Log.Error("RPC Login incorrect") - rpcEmergencyMode = true - go reAttemptLogin(errors.New("Login incorrect")) - return false - } - Log.Debug("[RPC Store] Group Login complete") - rpcLoadCount++ - - // Recovery - if rpcEmergencyMode { - rpcEmergencyMode = false - rpcEmergencyModeLoaded = false - if emergencyModeCallback != nil { - emergencyModeCallback() - } - } - - return true -} - -func Login() bool { - Log.Debug("[RPC Store] Login initiated") - - if len(config.APIKey) == 0 { - Log.Fatal("No API Key set!") - } - - // If we have a group ID, lets login as a group - if config.GroupID != "" { - return GroupLogin() - } - - ok, err := FuncClientSingleton("Login", config.APIKey) - if err != nil { - Log.WithError(err).Error("RPC Login failed") - EmitErrorEvent(FuncClientSingletonCall, "Login", err) - rpcEmergencyMode = true - go reAttemptLogin(err) - return false - } - - if ok == false { - Log.Error("RPC Login incorrect") - rpcEmergencyMode = true - go reAttemptLogin(errors.New("Login 
incorrect")) - return false - } - Log.Debug("[RPC Store] Login complete") - rpcLoadCount++ - - if rpcEmergencyMode { - rpcEmergencyMode = false - rpcEmergencyModeLoaded = false - if emergencyModeCallback != nil { - emergencyModeCallback() - } - } - - return true -} - -func FuncClientSingleton(funcName string, request interface{}) (interface{}, error) { - return funcClientSingleton.CallTimeout(funcName, request, GlobalRPCCallTimeout) -} - -func onConnectFunc(conn net.Conn) (net.Conn, string, error) { - clientSingletonMu.Lock() - defer clientSingletonMu.Unlock() - - clientIsConnected = true - remoteAddr := conn.RemoteAddr().String() - Log.WithField("remoteAddr", remoteAddr).Debug("connected to RPC server") - - return conn, remoteAddr, nil -} - -func Disconnect() bool { - clientIsConnected = false - return true -} - -func reConnect() { - // no-op, let the gorpc client handle it. -} - -func register() { - id = uuid.NewV4().String() - Log.Debug("RPC Client registered") -} - -func checkDisconnect() { - res := <-killChan - Log.WithField("res", res).Info("RPC Client disconnecting") - killed = true - Disconnect() -} - -func loadDispatcher(dispatcherFuncs map[string]interface{}) { - for funcName, funcBody := range dispatcherFuncs { - if addedFuncs[funcName] { - continue - } - dispatcher.AddFunc(funcName, funcBody) - addedFuncs[funcName] = true - } -} diff --git a/rpc_analytics_purger.go b/rpc_analytics_purger.go new file mode 100644 index 000000000000..658edef0431f --- /dev/null +++ b/rpc_analytics_purger.go @@ -0,0 +1,101 @@ +package main + +import ( + "encoding/json" + "time" + + "gopkg.in/vmihailenco/msgpack.v2" + + "github.com/TykTechnologies/tyk/config" + "github.com/TykTechnologies/tyk/storage" +) + +// Purger is an interface that will define how the in-memory store will be purged +// of analytics data to prevent it growing too large +type Purger interface { + PurgeCache() + PurgeLoop(<-chan time.Time) +} + +// RPCPurger will purge analytics data into a Mongo database, 
requires that the Mongo DB string is specified +// in the Config object +type RPCPurger struct { + Store storage.Handler +} + +// Connect Connects to RPC +func (r *RPCPurger) Connect() { + if RPCClientIsConnected && RPCCLientSingleton != nil && RPCFuncClientSingleton != nil { + log.Info("RPC Analytics client using singleton") + return + } +} + +// PurgeLoop starts the loop that will pull data out of the in-memory +// store and into RPC. +func (r RPCPurger) PurgeLoop(ticker <-chan time.Time) { + for { + <-ticker + r.PurgeCache() + } +} + +// PurgeCache will pull the data from the in-memory store and drop it into the specified MongoDB collection +func (r *RPCPurger) PurgeCache() { + if _, err := RPCFuncClientSingleton.Call("Ping", nil); err != nil { + log.Error("Can't purge cache, failed to ping RPC: ", err) + return + } + + analyticsValues := r.Store.GetAndDeleteSet(analyticsKeyName) + if len(analyticsValues) == 0 { + return + } + keys := make([]AnalyticsRecord, len(analyticsValues)) + + for i, v := range analyticsValues { + decoded := AnalyticsRecord{} + if err := msgpack.Unmarshal(v.([]byte), &decoded); err != nil { + log.Error("Couldn't unmarshal analytics data: ", err) + } else { + log.Debug("Decoded Record: ", decoded) + keys[i] = decoded + } + } + + data, err := json.Marshal(keys) + if err != nil { + log.Error("Failed to marshal analytics data") + return + } + + // Send keys to RPC + if _, err := RPCFuncClientSingleton.Call("PurgeAnalyticsData", string(data)); err != nil { + emitRPCErrorEvent(rpcFuncClientSingletonCall, "PurgeAnalyticsData", err) + log.Warn("Failed to call purge, retrying: ", err) + } + +} + +type RedisPurger struct { + Store storage.Handler +} + +func (r RedisPurger) PurgeLoop(ticker <-chan time.Time) { + for { + <-ticker + r.PurgeCache() + } +} + +func (r *RedisPurger) PurgeCache() { + expireAfter := config.Global().AnalyticsConfig.StorageExpirationTime + if expireAfter == 0 { + expireAfter = 60 // 1 minute + } + + exp, _ := 
r.Store.GetExp(analyticsKeyName) + if exp <= 0 { + r.Store.SetExp(analyticsKeyName, int64(expireAfter)) + } +} diff --git a/gateway/rpc_backup_handlers.go b/rpc_backup_handlers.go similarity index 99% rename from gateway/rpc_backup_handlers.go rename to rpc_backup_handlers.go index abc4a7e1e6bb..36998b520c18 100644 --- a/gateway/rpc_backup_handlers.go +++ b/rpc_backup_handlers.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "crypto/aes" diff --git a/gateway/rpc_storage_handler.go b/rpc_storage_handler.go similarity index 55% rename from gateway/rpc_storage_handler.go rename to rpc_storage_handler.go index 9232181e1cf5..821c8b36f6c0 100644 --- a/gateway/rpc_storage_handler.go +++ b/rpc_storage_handler.go @@ -1,15 +1,20 @@ -package gateway +package main import ( + "crypto/tls" + "errors" + "io" + "net" "strconv" "strings" + "sync" + "sync/atomic" "time" - cache "github.com/pmylund/go-cache" - - "github.com/TykTechnologies/tyk/rpc" - "github.com/garyburd/redigo/redis" + "github.com/lonelycode/gorpc" + cache "github.com/pmylund/go-cache" + "github.com/satori/go.uuid" "github.com/TykTechnologies/tyk/config" "github.com/TykTechnologies/tyk/storage" @@ -47,115 +52,191 @@ type GroupKeySpaceRequest struct { } var ( - dispatcherFuncs = map[string]interface{}{ - "Login": func(clientAddr, userKey string) bool { - return false - }, - "LoginWithGroup": func(clientAddr string, groupData *GroupLoginRequest) bool { - return false - }, - "GetKey": func(keyName string) (string, error) { - return "", nil - }, - "SetKey": func(ibd *InboundData) error { - return nil - }, - "GetExp": func(keyName string) (int64, error) { - return 0, nil - }, - "GetKeys": func(keyName string) ([]string, error) { - return nil, nil - }, - "DeleteKey": func(keyName string) (bool, error) { - return true, nil - }, - "DeleteRawKey": func(keyName string) (bool, error) { - return true, nil - }, - "GetKeysAndValues": func(searchString string) (*KeysValuesPair, error) { - return nil, nil - }, - 
"GetKeysAndValuesWithFilter": func(searchString string) (*KeysValuesPair, error) { - return nil, nil - }, - "DeleteKeys": func(keys []string) (bool, error) { - return true, nil - }, - "Decrement": func(keyName string) error { - return nil - }, - "IncrememntWithExpire": func(ibd *InboundData) (int64, error) { - return 0, nil - }, - "AppendToSet": func(ibd *InboundData) error { - return nil - }, - "SetRollingWindow": func(ibd *InboundData) (int, error) { - return 0, nil - }, - "GetApiDefinitions": func(dr *DefRequest) (string, error) { - return "", nil - }, - "GetPolicies": func(orgId string) (string, error) { - return "", nil - }, - "PurgeAnalyticsData": func(data string) error { - return nil - }, - "CheckReload": func(clientAddr, orgId string) (bool, error) { - return false, nil - }, - "GetKeySpaceUpdate": func(clientAddr, orgId string) ([]string, error) { - return nil, nil - }, - "GetGroupKeySpaceUpdate": func(clientAddr string, groupData *GroupKeySpaceRequest) ([]string, error) { - return nil, nil - }, - "Ping": func() bool { - return false - }, - } + // rpcLoadCount is a counter to check if this is a cold boot + rpcLoadCount int + rpcEmergencyMode bool + rpcEmergencyModeLoaded bool + + GlobalRPCCallTimeout time.Duration + GlobalRPCPingTimeout time.Duration ) // RPCStorageHandler is a storage manager that uses the redis database. type RPCStorageHandler struct { KeyPrefix string HashKeys bool + UserKey string + Address string + killChan chan int + Killed bool + Connected bool + ID string SuppressRegister bool } +func (r *RPCStorageHandler) Register() { + r.ID = uuid.NewV4().String() + r.killChan = make(chan int) + log.Debug("RPC Client registered") +} + +func (r *RPCStorageHandler) checkDisconnect() { + res := <-r.killChan + log.Info("RPC Client disconnecting: ", res) + r.Killed = true + r.Disconnect() +} + +func (r *RPCStorageHandler) ReConnect() { + // no-op, let the gorpc client handle it. 
+} + +var RPCCLientSingleton *gorpc.Client +var RPCCLientSingletonMu sync.Mutex +var RPCFuncClientSingleton *gorpc.DispatcherClient var RPCGlobalCache = cache.New(30*time.Second, 15*time.Second) +var RPCClientIsConnected bool + +const ( + rpcCLientSingletonCall = "gorpcClientCall" + rpcFuncClientSingletonCall = "gorpcDispatcherClientCall" +) + +func emitRPCErrorEvent(jobName string, funcName string, err error) { + job := instrument.NewJob(jobName) + if emitErr := job.EventErr(funcName, err); emitErr != nil { + log.WithError(emitErr).WithFields(logrus.Fields{ + "jobName": jobName, + "funcName": funcName, + }) + } +} + +func emitRPCErrorEventKv(jobName string, funcName string, err error, kv map[string]string) { + job := instrument.NewJob(jobName) + if emitErr := job.EventErrKv(funcName, err, kv); emitErr != nil { + log.WithError(emitErr).WithFields(logrus.Fields{ + "jobName": jobName, + "funcName": funcName, + "kv": kv, + }) + } +} + +var rpcConnectMu sync.Mutex -// Connect will establish a connection to the RPC +// Connect will establish a connection to the DB func (r *RPCStorageHandler) Connect() bool { - slaveOptions := config.Global().SlaveOptions - rpcConfig := rpc.Config{ - UseSSL: slaveOptions.UseSSL, - SSLInsecureSkipVerify: slaveOptions.SSLInsecureSkipVerify, - ConnectionString: slaveOptions.ConnectionString, - RPCKey: slaveOptions.RPCKey, - APIKey: slaveOptions.APIKey, - GroupID: slaveOptions.GroupID, - CallTimeout: slaveOptions.CallTimeout, - PingTimeout: slaveOptions.PingTimeout, - RPCPoolSize: slaveOptions.RPCPoolSize, - } - - return rpc.Connect( - rpcConfig, - r.SuppressRegister, - dispatcherFuncs, - func(userKey string, groupID string) interface{} { - return GroupLoginRequest{ - UserKey: userKey, - GroupID: groupID, + rpcConnectMu.Lock() + defer rpcConnectMu.Unlock() + + if RPCClientIsConnected { + log.Debug("Using RPC singleton for connection") + return true + } + + if RPCCLientSingleton != nil { + return rpcEmergencyMode != true + } + + // RPC Client 
is unset + // Set up the cache + log.Info("Setting new RPC connection!") + + connID := uuid.NewV4().String() + + // Length should fit into 1 byte. Protection if we decide change uuid in future. + if len(connID) > 255 { + panic("connID is too long") + } + + if slaveOptions := config.Global().SlaveOptions; slaveOptions.UseSSL { + clientCfg := &tls.Config{ + InsecureSkipVerify: slaveOptions.SSLInsecureSkipVerify, + } + + RPCCLientSingleton = gorpc.NewTLSClient(r.Address, clientCfg) + } else { + RPCCLientSingleton = gorpc.NewTCPClient(r.Address) + } + + if log.Level != logrus.DebugLevel { + RPCCLientSingleton.LogError = gorpc.NilErrorLogger + } + + RPCCLientSingleton.OnConnect = r.OnConnectFunc + + RPCCLientSingleton.Conns = config.Global().SlaveOptions.RPCPoolSize + if RPCCLientSingleton.Conns == 0 { + RPCCLientSingleton.Conns = 20 + } + + RPCCLientSingleton.Dial = func(addr string) (conn io.ReadWriteCloser, err error) { + dialer := &net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: 30 * time.Second, + } + + useSSL := config.Global().SlaveOptions.UseSSL + + if useSSL { + cfg := &tls.Config{ + InsecureSkipVerify: config.Global().SlaveOptions.SSLInsecureSkipVerify, } - }, - func() { - reloadURLStructure(nil) - }, - doReload, - ) + + conn, err = tls.DialWithDialer(dialer, "tcp", addr, cfg) + } else { + conn, err = dialer.Dial("tcp", addr) + } + + if err != nil { + emitRPCErrorEventKv( + rpcCLientSingletonCall, + "dial", + err, + map[string]string{ + "addr": addr, + "useSSL": strconv.FormatBool(useSSL), + }, + ) + return + } + + conn.Write([]byte("proto2")) + conn.Write([]byte{byte(len(connID))}) + conn.Write([]byte(connID)) + return conn, nil + } + RPCCLientSingleton.Start() + d := getDispatcher() + + if RPCFuncClientSingleton == nil { + RPCFuncClientSingleton = d.NewFuncClient(RPCCLientSingleton) + } + + if !r.Login() { + return false + } + + if !r.SuppressRegister { + r.Register() + go r.checkDisconnect() + } + + return true +} + +func (r *RPCStorageHandler) 
OnConnectFunc(remoteAddr string, rwc io.ReadWriteCloser) (io.ReadWriteCloser, error) { + RPCCLientSingletonMu.Lock() + defer RPCCLientSingletonMu.Unlock() + + RPCClientIsConnected = true + return rwc, nil +} + +func (r *RPCStorageHandler) Disconnect() bool { + RPCClientIsConnected = false + return true } func (r *RPCStorageHandler) hashKey(in string) string { @@ -179,6 +260,116 @@ func (r *RPCStorageHandler) cleanKey(keyName string) string { return setKeyName } +var rpcLoginMu sync.Mutex +var reLoginRunning uint32 + +func (r *RPCStorageHandler) ReAttemptLogin(err error) bool { + if atomic.LoadUint32(&reLoginRunning) == 1 { + return false + } + atomic.StoreUint32(&reLoginRunning, 1) + + rpcLoginMu.Lock() + if rpcLoadCount == 0 && !rpcEmergencyModeLoaded { + log.Warning("[RPC Store] --> Detected cold start, attempting to load from cache") + log.Warning("[RPC Store] ----> Found APIs... beginning emergency load") + rpcEmergencyModeLoaded = true + go doReload() + } + rpcLoginMu.Unlock() + + time.Sleep(time.Second * 3) + atomic.StoreUint32(&reLoginRunning, 0) + + if strings.Contains(err.Error(), "Cannot obtain response during timeout") { + r.ReConnect() + return false + } + + log.Warning("[RPC Store] Login failed, waiting 3s to re-attempt") + + return r.Login() +} + +func (r *RPCStorageHandler) GroupLogin() bool { + groupLoginData := GroupLoginRequest{ + UserKey: r.UserKey, + GroupID: config.Global().SlaveOptions.GroupID, + } + ok, err := RPCFuncClientSingleton.CallTimeout("LoginWithGroup", groupLoginData, GlobalRPCCallTimeout) + if err != nil { + log.Error("RPC Login failed: ", err) + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, + "LoginWithGroup", + err, + map[string]string{ + "GroupID": groupLoginData.GroupID, + }, + ) + rpcEmergencyMode = true + go r.ReAttemptLogin(err) + return false + } + + if ok == false { + log.Error("RPC Login incorrect") + rpcEmergencyMode = true + go r.ReAttemptLogin(errors.New("Login incorrect")) + return false + } + log.Debug("[RPC 
Store] Group Login complete") + rpcLoadCount++ + + // Recovery + if rpcEmergencyMode { + rpcEmergencyMode = false + rpcEmergencyModeLoaded = false + reloadURLStructure(nil) + } + + return true +} + +func (r *RPCStorageHandler) Login() bool { + log.Debug("[RPC Store] Login initiated") + + if len(r.UserKey) == 0 { + log.Fatal("No API Key set!") + } + + // If we have a group ID, lets login as a group + if config.Global().SlaveOptions.GroupID != "" { + return r.GroupLogin() + } + + ok, err := RPCFuncClientSingleton.CallTimeout("Login", r.UserKey, GlobalRPCCallTimeout) + if err != nil { + log.Error("RPC Login failed: ", err) + emitRPCErrorEvent(rpcFuncClientSingletonCall, "Login", err) + rpcEmergencyMode = true + go r.ReAttemptLogin(err) + return false + } + + if ok == false { + log.Error("RPC Login incorrect") + rpcEmergencyMode = true + go r.ReAttemptLogin(errors.New("Login incorrect")) + return false + } + log.Debug("[RPC Store] Login complete") + rpcLoadCount++ + + if rpcEmergencyMode { + rpcEmergencyMode = false + rpcEmergencyModeLoaded = false + reloadURLStructure(nil) + } + + return true +} + // GetKey will retrieve a key from the database func (r *RPCStorageHandler) GetKey(keyName string) (string, error) { start := time.Now() // get current time @@ -204,10 +395,10 @@ func (r *RPCStorageHandler) GetRawKey(keyName string) (string, error) { } } - value, err := rpc.FuncClientSingleton("GetKey", keyName) + value, err := RPCFuncClientSingleton.CallTimeout("GetKey", keyName, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "GetKey", err, map[string]string{ @@ -215,7 +406,7 @@ func (r *RPCStorageHandler) GetRawKey(keyName string) (string, error) { }, ) if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { return r.GetRawKey(keyName) } } @@ -232,10 +423,10 @@ func (r *RPCStorageHandler) GetRawKey(keyName string) (string, error) { func (r *RPCStorageHandler) 
GetExp(keyName string) (int64, error) { log.Debug("GetExp called") - value, err := rpc.FuncClientSingleton("GetExp", r.fixKey(keyName)) + value, err := RPCFuncClientSingleton.CallTimeout("GetExp", r.fixKey(keyName), GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "GetExp", err, map[string]string{ @@ -244,7 +435,7 @@ func (r *RPCStorageHandler) GetExp(keyName string) (int64, error) { }, ) if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { return r.GetExp(keyName) } } @@ -268,10 +459,10 @@ func (r *RPCStorageHandler) SetKey(keyName, session string, timeout int64) error Timeout: timeout, } - _, err := rpc.FuncClientSingleton("SetKey", ibd) + _, err := RPCFuncClientSingleton.CallTimeout("SetKey", ibd, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "SetKey", err, map[string]string{ @@ -281,7 +472,7 @@ func (r *RPCStorageHandler) SetKey(keyName, session string, timeout int64) error ) if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { return r.SetKey(keyName, session, timeout) } } @@ -303,10 +494,10 @@ func (r *RPCStorageHandler) SetRawKey(keyName, session string, timeout int64) er // Decrement will decrement a key in redis func (r *RPCStorageHandler) Decrement(keyName string) { log.Warning("Decrement called") - _, err := rpc.FuncClientSingleton("Decrement", keyName) + _, err := RPCFuncClientSingleton.CallTimeout("Decrement", keyName, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "Decrement", err, map[string]string{ @@ -315,7 +506,7 @@ func (r *RPCStorageHandler) Decrement(keyName string) { ) } if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { r.Decrement(keyName) return } @@ -330,10 +521,10 @@ func (r *RPCStorageHandler) 
IncrememntWithExpire(keyName string, expire int64) i Expire: expire, } - val, err := rpc.FuncClientSingleton("IncrememntWithExpire", ibd) + val, err := RPCFuncClientSingleton.CallTimeout("IncrememntWithExpire", ibd, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "IncrememntWithExpire", err, map[string]string{ @@ -342,7 +533,7 @@ func (r *RPCStorageHandler) IncrememntWithExpire(keyName string, expire int64) i ) } if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { return r.IncrememntWithExpire(keyName, expire) } } @@ -368,10 +559,10 @@ func (r *RPCStorageHandler) GetKeysAndValuesWithFilter(filter string) map[string searchStr := r.KeyPrefix + r.hashKey(filter) + "*" log.Debug("[STORE] Getting list by: ", searchStr) - kvPair, err := rpc.FuncClientSingleton("GetKeysAndValuesWithFilter", searchStr) + kvPair, err := RPCFuncClientSingleton.CallTimeout("GetKeysAndValuesWithFilter", searchStr, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "GetKeysAndValuesWithFilter", err, map[string]string{ @@ -380,7 +571,7 @@ func (r *RPCStorageHandler) GetKeysAndValuesWithFilter(filter string) map[string ) if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { return r.GetKeysAndValuesWithFilter(filter) } } @@ -402,12 +593,12 @@ func (r *RPCStorageHandler) GetKeysAndValues() map[string]string { searchStr := r.KeyPrefix + "*" - kvPair, err := rpc.FuncClientSingleton("GetKeysAndValues", searchStr) + kvPair, err := RPCFuncClientSingleton.CallTimeout("GetKeysAndValues", searchStr, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEvent(rpc.FuncClientSingletonCall, "GetKeysAndValues", err) + emitRPCErrorEvent(rpcFuncClientSingletonCall, "GetKeysAndValues", err) if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { return r.GetKeysAndValues() } } @@ -429,10 +620,10 @@ 
func (r *RPCStorageHandler) DeleteKey(keyName string) bool { log.Debug("DEL Key was: ", keyName) log.Debug("DEL Key became: ", r.fixKey(keyName)) - ok, err := rpc.FuncClientSingleton("DeleteKey", r.fixKey(keyName)) + ok, err := RPCFuncClientSingleton.CallTimeout("DeleteKey", r.fixKey(keyName), GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "DeleteKey", err, map[string]string{ @@ -442,7 +633,7 @@ func (r *RPCStorageHandler) DeleteKey(keyName string) bool { ) if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { return r.DeleteKey(keyName) } } @@ -453,10 +644,10 @@ func (r *RPCStorageHandler) DeleteKey(keyName string) bool { // DeleteKey will remove a key from the database without prefixing, assumes user knows what they are doing func (r *RPCStorageHandler) DeleteRawKey(keyName string) bool { - ok, err := rpc.FuncClientSingleton("DeleteRawKey", keyName) + ok, err := RPCFuncClientSingleton.CallTimeout("DeleteRawKey", keyName, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "DeleteRawKey", err, map[string]string{ @@ -465,7 +656,7 @@ func (r *RPCStorageHandler) DeleteRawKey(keyName string) bool { ) if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { return r.DeleteRawKey(keyName) } } @@ -483,10 +674,10 @@ func (r *RPCStorageHandler) DeleteKeys(keys []string) bool { } log.Debug("Deleting: ", asInterface) - ok, err := rpc.FuncClientSingleton("DeleteKeys", asInterface) + ok, err := RPCFuncClientSingleton.CallTimeout("DeleteKeys", asInterface, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "DeleteKeys", err, map[string]string{ @@ -496,7 +687,7 @@ func (r *RPCStorageHandler) DeleteKeys(keys []string) bool { ) if r.IsAccessError(err) { - if rpc.Login() { + if 
r.Login() { return r.DeleteKeys(keys) } } @@ -530,10 +721,10 @@ func (r *RPCStorageHandler) AppendToSet(keyName, value string) { Value: value, } - _, err := rpc.FuncClientSingleton("AppendToSet", ibd) + _, err := RPCFuncClientSingleton.CallTimeout("AppendToSet", ibd, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "AppendToSet", err, map[string]string{ @@ -542,7 +733,7 @@ func (r *RPCStorageHandler) AppendToSet(keyName, value string) { ) } if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { r.AppendToSet(keyName, value) } } @@ -565,10 +756,10 @@ func (r *RPCStorageHandler) SetRollingWindow(keyName string, per int64, val stri Expire: -1, } - intVal, err := rpc.FuncClientSingleton("SetRollingWindow", ibd) + intVal, err := RPCFuncClientSingleton.CallTimeout("SetRollingWindow", ibd, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "SetRollingWindow", err, map[string]string{ @@ -578,7 +769,7 @@ func (r *RPCStorageHandler) SetRollingWindow(keyName string, per int64, val stri ) if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { return r.SetRollingWindow(keyName, per, val, false) } } @@ -628,10 +819,10 @@ func (r *RPCStorageHandler) GetApiDefinitions(orgId string, tags []string) strin Tags: tags, } - defString, err := rpc.FuncClientSingleton("GetApiDefinitions", dr) + defString, err := RPCFuncClientSingleton.CallTimeout("GetApiDefinitions", dr, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "GetApiDefinitions", err, map[string]string{ @@ -641,7 +832,7 @@ func (r *RPCStorageHandler) GetApiDefinitions(orgId string, tags []string) strin ) if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { return r.GetApiDefinitions(orgId, tags) } } @@ -659,10 +850,10 @@ 
func (r *RPCStorageHandler) GetApiDefinitions(orgId string, tags []string) strin // GetPolicies will pull Policies from the RPC server func (r *RPCStorageHandler) GetPolicies(orgId string) string { - defString, err := rpc.FuncClientSingleton("GetPolicies", orgId) + defString, err := RPCFuncClientSingleton.CallTimeout("GetPolicies", orgId, GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "GetPolicies", err, map[string]string{ @@ -671,7 +862,7 @@ func (r *RPCStorageHandler) GetPolicies(orgId string) string { ) if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { return r.GetPolicies(orgId) } } @@ -688,10 +879,10 @@ func (r *RPCStorageHandler) GetPolicies(orgId string) string { // CheckForReload will start a long poll func (r *RPCStorageHandler) CheckForReload(orgId string) { log.Debug("[RPC STORE] Check Reload called...") - reload, err := rpc.FuncClientSingleton("CheckReload", orgId) + reload, err := RPCFuncClientSingleton.CallTimeout("CheckReload", orgId, GlobalRPCPingTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, "CheckReload", err, map[string]string{ @@ -700,7 +891,7 @@ func (r *RPCStorageHandler) CheckForReload(orgId string) { ) if r.IsAccessError(err) { log.Warning("[RPC STORE] CheckReload: Not logged in") - if rpc.Login() { + if r.Login() { r.CheckForReload(orgId) } } else if !strings.Contains(err.Error(), "Cannot obtain response during") { @@ -742,7 +933,7 @@ func (r *RPCStorageHandler) StartRPCKeepaliveWatcher() { }).Info("Can't connect to RPC layer") if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { continue } } @@ -780,20 +971,20 @@ func (r *RPCStorageHandler) CheckForKeyspaceChanges(orgId string) { reqData["GroupID"] = groupID } - keys, err = rpc.FuncClientSingleton(funcName, req) + keys, err = RPCFuncClientSingleton.CallTimeout(funcName, req, 
GlobalRPCCallTimeout) if err != nil { - rpc.EmitErrorEventKv( - rpc.FuncClientSingletonCall, + emitRPCErrorEventKv( + rpcFuncClientSingletonCall, funcName, err, reqData, ) if r.IsAccessError(err) { - if rpc.Login() { + if r.Login() { r.CheckForKeyspaceChanges(orgId) } } - log.Warning("Keyspace warning: ", err) + log.Warning("Keyspace warning: ", err) return } @@ -819,18 +1010,26 @@ func getSessionAndCreate(keyName string, r *RPCStorageHandler) { } func (r *RPCStorageHandler) ProcessKeySpaceChanges(keys []string) { + keysToReset := map[string]bool{} + for _, key := range keys { splitKeys := strings.Split(key, ":") - if len(splitKeys) > 1 { + if len(splitKeys) > 1 && splitKeys[1] == "resetQuota" { + keysToReset[splitKeys[0]] = true + } + } + + for _, key := range keys { + splitKeys := strings.Split(key, ":") + _, resetQuota := keysToReset[splitKeys[0]] + if len(splitKeys) > 1 && splitKeys[1] == "hashed" { key = splitKeys[0] - if splitKeys[1] == "hashed" { - log.Info("--> removing cached (hashed) key: ", splitKeys[0]) - handleDeleteHashedKey(splitKeys[0], "") - getSessionAndCreate(splitKeys[0], r) - } + log.Info("--> removing cached (hashed) key: ", splitKeys[0]) + handleDeleteHashedKey(splitKeys[0], "", resetQuota) + getSessionAndCreate(splitKeys[0], r) } else { log.Info("--> removing cached key: ", key) - handleDeleteKey(key, "-1", resetQuota) + handleDeleteKey(key, "-1", resetQuota) getSessionAndCreate(splitKeys[0], r) } SessionCache.Delete(key) @@ -867,3 +1066,98 @@ func (r *RPCStorageHandler) RemoveSortedSetRange(keyName, scoreFrom, scoreTo str log.Error("RPCStorageHandler.RemoveSortedSetRange - Not implemented") return nil } + +func getDispatcher() *gorpc.Dispatcher { + dispatch := gorpc.NewDispatcher() + + dispatch.AddFunc("Login", func(clientAddr, userKey string) bool { + return false + }) + + dispatch.AddFunc("LoginWithGroup", func(clientAddr string, groupData *GroupLoginRequest) bool { + return false + }) + + dispatch.AddFunc("GetKey", func(keyName string) (string, 
error) { + return "", nil + }) + + dispatch.AddFunc("SetKey", func(ibd *InboundData) error { + return nil + }) + + dispatch.AddFunc("GetExp", func(keyName string) (int64, error) { + return 0, nil + }) + + dispatch.AddFunc("GetKeys", func(keyName string) ([]string, error) { + return nil, nil + }) + + dispatch.AddFunc("DeleteKey", func(keyName string) (bool, error) { + return true, nil + }) + + dispatch.AddFunc("DeleteRawKey", func(keyName string) (bool, error) { + return true, nil + }) + + dispatch.AddFunc("GetKeysAndValues", func(searchString string) (*KeysValuesPair, error) { + return nil, nil + }) + + dispatch.AddFunc("GetKeysAndValuesWithFilter", func(searchString string) (*KeysValuesPair, error) { + return nil, nil + }) + + dispatch.AddFunc("DeleteKeys", func(keys []string) (bool, error) { + return true, nil + }) + + dispatch.AddFunc("Decrement", func(keyName string) error { + return nil + }) + + dispatch.AddFunc("IncrememntWithExpire", func(ibd *InboundData) (int64, error) { + return 0, nil + }) + + dispatch.AddFunc("AppendToSet", func(ibd *InboundData) error { + return nil + }) + + dispatch.AddFunc("SetRollingWindow", func(ibd *InboundData) (int, error) { + return 0, nil + }) + + dispatch.AddFunc("GetApiDefinitions", func(dr *DefRequest) (string, error) { + return "", nil + }) + + dispatch.AddFunc("GetPolicies", func(orgId string) (string, error) { + return "", nil + }) + + dispatch.AddFunc("PurgeAnalyticsData", func(data string) error { + return nil + }) + + dispatch.AddFunc("CheckReload", func(clientAddr, orgId string) (bool, error) { + return false, nil + }) + + dispatch.AddFunc("GetKeySpaceUpdate", func(clientAddr, orgId string) ([]string, error) { + return nil, nil + }) + + dispatch.AddFunc("GetGroupKeySpaceUpdate", func(clientAddr string, groupData *GroupKeySpaceRequest) ([]string, error) { + return nil, nil + }) + + dispatch.AddFunc("Ping", func() bool { + return false + }) + + return dispatch + +} diff --git a/gateway/rpc_test.go b/rpc_test.go 
similarity index 92% rename from gateway/rpc_test.go rename to rpc_test.go index c87c51efaa92..0dfa4d4e9b97 100644 --- a/gateway/rpc_test.go +++ b/rpc_test.go @@ -1,6 +1,6 @@ // +build !race -package gateway +package main import ( "testing" @@ -9,16 +9,14 @@ import ( "github.com/gorilla/mux" "github.com/TykTechnologies/tyk/cli" - - "github.com/TykTechnologies/gorpc" "github.com/TykTechnologies/tyk/config" - "github.com/TykTechnologies/tyk/rpc" "github.com/TykTechnologies/tyk/test" + "github.com/lonelycode/gorpc" ) func startRPCMock(dispatcher *gorpc.Dispatcher) *gorpc.Server { - rpc.GlobalRPCCallTimeout = 100 * time.Millisecond + GlobalRPCCallTimeout = 100 * time.Millisecond globalConf := config.Global() globalConf.SlaveOptions.UseRPC = true @@ -60,7 +58,13 @@ func stopRPCMock(server *gorpc.Server) { server.Stop() } - rpc.Reset() + RPCCLientSingleton.Stop() + RPCClientIsConnected = false + RPCCLientSingleton = nil + RPCFuncClientSingleton = nil + rpcLoadCount = 0 + rpcEmergencyMode = false + rpcEmergencyModeLoaded = false } const apiDefListTest = `[{ @@ -117,7 +121,7 @@ const apiDefListTest2 = `[{ }]` func TestSyncAPISpecsRPCFailure_CheckGlobals(t *testing.T) { - // Test RPC + // Mock RPC callCount := 0 dispatcher := gorpc.NewDispatcher() dispatcher.AddFunc("GetApiDefinitions", func(clientAddr string, dr *DefRequest) (string, error) { @@ -178,7 +182,7 @@ func TestSyncAPISpecsRPCFailure_CheckGlobals(t *testing.T) { // Our RPC layer too racy, but not harmul, mostly global variables like RPCIsClientConnected func TestSyncAPISpecsRPCFailure(t *testing.T) { - // Test RPC + // Mock RPC dispatcher := gorpc.NewDispatcher() dispatcher.AddFunc("GetApiDefinitions", func(clientAddr string, dr *DefRequest) (string, error) { return "malformed json", nil @@ -197,10 +201,10 @@ func TestSyncAPISpecsRPCFailure(t *testing.T) { } func TestSyncAPISpecsRPCSuccess(t *testing.T) { - // Test RPC + // Mock RPC dispatcher := gorpc.NewDispatcher() dispatcher.AddFunc("GetApiDefinitions", 
func(clientAddr string, dr *DefRequest) (string, error) { - return jsonMarshalString(BuildAPI(func(spec *APISpec) { + return jsonMarshalString(buildAPI(func(spec *APISpec) { spec.UseKeylessAccess = false })), nil }) @@ -211,13 +215,13 @@ func TestSyncAPISpecsRPCSuccess(t *testing.T) { return true }) dispatcher.AddFunc("GetKey", func(clientAddr, key string) (string, error) { - return jsonMarshalString(CreateStandardSession()), nil + return jsonMarshalString(createStandardSession()), nil }) t.Run("RPC is live", func(t *testing.T) { rpc := startRPCMock(dispatcher) defer stopRPCMock(rpc) - ts := StartTest() + ts := newTykTestServer() defer ts.Close() apiBackup, _ := LoadDefinitionsFromRPCBackup() @@ -252,13 +256,13 @@ func TestSyncAPISpecsRPCSuccess(t *testing.T) { config.SetGlobal(globalConf) // RPC layer is down - ts := StartTest() + ts := newTykTestServer() defer ts.Close() // Wait for backup to load time.Sleep(100 * time.Millisecond) select { - case ReloadTick <- time.Time{}: + case reloadTick <- time.Time{}: case <-time.After(100 * time.Millisecond): } time.Sleep(100 * time.Millisecond) @@ -275,11 +279,12 @@ func TestSyncAPISpecsRPCSuccess(t *testing.T) { }) t.Run("RPC is back, hard reload", func(t *testing.T) { - rpc.ResetEmergencyMode() + rpcEmergencyModeLoaded = false + rpcEmergencyMode = false dispatcher := gorpc.NewDispatcher() dispatcher.AddFunc("GetApiDefinitions", func(clientAddr string, dr *DefRequest) (string, error) { - return jsonMarshalString(BuildAPI( + return jsonMarshalString(buildAPI( func(spec *APISpec) { spec.UseKeylessAccess = false }, func(spec *APISpec) { spec.UseKeylessAccess = false }, )), nil @@ -291,12 +296,12 @@ func TestSyncAPISpecsRPCSuccess(t *testing.T) { return true }) dispatcher.AddFunc("GetKey", func(clientAddr, key string) (string, error) { - return jsonMarshalString(CreateStandardSession()), nil + return jsonMarshalString(createStandardSession()), nil }) // Back to live rpc := startRPCMock(dispatcher) defer stopRPCMock(rpc) - ts 
:= StartTest() + ts := newTykTestServer() defer ts.Close() time.Sleep(100 * time.Millisecond) @@ -319,7 +324,7 @@ func TestSyncAPISpecsRPCSuccess(t *testing.T) { t.Run("RPC is back, live reload", func(t *testing.T) { rpc := startRPCMock(dispatcher) - ts := StartTest() + ts := newTykTestServer() defer ts.Close() time.Sleep(100 * time.Millisecond) diff --git a/sds.c b/sds.c new file mode 120000 index 000000000000..f92f8eb92946 --- /dev/null +++ b/sds.c @@ -0,0 +1 @@ +coprocess/sds/sds.c \ No newline at end of file diff --git a/gateway/service_discovery.go b/service_discovery.go similarity index 99% rename from gateway/service_discovery.go rename to service_discovery.go index 8ec2846d78c1..f51a1309c1f2 100644 --- a/gateway/service_discovery.go +++ b/service_discovery.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "io/ioutil" diff --git a/gateway/service_discovery_test.go b/service_discovery_test.go similarity index 99% rename from gateway/service_discovery_test.go rename to service_discovery_test.go index e262729a0b60..0836004e47b7 100644 --- a/gateway/service_discovery_test.go +++ b/service_discovery_test.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "testing" diff --git a/gateway/session_manager.go b/session_manager.go similarity index 99% rename from gateway/session_manager.go rename to session_manager.go index a625e74f8b4b..3b307723e272 100644 --- a/gateway/session_manager.go +++ b/session_manager.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "net/http" diff --git a/storage/redis_cluster.go b/storage/redis_cluster.go index 0260a61293d6..3c81bfd89b1c 100644 --- a/storage/redis_cluster.go +++ b/storage/redis_cluster.go @@ -9,7 +9,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/garyburd/redigo/redis" - uuid "github.com/satori/go.uuid" + "github.com/satori/go.uuid" "github.com/TykTechnologies/redigocluster/rediscluster" "github.com/TykTechnologies/tyk/config" @@ -25,8 +25,7 @@ const ( ) var ( - redisSingletonMu sync.RWMutex 
- + redisSingletonMu sync.RWMutex redisClusterSingleton *rediscluster.RedisCluster redisCacheClusterSingleton *rediscluster.RedisCluster ) @@ -156,7 +155,7 @@ func NewRedisClusterPool(isCache bool) *rediscluster.RedisCluster { } // Connect will establish a connection to the r.singleton() -func (r *RedisCluster) Connect() bool { +func (r RedisCluster) Connect() bool { redisSingletonMu.Lock() defer redisSingletonMu.Unlock() disconnected := redisClusterSingleton == nil @@ -177,7 +176,7 @@ func (r *RedisCluster) Connect() bool { return true } -func (r *RedisCluster) singleton() *rediscluster.RedisCluster { +func (r RedisCluster) singleton() *rediscluster.RedisCluster { redisSingletonMu.RLock() defer redisSingletonMu.RUnlock() if r.IsCache { @@ -186,7 +185,7 @@ func (r *RedisCluster) singleton() *rediscluster.RedisCluster { return redisClusterSingleton } -func (r *RedisCluster) hashKey(in string) string { +func (r RedisCluster) hashKey(in string) string { if !r.HashKeys { // Not hashing? Return the raw key return in @@ -194,15 +193,15 @@ func (r *RedisCluster) hashKey(in string) string { return HashStr(in) } -func (r *RedisCluster) fixKey(keyName string) string { +func (r RedisCluster) fixKey(keyName string) string { return r.KeyPrefix + r.hashKey(keyName) } -func (r *RedisCluster) cleanKey(keyName string) string { +func (r RedisCluster) cleanKey(keyName string) string { return strings.Replace(keyName, r.KeyPrefix, "", 1) } -func (r *RedisCluster) ensureConnection() { +func (r RedisCluster) ensureConnection() { if r.singleton() != nil { // already connected return @@ -219,14 +218,11 @@ func (r *RedisCluster) ensureConnection() { } // GetKey will retrieve a key from the database -func (r *RedisCluster) GetKey(keyName string) (string, error) { +func (r RedisCluster) GetKey(keyName string) (string, error) { r.ensureConnection() log.Debug("[STORE] Getting WAS: ", keyName) log.Debug("[STORE] Getting: ", r.fixKey(keyName)) - cluster := r.singleton() - - value, err := 
redis.String(cluster.Do("GET", r.fixKey(keyName))) - + value, err := redis.String(r.singleton().Do("GET", r.fixKey(keyName))) if err != nil { log.Debug("Error trying to get value:", err) return "", ErrKeyNotFound @@ -235,12 +231,12 @@ func (r *RedisCluster) GetKey(keyName string) (string, error) { return value, nil } -func (r *RedisCluster) GetKeyTTL(keyName string) (ttl int64, err error) { +func (r RedisCluster) GetKeyTTL(keyName string) (ttl int64, err error) { r.ensureConnection() return redis.Int64(r.singleton().Do("TTL", r.fixKey(keyName))) } -func (r *RedisCluster) GetRawKey(keyName string) (string, error) { +func (r RedisCluster) GetRawKey(keyName string) (string, error) { r.ensureConnection() value, err := redis.String(r.singleton().Do("GET", keyName)) if err != nil { @@ -251,7 +247,7 @@ func (r *RedisCluster) GetRawKey(keyName string) (string, error) { return value, nil } -func (r *RedisCluster) GetExp(keyName string) (int64, error) { +func (r RedisCluster) GetExp(keyName string) (int64, error) { log.Debug("Getting exp for key: ", r.fixKey(keyName)) r.ensureConnection() @@ -263,7 +259,7 @@ func (r *RedisCluster) GetExp(keyName string) (int64, error) { return value, nil } -func (r *RedisCluster) SetExp(keyName string, timeout int64) error { +func (r RedisCluster) SetExp(keyName string, timeout int64) error { _, err := r.singleton().Do("EXPIRE", r.fixKey(keyName), timeout) if err != nil { log.Error("Could not EXPIRE key: ", err) @@ -272,7 +268,7 @@ func (r *RedisCluster) SetExp(keyName string, timeout int64) error { } // SetKey will create (or update) a key value in the store -func (r *RedisCluster) SetKey(keyName, session string, timeout int64) error { +func (r RedisCluster) SetKey(keyName, session string, timeout int64) error { log.Debug("[STORE] SET Raw key is: ", keyName) log.Debug("[STORE] Setting key: ", r.fixKey(keyName)) @@ -290,7 +286,7 @@ func (r *RedisCluster) SetKey(keyName, session string, timeout int64) error { return nil } -func (r 
*RedisCluster) SetRawKey(keyName, session string, timeout int64) error { +func (r RedisCluster) SetRawKey(keyName, session string, timeout int64) error { r.ensureConnection() _, err := r.singleton().Do("SET", keyName, session) if timeout > 0 { @@ -308,7 +304,7 @@ func (r *RedisCluster) SetRawKey(keyName, session string, timeout int64) error { } // Decrement will decrement a key in redis -func (r *RedisCluster) Decrement(keyName string) { +func (r RedisCluster) Decrement(keyName string) { keyName = r.fixKey(keyName) log.Debug("Decrementing key: ", keyName) r.ensureConnection() @@ -319,7 +315,7 @@ func (r *RedisCluster) Decrement(keyName string) { } // IncrementWithExpire will increment a key in redis -func (r *RedisCluster) IncrememntWithExpire(keyName string, expire int64) int64 { +func (r RedisCluster) IncrememntWithExpire(keyName string, expire int64) int64 { log.Debug("Incrementing raw key: ", keyName) r.ensureConnection() // This function uses a raw key, so we shouldn't call fixKey @@ -337,7 +333,7 @@ func (r *RedisCluster) IncrememntWithExpire(keyName string, expire int64) int64 } // GetKeys will return all keys according to the filter (filter is a prefix - e.g. 
tyk.keys.*) -func (r *RedisCluster) GetKeys(filter string) []string { +func (r RedisCluster) GetKeys(filter string) []string { r.ensureConnection() filterHash := "" if filter != "" { @@ -358,7 +354,7 @@ func (r *RedisCluster) GetKeys(filter string) []string { } // GetKeysAndValuesWithFilter will return all keys and their values with a filter -func (r *RedisCluster) GetKeysAndValuesWithFilter(filter string) map[string]string { +func (r RedisCluster) GetKeysAndValuesWithFilter(filter string) map[string]string { r.ensureConnection() filterHash := "" if filter != "" { @@ -391,7 +387,7 @@ func (r *RedisCluster) GetKeysAndValuesWithFilter(filter string) map[string]stri } // GetKeysAndValues will return all keys and their values - not to be used lightly -func (r *RedisCluster) GetKeysAndValues() map[string]string { +func (r RedisCluster) GetKeysAndValues() map[string]string { r.ensureConnection() searchStr := r.KeyPrefix + "*" sessionsInterface, err := r.singleton().Do("KEYS", searchStr) @@ -415,7 +411,7 @@ func (r *RedisCluster) GetKeysAndValues() map[string]string { } // DeleteKey will remove a key from the database -func (r *RedisCluster) DeleteKey(keyName string) bool { +func (r RedisCluster) DeleteKey(keyName string) bool { r.ensureConnection() log.Debug("DEL Key was: ", keyName) log.Debug("DEL Key became: ", r.fixKey(keyName)) @@ -428,7 +424,7 @@ func (r *RedisCluster) DeleteKey(keyName string) bool { } // DeleteKey will remove a key from the database without prefixing, assumes user knows what they are doing -func (r *RedisCluster) DeleteRawKey(keyName string) bool { +func (r RedisCluster) DeleteRawKey(keyName string) bool { r.ensureConnection() _, err := r.singleton().Do("DEL", keyName) if err != nil { @@ -439,7 +435,7 @@ func (r *RedisCluster) DeleteRawKey(keyName string) bool { } // DeleteKeys will remove a group of keys in bulk -func (r *RedisCluster) DeleteScanMatch(pattern string) bool { +func (r RedisCluster) DeleteScanMatch(pattern string) bool { 
r.ensureConnection() log.Debug("Deleting: ", pattern) @@ -484,7 +480,7 @@ func (r *RedisCluster) DeleteScanMatch(pattern string) bool { } // DeleteKeys will remove a group of keys in bulk -func (r *RedisCluster) DeleteKeys(keys []string) bool { +func (r RedisCluster) DeleteKeys(keys []string) bool { r.ensureConnection() if len(keys) > 0 { asInterface := make([]interface{}, len(keys)) @@ -506,19 +502,18 @@ func (r *RedisCluster) DeleteKeys(keys []string) bool { // StartPubSubHandler will listen for a signal and run the callback for // every subscription and message event. -func (r *RedisCluster) StartPubSubHandler(channel string, callback func(interface{})) error { - cluster := r.singleton() - if cluster == nil { +func (r RedisCluster) StartPubSubHandler(channel string, callback func(interface{})) error { + if r.singleton() == nil { return errors.New("Redis connection failed") } - handle := cluster.RandomRedisHandle() + handle := r.singleton().RandomRedisHandle() if handle == nil { - return errors.New("Redis connection failed. 
Handle is nil") + return errors.New("Redis connection failed") } psc := redis.PubSubConn{ - Conn: handle.Pool.Get(), + Conn: r.singleton().RandomRedisHandle().Pool.Get(), } if err := psc.Subscribe(channel); err != nil { return err @@ -538,7 +533,7 @@ func (r *RedisCluster) StartPubSubHandler(channel string, callback func(interfac } } -func (r *RedisCluster) Publish(channel, message string) error { +func (r RedisCluster) Publish(channel, message string) error { r.ensureConnection() _, err := r.singleton().Do("PUBLISH", channel, message) if err != nil { @@ -548,7 +543,7 @@ func (r *RedisCluster) Publish(channel, message string) error { return nil } -func (r *RedisCluster) GetAndDeleteSet(keyName string) []interface{} { +func (r RedisCluster) GetAndDeleteSet(keyName string) []interface{} { log.Debug("Getting raw key set: ", keyName) r.ensureConnection() log.Debug("keyName is: ", keyName) @@ -581,7 +576,7 @@ func (r *RedisCluster) GetAndDeleteSet(keyName string) []interface{} { return vals } -func (r *RedisCluster) AppendToSet(keyName, value string) { +func (r RedisCluster) AppendToSet(keyName, value string) { fixedKey := r.fixKey(keyName) log.WithField("keyName", keyName).Debug("Pushing to raw key list") log.WithField("fixedKey", fixedKey).Debug("Appending to fixed key list") @@ -591,7 +586,7 @@ func (r *RedisCluster) AppendToSet(keyName, value string) { } } -func (r *RedisCluster) AppendToSetPipelined(key string, values []string) { +func (r RedisCluster) AppendToSetPipelined(key string, values []string) { if len(values) == 0 { return } @@ -617,7 +612,7 @@ func (r *RedisCluster) AppendToSetPipelined(key string, values []string) { } } -func (r *RedisCluster) GetSet(keyName string) (map[string]string, error) { +func (r RedisCluster) GetSet(keyName string) (map[string]string, error) { log.Debug("Getting from key set: ", keyName) log.Debug("Getting from fixed key set: ", r.fixKey(keyName)) r.ensureConnection() @@ -636,7 +631,7 @@ func (r *RedisCluster) GetSet(keyName 
string) (map[string]string, error) { return vals, nil } -func (r *RedisCluster) AddToSet(keyName, value string) { +func (r RedisCluster) AddToSet(keyName, value string) { log.Debug("Pushing to raw key set: ", keyName) log.Debug("Pushing to fixed key set: ", r.fixKey(keyName)) r.ensureConnection() @@ -647,7 +642,7 @@ func (r *RedisCluster) AddToSet(keyName, value string) { } } -func (r *RedisCluster) RemoveFromSet(keyName, value string) { +func (r RedisCluster) RemoveFromSet(keyName, value string) { log.Debug("Removing from raw key set: ", keyName) log.Debug("Removing from fixed key set: ", r.fixKey(keyName)) r.ensureConnection() @@ -658,7 +653,7 @@ func (r *RedisCluster) RemoveFromSet(keyName, value string) { } } -func (r *RedisCluster) IsMemberOfSet(keyName, value string) bool { +func (r RedisCluster) IsMemberOfSet(keyName, value string) bool { r.ensureConnection() val, err := redis.Int64(r.singleton().Do("SISMEMBER", r.fixKey(keyName), value)) @@ -673,7 +668,7 @@ func (r *RedisCluster) IsMemberOfSet(keyName, value string) bool { } // SetRollingWindow will append to a sorted set in redis and extract a timed window of values -func (r *RedisCluster) SetRollingWindow(keyName string, per int64, value_override string, pipeline bool) (int, []interface{}) { +func (r RedisCluster) SetRollingWindow(keyName string, per int64, value_override string, pipeline bool) (int, []interface{}) { log.Debug("Incrementing raw key: ", keyName) r.ensureConnection() log.Debug("keyName is: ", keyName) @@ -775,12 +770,12 @@ func (r RedisCluster) GetRollingWindow(keyName string, per int64, pipeline bool) } // GetPrefix returns storage key prefix -func (r *RedisCluster) GetKeyPrefix() string { +func (r RedisCluster) GetKeyPrefix() string { return r.KeyPrefix } // AddToSortedSet adds value with given score to sorted set identified by keyName -func (r *RedisCluster) AddToSortedSet(keyName, value string, score float64) { +func (r RedisCluster) AddToSortedSet(keyName, value string, score float64) 
{ fixedKey := r.fixKey(keyName) logEntry := logrus.Fields{ "keyName": keyName, @@ -795,7 +790,7 @@ func (r *RedisCluster) AddToSortedSet(keyName, value string, score float64) { } // GetSortedSetRange gets range of elements of sorted set identified by keyName -func (r *RedisCluster) GetSortedSetRange(keyName, scoreFrom, scoreTo string) ([]string, []float64, error) { +func (r RedisCluster) GetSortedSetRange(keyName, scoreFrom, scoreTo string) ([]string, []float64, error) { fixedKey := r.fixKey(keyName) logEntry := logrus.Fields{ "keyName": keyName, @@ -827,7 +822,7 @@ func (r *RedisCluster) GetSortedSetRange(keyName, scoreFrom, scoreTo string) ([] } // RemoveSortedSetRange removes range of elements from sorted set identified by keyName -func (r *RedisCluster) RemoveSortedSetRange(keyName, scoreFrom, scoreTo string) error { +func (r RedisCluster) RemoveSortedSetRange(keyName, scoreFrom, scoreTo string) error { fixedKey := r.fixKey(keyName) logEntry := logrus.Fields{ "keyName": keyName, diff --git a/storage/storage.go b/storage/storage.go index 7841aeb277b7..69f88861b346 100644 --- a/storage/storage.go +++ b/storage/storage.go @@ -10,7 +10,7 @@ import ( "strings" "github.com/buger/jsonparser" - uuid "github.com/satori/go.uuid" + "github.com/satori/go.uuid" "github.com/TykTechnologies/murmur3" "github.com/TykTechnologies/tyk/config" diff --git a/swagger.yml b/swagger.yml deleted file mode 100644 index 2b76fa1cfff9..000000000000 --- a/swagger.yml +++ /dev/null @@ -1,2627 +0,0 @@ -openapi: 3.0.0 -info: - title: Tyk Gateway API - version: 2.8.0 - description: |- - The Tyk Gateway REST API is the primary means for integrating your application with the Tyk API Gateway system. This API is very small, and has no granular permissions system. It is intended to be used purely for internal automation and integration. 
- - **Warning: Under no circumstances should outside parties be granted access to this API.** - - The Tyk Gateway API is capable of: - - * Managing session objects (key generation) - * Managing and listing policies - * Managing and listing API Definitions (only when not using the Dashboard) - * Hot reloads / reloading a cluster configuration - * OAuth client creation (only when not using the Dashboard) - - - In order to use the REST API, you'll need to set the `secret` parameter in your tyk.conf file. - - The shared secret you set should then be sent along as a header with each REST API Request in order for it to be successful: - - ``` - x-tyk-authorization: - ``` -
- The Tyk Gateway API is subsumed by the Tyk Dashboard API in Pro installations. -servers: - - url: 'http://localhost/' - - url: 'https://localhost/' -tags: - - name: Keys - description: |- - All keys that are used to access services via Tyk correspond to a session object that informs Tyk about the context of this particular token, like access rules and rate/quota allowance. - -

Hashed Keys Environment

- - Listing tokens is only possible if you set `enable_hashed_keys_listing` to `true`. See [Using Hashed Keys Environment Endpoints](https://tyk.io/docs/security/#a-name-key-hashing-a-key-hashing) section for more details. - - - endpoints `POST /tyk/keys/create`, `POST /tyk/keys` and `POST /tyk/keys/{keyName}` return `"key_hash"` for future use. - - endpoint `GET /tyk/keys` retrieves all (or per API) key hashes. You can disable this endpoint by using the `tyk.conf` setting `enable_hashed_keys_listing` (set to false by default). - - endpoint `GET /tyk/keys/{keyName}` is able to get a key by its hash. You need to provide the key hash as a `keyName`. - and call it with the optional query parameter `hashed=true`. So the format is `GET /tyk/keys/{keyName}?hashed=true"`. - - The same optional parameter is available for the `DELETE /tyk/keys/{keyName}?hashed=true` endpoint. - -

Example: Import Existing Keys into Tyk

- - - - You can use the `PUT /tyk/keys/{KEY_ID}` endpoint as defined below to import existing keys into Tyk. - - This example uses standard `authorization` header authentication, and assumes that the Dashboard is located at `127.0.0.1:8080` and the Tyk secret is `352d20ee67be67f6340b4c0605b044b7` - update these as necessary to match your environment. - - To import a key called `abc`, save the JSON contents as `token.json` (see example below), then run the following Curl command. - - ``` - curl http://127.0.0.1:8080/tyk/keys/abc -H 'x-tyk-authorization: 352d20ee67be67f6340b4c0605b044b7' -H 'Content-Type: application/json' -d @token.json - ``` - - The following request will fail as the key doesn't exist. - - ``` - curl http://127.0.0.1:8080/quickstart/headers -H 'Authorization. invalid123' - ``` - - But this request will now work, using the imported key. - - ``` - curl http://127.0.0.1:8080/quickstart/headers -H 'Authorization: abc' - ``` - -

Example token.json file

- - ``` - { - "allowance": 1000, - "rate": 1000, - "per": 60, - "expires": -1, - "quota_max": -1, - "quota_renews": 1406121006, - "quota_remaining": 0, - "quota_renewal_rate": 60, - "access_rights": { - "3": { - "api_name": "Tyk Test API", - "api_id": "3" - } - }, - "org_id": "53ac07777cbb8c2d53000002", - "basic_auth_data": { - "password": "", - "hash_type": "" - }, - "hmac_enabled": false, - "hmac_string": "", - "is_inactive": false, - "apply_policy_id": "", - "apply_policies": [ - "59672779fa4387000129507d", - "53222349fa4387004324324e", - "543534s9fa4387004324324d" - ], - "monitor": { - "trigger_limits": [] - } - } - ``` - - Additionally see key session object data format. - - name: OAuth - description: |- - Manage OAuth clients, and manage their tokens - - name: Cache Invalidation - description: |- - Sometimes a cache might contain stale data, or it may just need to be cleared because of an invalid configuration. This call will purge all keys associated with a cache on an API-by-API basis. - - name: Hot Reload - description: - Force restart of the Gateway or whole cluster - - name: Health Checking - description: Check health check of the Gateway and loaded APIs - - name: Organisation Quotas - description: |- - It is possible to force API quota and rate limit across all keys that belong to a specific organisation ID. Rate limiting at an organisation level is useful for creating tiered access levels and trial accounts. - The Organisation rate limiting middleware works with both Quotas and Rate Limiters. In order to manage this functionality, a simple API has been put in place to manage these sessions. - Although the Organisation session-limiter uses the same session object, all other security keys are optional as they are not used. - -

Managing active status

- To disallow access to an entire group of keys without rate limiting the organisation, create a session object with the "is_inactive" key set to true. This will block access before any other middleware is executed. It is useful when managing subscriptions for an organisation group and access needs to be blocked because of non-payment. - - name: Batch requests - description: |- - Tyk supports batch requests, so a client makes a single request to the API but gets a compound response object back. - - This is especially handy if clients have complex requests that have multiple synchronous dependencies and do not wish to have the entire request / response cycle running for each event. - - To enable batch request support, set the `enable_batch_request_support` value to `true` - - This is especially handy if clients have complex requests that have multiple synchronous dependencies and do not wish to have the entire request / response cycle running for each event. - - Batch requests that come into Tyk are *run through the whole Tyk machinery* and *use a relative path to prevent spamming*. This means that a batch request to Tyk for three resources with the same API key will have three requests applied to their session quota and request limiting could become active if they are being throttled. - - Tyk reconstructs the API request based on the data in the batch request. This is to ensure that Tyk is not being used to proxy requests to other hosts outside of the upstream API being accessed. - - Batch requests are created by POSTing to the `/{listen_path}/tyk/batch/` endpoint. These requests **do not require a valid key**, but their request list does. - -

Sample Request

- - ```{json} - { - "requests": [ - { - "method": "GET", - "headers": { - "x-tyk-test": "1", - "x-tyk-version": "1.2", - "authorization": "1dbc83b9c431649d7698faa9797e2900f" - }, - "body": "", - "relative_url": "get" - }, - { - "method": "GET", - "headers": { - "x-tyk-test": "2", - "x-tyk-version": "1.2", - "authorization": "1dbc83b9c431649d7698faa9797e2900f" - }, - "body": "", - "relative_url": "get" - } - ], - "suppress_parallel_execution": false - } - ``` - - The response will will be a structured reply that encapsulates the responses for each of the outbound requests. If `suppress_parallel_execution` is set to `true`, requests will be made synchronously. If set to `false` then they will run in parallel and the response order is not guaranteed. - -

Sample Response

- - ``` - [ - { - "relative_url": "get", - "code": 200, - "headers": { - "Access-Control-Allow-Credentials": [ - "true" - ], - "Access-Control-Allow-Origin": [ - "*" - ], - "Content-Length": [ - "497" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Wed, 12 Nov 2014 15:32:43 GMT" - ], - "Server": [ - "gunicorn/18.0" - ], - "Via": [ - "1.1 vegur" - ] - }, - "body": "{ - "args": {}, - "headers": { - "Accept-Encoding": "gzip", - "Authorization": "1dbc83b9c431649d7698faa9797e2900f", - "Connect-Time": "2", - "Connection": "close", - "Host": "httpbin.org", - "Total-Route-Time": "0", - "User-Agent": "Go 1.1 package http", - "Via": "1.1 vegur", - "X-Request-Id": "6a22499a-2776-4aa1-80c0-686581a8be4d", - "X-Tyk-Test": "2", - "X-Tyk-Version": "1.2" - }, - "origin": "127.0.0.1, 62.232.114.250", - "url": "http://httpbin.org/get" - }" - }, - { - "relative_url": "get", - "code": 200, - "headers": { - "Access-Control-Allow-Credentials": [ - "true" - ], - "Access-Control-Allow-Origin": [ - "*" - ], - "Content-Length": [ - "497" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Wed, 12 Nov 2014 15:32:43 GMT" - ], - "Server": [ - "gunicorn/18.0" - ], - "Via": [ - "1.1 vegur" - ] - }, - "body": "{ - "args": {}, - "headers": { - "Accept-Encoding": "gzip", - "Authorization": "1dbc83b9c431649d7698faa9797e2900f", - "Connect-Time": "7", - "Connection": "close", - "Host": "httpbin.org", - "Total-Route-Time": "0", - "User-Agent": "Go 1.1 package http", - "Via": "1.1 vegur", - "X-Request-Id": "1ab61f50-51ff-4828-a7e2-17240385a6d2", - "X-Tyk-Test": "1", - "X-Tyk-Version": "1.2" - }, - "origin": "127.0.0.1, 62.232.114.250", - "url": "http://httpbin.org/get" - }" - } - ] - ``` - With the body for each request string encoded in the `body` field. - - * `expire_analytics_after`: If you are running a busy API, you may want to ensure that your MongoDB database does not overflow with old data. 
Set the `expire_analytics_after` value to the number of seconds you would like the data to last for. Setting this flag to anything above `0` will set an `expireAt` field for each record that is written to the database. - - **Important:** Tyk will not create the expiry index for you. In order to implement data expiry for your analytics data, ensure that the index is created This is easily achieved using the [MongoDB command line interface](https://docs.mongodb.com/getting-started/shell/client/). - - * `dont_set_quota_on_create`: This setting defaults to `false`, but if set to `true`, when the API is used to edit, create or add keys, the quota cache in Redis will not be re-set. By default, all updates or creates to Keys that have Quotas set will re-set the quota (This has been the default behaviour since 1.0). - - This behaviour can be bypassed on a case-by-case basis by using the `suppress_reset` parameter when making a REST API request. This is the advised mode of operation as it allows for manual, granular control over key quotas and reset timings. - - * `cache_options`: This section enables you to configure the caching behaviour of Tyk and to enable or disable the caching middleware for your API. - - * `cache_options.enable_cache`: Set this value to `true` if the cache should be enabled for this endpoint, setting it to false will stop all caching behaviour. - - * `cache_options.cache_timeout`: The amount of time, in seconds, to keep cached objects, defaults to `60` seconds. - - * `cache_options.cache_all_safe_requests`: Set this to `true` if you want all *safe* requests (GET, HEAD, OPTIONS) to be cached. This is a blanket setting for APIs where caching is required but you don't want to set individual paths up in the definition. - - * `cache_options.enable_upstream_cache_control`: Set this to `true` if you want your application to control the cache options for Tyk (TTL and whether to cache or not). See [Caching](/docs/reduce-latency/caching/) for more details. 
- - * `response_processors`: Response processors need to be specifically defined so they are loaded on API creation, otherwise the middleware will not fire. In order to have the two main response middleware components fire, the following configuration object should be supplied. - - ```{json} - "response_processors": [ - { - "name": "header_injector", - "options": { - "add_headers": {"name": "value"}, - "remove_headers": ["name"] - } - }, - { - "name": "response_body_transform", - "options": {} - } - ] - ``` - The options for the `header_injector` are global, and will apply to all outbound requests. - - - - name: APIs - description: |- - **Note: Applies only to Tyk Gateway Community Edition** - - API Management is very simple using the Tyk REST API: each update only affects the underlying file, and this endpoint will only work with disk based installations, not Database-backed ones. - - APIs that are added this way are flushed to to disk into the app_path folder using the format: `{api-id}.json`. Updating existing APIs that use a different naming convention will cause those APIs to be added, which could subsequently lead to a loading error and crash if they use the same listen_path. - - These methods only work on a single API node. If updating a cluster, it is important to ensure that all nodes are updated before initiating a reload. -paths: - /tyk/apis: - get: - description: |- - List APIs - Only if used without the Tyk Dashboard - tags: - - APIs - operationId: listApis - responses: - '200': - description: List of API definitions - content: - application/json: - schema: - type: array - items: - $ref: '#/components/schemas/APIDefinition' - example: - - name: "TestAPI" - use_keyless: true - active: true - proxy: - listen_path: "/test" - post: - description: |- - Create API - A single Tyk node can have its API Definitions queried, deleted and updated remotely. 
This functionality enables you to remotely update your Tyk definitions without having to manage the files manually. - tags: - - APIs - operationId: createApi - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/APIDefinition" - example: - name: "TestAPI" - use_keyless: true - active: true - proxy: - listen_path: "/test" - responses: - '200': - description: API created - content: - application/json: - schema: - $ref: "#/components/schemas/apiModifyKeySuccess" - example: - status: "ok" - action: "created" - key: "{...API JSON definition...}" - '400': - description: Malformed data - content: - application/json: - schema: - $ref: "#/components/schemas/apiStatusMessage" - example: - status: "error" - message: "Malformed API data" - '/tyk/apis/{apiID}': - parameters: - - description: The API ID - name: apiID - in: path - required: true - schema: - type: string - get: - description: |- - Get API definition - Only if used without the Tyk Dashboard - tags: - - APIs - operationId: getApi - responses: - '200': - description: API definition - content: - application/json: - schema: - $ref: "#/components/schemas/APIDefinition" - example: - name: "TestAPI" - use_keyless: true - active: true - proxy: - listen_path: "/test" - put: - description: | - Updating an API definition uses the same signature an object as a `POST`, however it will first ensure that the API ID that is being updated is the same as the one in the object being `PUT`. - - - Updating will completely replace the file descriptor and will not change an API Definition that has already been loaded, the hot-reload endpoint will need to be called to push the new definition to live. 
- tags: - - APIs - operationId: updateApi - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/APIDefinition" - example: - name: "TestAPI" - use_keyless: true - active: true - proxy: - listen_path: "/test" - responses: - '200': - description: API updated - content: - application/json: - schema: - $ref: "#/components/schemas/apiModifyKeySuccess" - example: - status: "ok" - action: "updated" - key: "{...API JSON definition...}" - '400': - description: Malformed data - content: - application/json: - schema: - $ref: "#/components/schemas/apiStatusMessage" - example: - status: "error" - message: "Malformed API data" - - delete: - description: |- - Deleting an API definition will remove the file from the file store, the API definition will NOT be unloaded, a separate reload request will need to be made to disable the API endpoint. - tags: - - APIs - operationId: deleteApi - responses: - '200': - description: API deleted - content: - application/json: - schema: - $ref: '#/components/schemas/apiStatusMessage' - example: - message: API deleted - status: ok - '400': - description: No API ID specified - content: - application/json: - schema: - $ref: '#/components/schemas/apiStatusMessage' - example: - message: API ID not specified - status: error - '/tyk/cache/{apiID}': - parameters: - - description: The API ID - name: apiID - in: path - required: true - schema: - type: string - delete: - summary: Invalidate cache - description: Invalidate cache for given API - tags: - - Cache Invalidation - operationId: invalidateCache - responses: - '200': - description: Invalidate cache - content: - application/json: - schema: - $ref: '#/components/schemas/apiStatusMessage' - example: - message: cache invalidated - status: ok - '/tyk/reload/': - get: - summary: Hot-reload a single node - description: Tyk is capable of reloading configurations without having to stop serving requests, this means that API configurations can be added at runtime, or even 
modified at runtime and those rules applied immediately without any downtime. - parameters: - - description: Block a response, until the reload is performed. This can be useful in scripting environments like CI/CD workflows. - name: block - in: query - required: false - schema: - type: boolean - enum: [true] - tags: - - Hot Reload - operationId: hotReload - responses: - '200': - description: Reload gateway - content: - application/json: - schema: - $ref: '#/components/schemas/apiStatusMessage' - example: - status: ok - '/tyk/reload/group': - get: - summary: Hot-reload a Tyk group - description: To reload a whole group of Tyk nodes (without using the Dashboard or host manager), you can send an API request to a single node, this node will then send a notification through the pub/sub infrastructure to all other listening nodes (including the host manager if it is being used to manage NginX) which will then trigger a global reload. - tags: - - Hot Reload - operationId: hotReloadGroup - responses: - '200': - description: Reload gateway - content: - application/json: - schema: - $ref: '#/components/schemas/apiStatusMessage' - example: - status: ok - '/tyk/hello': - get: - summary: Check the Health of the Gateway - description: | - From v2.7.5 you can now rename the `/hello` endpoint by using the `health_check_endpoint_name` option - - Returns 200 response in case of success - tags: - - Health Checking - operationId: hello - responses: - '200': - description: Success - content: - text/html: - schema: - type: string - example: "Hello Tiki" - '/tyk/{listenPath}/hello': - parameters: - - in: path - name: listenPath - required: true - description: "Listen path of loaded API" - schema: - type: string - get: - summary: Check the Health of the API - description: Should point to API domain if it has own - tags: - - Health Checking - operationId: helloAPI - responses: - '200': - description: Success - content: - text/html: - schema: - type: string - example: "Hello Tiki" - 
/tyk/keys: - get: - summary: List Keys - description: You can retrieve all the keys in your Tyk instance. Returns an array of Key IDs. - tags: - - Keys - operationId: listKeys - responses: - '200': - description: List of all API keys - content: - application/json: - schema: - type: array - items: - type: string - post: - summary: Create a key - description: |- - Tyk will generate the access token based on the OrgID specified in the API Definition and a random UUID. This ensures that keys can be "owned" by different API Owners should segmentation be needed at an organisational level. -

- API keys without access_rights data will be written to all APIs on the system (this also means that they will be created across all SessionHandlers and StorageHandlers, it is recommended to always embed access_rights data in a key to ensure that only targeted APIs and their back-ends are written to. - tags: - - Keys - operationId: addKey - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/SessionState" - example: - quota_max: 60 - quota_renews: 1406121006 - quota_renewal_rate: 60 - allowance: 100 - rate: 100 - per: 5 - org_id: 53ac07777cbb8c2d53000002 - responses: - '200': - description: New Key added - content: - application/json: - schema: - $ref: '#/components/schemas/apiModifyKeySuccess' - example: - action: created - key: '{...KEY JSON definition...}' - status: ok - '400': - description: Malformed data - content: - application/json: - schema: - $ref: '#/components/schemas/apiStatusMessage' - example: - message: Malformed Key data - status: error - '/tyk/keys/{keyID}': - parameters: - - description: The Key ID - name: keyID - in: path - required: true - schema: - type: string - get: - summary: Get a Key - description: Get session info about the specified key. Should return up to date rate limit and quota usage numbers. - tags: - - Keys - operationId: getKey - responses: - '200': - description: Key object - content: - application/json: - schema: - $ref: '#/components/schemas/SessionState' - example: - quota_max: 60 - quota_renews: 1406121006 - quota_renewal_rate: 60 - allowance: 100 - rate: 100 - per: 5 - org_id: 53ac07777cbb8c2d53000002 - put: - summary: Update Key or Add custom Key - description: |- - You can also manually add keys to Tyk using your own key-generation algorithm. It is recommended if using this approach to ensure that the OrgID being used in the API Definition and the key data is blank so that Tyk does not try to prepend or manage the key in any way. 
- tags: - - Keys - operationId: updateKey - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/SessionState" - example: - quota_max: 60 - quota_renews: 1406121006 - quota_renewal_rate: 60 - allowance: 100 - rate: 100 - per: 5 - org_id: 53ac07777cbb8c2d53000002 - parameters: - - description: |- - Adding the suppress_reset parameter and setting it to 1, will cause Tyk not to reset the quota limit that is in the current live quota manager. By default Tyk will reset the quota in the live quota manager (initialising it) when adding a key. Adding the `suppress_reset` flag to the URL parameters will avoid this behaviour. - name: suppress_reset - in: query - required: false - schema: - type: string - enum: ["1"] - responses: - '200': - description: Key updated - content: - application/json: - schema: - $ref: '#/components/schemas/apiModifyKeySuccess' - example: - action: updated - status: ok - '400': - description: No or incorrect Key ID specified - content: - application/json: - schema: - $ref: '#/components/schemas/apiStatusMessage' - example: - message: Key ID not specified - status: error - delete: - summary: Delete Key - description: Deleting a key will remove it permanently from the system, however analytics relating to that key will still be available. - tags: - - Keys - operationId: deleteKey - responses: - '200': - description: Key deleted - content: - application/json: - schema: - $ref: '#/components/schemas/apiStatusMessage' - example: - action: Key deleted - status: ok - /tyk/oauth/clients/create: - post: - summary: Create new OAuth client - description: Any OAuth keys must be generated with the help of a client ID. These need to be pre-registered with Tyk before they can be used (in a similar vein to how you would register your app with Twitter before attempting to ask user permissions using their API). -

-

Creating OAuth clients with Access to Multiple APIs

- New from Tyk Gateway 2.6.0 is the ability to create OAuth clients with access to more than one API. If you provide the api_id it works the same as in previous releases. If you don't provide the api_id the request uses policy access rights and enumerates APIs from their setting in the newly created OAuth-client. - - - tags: - - OAuth - operationId: createOAuthClient - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/NewClientRequest" - example: - client_id: test - api_id: id - policy_id: policy - responses: - '200': - description: Client created - content: - application/json: - schema: - $ref: '#/components/schemas/NewClientRequest' - example: - client_id: test - api_id: id - policy_id: policy - '/tyk/oauth/clients/{apiID}': - get: - summary: List oAuth clients - description: OAuth Clients are organised by API ID, and therefore are queried as such. - tags: - - OAuth - operationId: listOAuthClients - parameters: - - description: The API ID - name: apiID - in: path - required: true - schema: - type: string - responses: - '200': - description: Get OAuth client details or a list of OAuth clients - content: - application/json: - schema: - type: array - items: - $ref: '#/components/schemas/NewClientRequest' - '/tyk/oauth/clients/{apiID}/{keyName}': - get: - summary: Get OAuth client - tags: - - OAuth - operationId: getOAuthClient - parameters: - - description: The API ID - name: apiID - in: path - required: true - schema: - type: string - minimum: 1 - - description: The Client ID - name: keyName - in: path - required: true - schema: - type: string - responses: - '200': - description: Get OAuth client details or a list of OAuth clients - content: - application/json: - schema: - $ref: '#/components/schemas/NewClientRequest' - example: - client_id: test - api_id: id - policy_id: policy - delete: - summary: Delete OAuth client - description: Please note that tokens issued with the client ID will still be valid until they expire. 
- tags: - - OAuth - operationId: deleteOAuthClient - parameters: - - description: The API ID - name: apiID - in: path - required: true - schema: - type: string - minimum: 1 - - description: The Client ID - name: keyName - in: path - required: true - schema: - type: string - responses: - '200': - description: OAuth client deleted - content: - application/json: - schema: - $ref: '#/components/schemas/apiModifyKeySuccess' - example: - action: deleted - status: ok - '/tyk/oauth/clients/{apiID}/{keyName}/tokens': - get: - summary: List tokens - description: This endpoint allows you to retrieve a list of all current tokens and their expiry date for a provided API ID and OAuth-client ID in the following format. This endpoint will work only for newly created tokens. -
-
- You can control how long you want to store expired tokens in this list using the `oauth_token_expired_retain_period` gateway option, which specifies the retain period for expired tokens stored in Redis. By default, expired tokens do not get removed. See here for more details. - tags: - - OAuth - operationId: getOAuthClientTokens - parameters: - - description: The API ID - name: apiID - in: path - required: true - schema: - type: string - minimum: 1 - - description: The Client ID - name: keyName - in: path - required: true - schema: - type: string - responses: - '200': - description: Get a list of tokens - content: - application/json: - schema: - type: array - items: - type: string - example: - - "tok1" - - "tok2" - '/tyk/oauth/refresh/{keyName}': - delete: - summary: Invalidate OAuth refresh token - description: It is possible to invalidate refresh tokens in order to manage OAuth client access more robustly. - tags: - - OAuth - operationId: invalidateOAuthRefresh - parameters: - - description: The API id - name: api_id - in: query - required: true - schema: - type: string - - description: Refresh token - name: keyName - in: path - required: true - schema: - type: string - responses: - '200': - description: Deleted - content: - application/json: - schema: - $ref: '#/components/schemas/apiModifyKeySuccess' - '/tyk/oauth/authorize-client/': - post: - description: With the OAuth flow you will need to create authorisation or access tokens for your clients, in order to do this, Tyk provides a private API endpoint for your application to generate these codes and redirect the end-user back to the API Client. - summary: Authorize client - requestBody: - required: true - content: - application/x-www-form-urlencoded: - schema: - type: object - properties: - response_type: - description: Should be provided by requesting client as part of authorisation request, this should be either `code` or `token` depending on the methods you have specified for the API. 
- type: string - client_id: - description: Should be provided by requesting client as part of authorisation request. The Client ID that is making the request. - type: string - redirect_uri: - description: Should be provided by requesting client as part of authorisation request. Must match with the record stored with Tyk. - type: string - key_rules: - description: A string representation of a Session Object (form-encoded). This should be provided by your application in order to apply any quotas or rules to the key. - type: string - example: - response_type: code - client_id: 21e2baf424674f6461faca6d45285bbb - redirect_uri: http%3A%2F%2Foauth.com%2Fredirect - key_rules: '%7B+++++%22allowance%22%3A+999%2C+++++%22rate%22%3A+1000%2C+++++%22per%22%3A+60%2C+++++%22expires%22%3A+0%2C+++++%22quota_max%22%3A+-1%2C+++++%22quota_renews%22%3A+1406121006%2C+++++%22quota_remaining%22%3A+0%2C+++++%22quota_renewal_rate%22%3A+60%2C+++++%22access_rights%22%3A+%7B+++++++++%22528a67c1ac9940964f9a41ae79235fcc%22%3A+%7B+++++++++++++%22api_name%22%3A+%22OAuth+Test+API%22%2C+++++++++++++%22api_id%22%3A+%22528a67c1ac9940964f9a41ae79235fcc%22%2C+++++++++++++%22versions%22%3A+%5B+++++++++++++++++%22Default%22+++++++++++++%5D+++++++++%7D+++++%7D%2C+++++%22org_id%22%3A+%2253ac07777cbb8c2d53000002%22+%7D' - tags: - - OAuth - operationId: authorizeClient - responses: - '200': - description: Succesful response - content: - application/json: - schema: - type: object - example: - code: MWY0ZDRkMzktOTYwNi00NDRiLTk2YmQtOWQxOGQ3Mjc5Yzdk - redirect_to: 'http://client-app.com/oauth-redirect/?code=MWY0ZDRkMzktOTYwNi00NDRiLTk2YmQtOWQxOGQ3Mjc5Yzdk' - /tyk/org/keys: - get: - summary: List Organisation Keys - description: |- - You can now set rate limits at the organisation level by using the following fields - allowance and rate. These are the number of allowed requests for the specified per value, and need to be set to the same value. 
If you don't want to have organisation level rate limiting, set `rate` or `per` to zero, or don't add them to your request. - tags: - - Organisation Quotas - operationId: listOrgKeys - responses: - '200': - description: List of all API keys - content: - application/json: - schema: - type: object - properties: - keys: - type: array - items: - type: string - example: - keys: - - "key1" - - "key2" - post: - summary: Create an organisation key - description: |- - This works similarly to the Keys API except that the Key ID always equals the Organisation ID - tags: - - Organisation Quotas - operationId: addOrgKey - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/SessionState" - example: - quota_max: 60 - quota_renews: 1406121006 - quota_renewal_rate: 60 - allowance: 100 - rate: 100 - per: 5 - org_id: 53ac07777cbb8c2d53000002 - responses: - '200': - description: New Key added - content: - application/json: - schema: - $ref: '#/components/schemas/apiModifyKeySuccess' - example: - action: created - key: '{...KEY JSON definition...}' - status: ok - '/tyk/orgs/keys/{keyID}': - parameters: - - description: The Key ID - name: keyID - in: path - required: true - schema: - type: string - get: - summary: Get an Organisation Key - description: Get session info about specified organisation key. Should return up to date rate limit and quota usage numbers. - tags: - - Organisation Quotas - operationId: getOrgKey - responses: - '200': - description: Key object - content: - application/json: - schema: - $ref: '#/components/schemas/SessionState' - put: - summary: Update Organisation Key - description: |- - This works similarly to the Keys API except that the Key ID always equals the Organisation ID - - For Gateway v2.6.0 onwards, you can now set rate limits at the organisation level by using the following fields - allowance and rate. These are the number of allowed requests for the specified per value, and need to be set to the same value. 
If you don't want to have organisation level rate limiting, set `rate` or `per` to zero, or don't add them to your request. - tags: - - Organisation Quotas - operationId: updateOrgKey - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/SessionState" - example: - quota_max: 60 - quota_renews: 1406121006 - quota_renewal_rate: 60 - allowance: 100 - rate: 100 - per: 5 - org_id: 53ac07777cbb8c2d53000002 - parameters: - - description: |- - Adding the `reset_quota` parameter and setting it to 1, will cause Tyk to reset the organisation's quota in the live quota manager, it is recommended to use this mechanism to reset organisation-level access if a monthly subscription is in place. - name: reset_quota - in: query - required: false - schema: - type: string - enum: ["1"] - responses: - '200': - description: Key updated - content: - application/json: - schema: - $ref: '#/components/schemas/apiModifyKeySuccess' - example: - action: updated - status: ok - delete: - summary: Delete Organisation Key - description: Deleting a key will remove all limits from organisation. It does not affect regular keys created within the organisation. 
- tags: - - Organisation Quotas - operationId: deleteOrgKey - responses: - '200': - description: Key deleted - content: - application/json: - schema: - $ref: '#/components/schemas/apiStatusMessage' - example: - action: Key deleted - status: ok - /{listen_path}/tyk/batch: - parameters: - - name: listen_path - in: path - required: true - description: "API listen path" - schema: - type: string - post: - summary: Run batch request - tags: - - Batch requests - operationId: batch - responses: - '200': - $ref: '#/components/schemas/apiStatusMessage' -components: - responses: - parameterBodies: - content: - application/json: - schema: - $ref: '#/components/schemas/OAuthClientToken' - description: parameterBodies - schemas: - APIDefinition: - properties: - tags: - items: - type: string - type: array - x-go-name: Tags - CORS: - properties: - allow_credentials: - type: boolean - x-go-name: AllowCredentials - allowed_headers: - items: - type: string - type: array - x-go-name: AllowedHeaders - allowed_methods: - items: - type: string - type: array - x-go-name: AllowedMethods - allowed_origins: - items: - type: string - type: array - x-go-name: AllowedOrigins - debug: - type: boolean - x-go-name: Debug - enable: - type: boolean - x-go-name: Enable - exposed_headers: - items: - type: string - type: array - x-go-name: ExposedHeaders - max_age: - format: int64 - type: integer - x-go-name: MaxAge - options_passthrough: - type: boolean - x-go-name: OptionsPassthrough - type: object - active: - type: boolean - x-go-name: Active - allowed_ips: - items: - type: string - type: array - x-go-name: AllowedIPs - api_id: - type: string - x-go-name: APIID - auth: - $ref: '#/components/schemas/Auth' - auth_provider: - $ref: '#/components/schemas/AuthProviderMeta' - base_identity_provided_by: - $ref: '#/components/schemas/AuthTypeEnum' - basic_auth: - properties: - body_password_regexp: - type: string - x-go-name: BodyPasswordRegexp - body_user_regexp: - type: string - x-go-name: BodyUserRegexp 
- cache_ttl: - format: int64 - type: integer - x-go-name: CacheTTL - disable_caching: - type: boolean - x-go-name: DisableCaching - extract_from_body: - type: boolean - x-go-name: ExtractFromBody - type: object - x-go-name: BasicAuth - blacklisted_ips: - items: - type: string - type: array - x-go-name: BlacklistedIPs - cache_options: - $ref: '#/components/schemas/CacheOptions' - certificates: - items: - type: string - type: array - x-go-name: Certificates - client_certificates: - items: - type: string - type: array - x-go-name: ClientCertificates - config_data: - additionalProperties: - type: object - type: object - x-go-name: ConfigData - custom_middleware: - $ref: '#/components/schemas/MiddlewareSection' - custom_middleware_bundle: - type: string - x-go-name: CustomMiddlewareBundle - definition: - properties: - key: - type: string - x-go-name: Key - location: - type: string - x-go-name: Location - strip_path: - type: boolean - x-go-name: StripPath - type: object - x-go-name: VersionDefinition - disable_quota: - type: boolean - x-go-name: DisableQuota - disable_rate_limit: - type: boolean - x-go-name: DisableRateLimit - do_not_track: - type: boolean - x-go-name: DoNotTrack - domain: - type: string - x-go-name: Domain - dont_set_quota_on_create: - type: boolean - x-go-name: DontSetQuotasOnCreate - enable_batch_request_support: - type: boolean - x-go-name: EnableBatchRequestSupport - enable_context_vars: - type: boolean - x-go-name: EnableContextVars - enable_coprocess_auth: - type: boolean - x-go-name: EnableCoProcessAuth - enable_ip_blacklisting: - type: boolean - x-go-name: EnableIpBlacklisting - enable_ip_whitelisting: - type: boolean - x-go-name: EnableIpWhiteListing - enable_jwt: - type: boolean - x-go-name: EnableJWT - enable_signature_checking: - type: boolean - x-go-name: EnableSignatureChecking - event_handlers: - $ref: '#/components/schemas/EventHandlerMetaConfig' - expire_analytics_after: - format: int64 - type: integer - x-go-name: ExpireAnalyticsAfter 
- global_rate_limit: - $ref: '#/components/schemas/GlobalRateLimit' - hmac_allowed_algorithms: - items: - type: string - type: array - x-go-name: HmacAllowedAlgorithms - hmac_allowed_clock_skew: - format: double - type: number - x-go-name: HmacAllowedClockSkew - id: - $ref: '#/components/schemas/ObjectId' - internal: - type: boolean - x-go-name: Internal - jwt_client_base_field: - type: string - x-go-name: JWTClientIDBaseField - jwt_expires_at_validation_skew: - format: uint64 - type: integer - x-go-name: JWTExpiresAtValidationSkew - jwt_identity_base_field: - type: string - x-go-name: JWTIdentityBaseField - jwt_issued_at_validation_skew: - format: uint64 - type: integer - x-go-name: JWTIssuedAtValidationSkew - jwt_not_before_validation_skew: - format: uint64 - type: integer - x-go-name: JWTNotBeforeValidationSkew - jwt_policy_field_name: - type: string - x-go-name: JWTPolicyFieldName - jwt_scope_claim_name: - type: string - x-go-name: JWTScopeClaimName - jwt_scope_to_policy_mapping: - additionalProperties: - type: string - type: object - x-go-name: JWTScopeToPolicyMapping - jwt_signing_method: - type: string - x-go-name: JWTSigningMethod - jwt_skip_kid: - type: boolean - x-go-name: JWTSkipKid - jwt_source: - type: string - x-go-name: JWTSource - name: - type: string - x-go-name: Name - notifications: - $ref: '#/components/schemas/NotificationsManager' - oauth_meta: - properties: - allowed_access_types: - items: - $ref: '#/components/schemas/AccessRequestType' - type: array - x-go-name: AllowedAccessTypes - allowed_authorize_types: - items: - $ref: '#/components/schemas/AuthorizeRequestType' - type: array - x-go-name: AllowedAuthorizeTypes - auth_login_redirect: - type: string - x-go-name: AuthorizeLoginRedirect - type: object - x-go-name: Oauth2Meta - openid_options: - $ref: '#/components/schemas/OpenIDOptions' - org_id: - type: string - x-go-name: OrgID - pinned_public_keys: - additionalProperties: - type: string - type: object - x-go-name: PinnedPublicKeys - 
proxy: - properties: - check_host_against_uptime_tests: - type: boolean - x-go-name: CheckHostAgainstUptimeTests - disable_strip_slash: - type: boolean - x-go-name: DisableStripSlash - enable_load_balancing: - type: boolean - x-go-name: EnableLoadBalancing - listen_path: - type: string - x-go-name: ListenPath - preserve_host_header: - type: boolean - x-go-name: PreserveHostHeader - service_discovery: - $ref: '#/components/schemas/ServiceDiscoveryConfiguration' - strip_listen_path: - type: boolean - x-go-name: StripListenPath - target_list: - items: - type: string - type: array - x-go-name: Targets - target_url: - type: string - x-go-name: TargetURL - transport: - properties: - proxy_url: - type: string - x-go-name: ProxyURL - ssl_ciphers: - items: - type: string - type: array - x-go-name: SSLCipherSuites - ssl_insecure_skip_verify: - type: boolean - x-go-name: SSLInsecureSkipVerify - ssl_min_version: - format: uint16 - type: integer - x-go-name: SSLMinVersion - type: object - x-go-name: Transport - type: object - x-go-name: Proxy - response_processors: - items: - $ref: '#/components/schemas/ResponseProcessor' - type: array - x-go-name: ResponseProcessors - session_lifetime: - format: int64 - type: integer - x-go-name: SessionLifetime - session_provider: - $ref: '#/components/schemas/SessionProviderMeta' - slug: - type: string - x-go-name: Slug - strip_auth_data: - type: boolean - x-go-name: StripAuthData - tag_headers: - items: - type: string - type: array - x-go-name: TagHeaders - upstream_certificates: - additionalProperties: - type: string - type: object - x-go-name: UpstreamCertificates - uptime_tests: - properties: - check_list: - items: - $ref: '#/components/schemas/HostCheckObject' - type: array - x-go-name: CheckList - config: - properties: - expire_utime_after: - format: int64 - type: integer - x-go-name: ExpireUptimeAnalyticsAfter - recheck_wait: - format: int64 - type: integer - x-go-name: RecheckWait - service_discovery: - $ref: 
'#/components/schemas/ServiceDiscoveryConfiguration' - type: object - x-go-name: Config - type: object - x-go-name: UptimeTests - use_basic_auth: - type: boolean - x-go-name: UseBasicAuth - use_keyless: - type: boolean - x-go-name: UseKeylessAccess - use_mutual_tls_auth: - type: boolean - x-go-name: UseMutualTLSAuth - use_oauth2: - type: boolean - x-go-name: UseOauth2 - use_openid: - type: boolean - x-go-name: UseOpenID - use_standard_auth: - type: boolean - x-go-name: UseStandardAuth - version_data: - properties: - default_version: - type: string - x-go-name: DefaultVersion - not_versioned: - type: boolean - x-go-name: NotVersioned - versions: - additionalProperties: - $ref: '#/components/schemas/VersionInfo' - type: object - x-go-name: Versions - type: object - x-go-name: VersionData - title: >- - APIDefinition represents the configuration for a single proxied API and - it's versions. - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - APILimit: - description: APILimit stores quota and rate limit on ACL level (per API) - properties: - per: - format: double - type: number - x-go-name: Per - quota_max: - format: int64 - type: integer - x-go-name: QuotaMax - quota_remaining: - format: int64 - type: integer - x-go-name: QuotaRemaining - quota_renewal_rate: - format: int64 - type: integer - x-go-name: QuotaRenewalRate - quota_renews: - format: int64 - type: integer - x-go-name: QuotaRenews - rate: - format: double - type: number - x-go-name: Rate - set_by_policy: - type: boolean - x-go-name: SetByPolicy - throttle_interval: - format: double - type: number - x-go-name: ThrottleInterval - throttle_retry_limit: - format: int64 - type: integer - x-go-name: ThrottleRetryLimit - type: object - x-go-package: github.com/TykTechnologies/tyk/user - AccessDefinition: - description: AccessDefinition defines which versions of an API a key has access to - properties: - allowed_urls: - items: - $ref: '#/components/schemas/AccessSpec' - type: array - x-go-name: 
AllowedURLs - api_id: - type: string - x-go-name: APIID - api_name: - type: string - x-go-name: APIName - limit: - $ref: '#/components/schemas/APILimit' - versions: - items: - type: string - type: array - x-go-name: Versions - type: object - x-go-package: github.com/TykTechnologies/tyk/user - AccessRequestType: - description: AccessRequestType is the type for OAuth param `grant_type` - type: string - x-go-package: github.com/TykTechnologies/tyk/vendor/github.com/lonelycode/osin - AccessSpec: - description: >- - AccessSpecs define what URLS a user has access to an what methods are - enabled - properties: - methods: - items: - type: string - type: array - x-go-name: Methods - url: - type: string - x-go-name: URL - type: object - x-go-package: github.com/TykTechnologies/tyk/user - Auth: - properties: - auth_header_name: - type: string - x-go-name: AuthHeaderName - cookie_name: - type: string - x-go-name: CookieName - param_name: - type: string - x-go-name: ParamName - signature: - $ref: '#/components/schemas/SignatureConfig' - use_certificate: - type: boolean - x-go-name: UseCertificate - use_cookie: - type: boolean - x-go-name: UseCookie - use_param: - type: boolean - x-go-name: UseParam - validate_signature: - type: boolean - x-go-name: ValidateSignature - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - AuthProviderCode: - type: string - x-go-package: github.com/TykTechnologies/tyk/apidef - AuthProviderMeta: - properties: - meta: - additionalProperties: - type: object - type: object - x-go-name: Meta - name: - $ref: '#/components/schemas/AuthProviderCode' - storage_engine: - $ref: '#/components/schemas/StorageEngineCode' - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - AuthTypeEnum: - type: string - x-go-package: github.com/TykTechnologies/tyk/apidef - AuthorizeRequestType: - description: AuthorizeRequestType is the type for OAuth param `response_type` - type: string - x-go-package: 
github.com/TykTechnologies/tyk/vendor/github.com/lonelycode/osin - CacheMeta: - properties: - cache_key_regex: - type: string - x-go-name: CacheKeyRegex - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - CacheOptions: - properties: - cache_all_safe_requests: - type: boolean - x-go-name: CacheAllSafeRequests - cache_control_ttl_header: - type: string - x-go-name: CacheControlTTLHeader - cache_response_codes: - items: - format: int64 - type: integer - type: array - x-go-name: CacheOnlyResponseCodes - cache_timeout: - format: int64 - type: integer - x-go-name: CacheTimeout - enable_cache: - type: boolean - x-go-name: EnableCache - enable_upstream_cache_control: - type: boolean - x-go-name: EnableUpstreamCacheControl - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - CircuitBreakerMeta: - properties: - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - return_to_service_after: - format: int64 - type: integer - x-go-name: ReturnToServiceAfter - samples: - format: int64 - type: integer - x-go-name: Samples - threshold_percent: - format: double - type: number - x-go-name: ThresholdPercent - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - EndPointMeta: - properties: - method_actions: - additionalProperties: - $ref: '#/components/schemas/EndpointMethodMeta' - type: object - x-go-name: MethodActions - path: - type: string - x-go-name: Path - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - EndpointMethodAction: - type: string - x-go-package: github.com/TykTechnologies/tyk/apidef - EndpointMethodMeta: - properties: - action: - $ref: '#/components/schemas/EndpointMethodAction' - code: - format: int64 - type: integer - x-go-name: Code - data: - type: string - x-go-name: Data - headers: - additionalProperties: - type: string - type: object - x-go-name: Headers - type: object - 
x-go-package: github.com/TykTechnologies/tyk/apidef - EventHandlerMetaConfig: - properties: - events: - x-go-name: Events - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - ExtendedPathsSet: - properties: - advance_cache_config: - items: - $ref: '#/components/schemas/CacheMeta' - type: array - x-go-name: AdvanceCacheConfig - black_list: - items: - $ref: '#/components/schemas/EndPointMeta' - type: array - x-go-name: BlackList - cache: - items: - type: string - type: array - x-go-name: Cached - circuit_breakers: - items: - $ref: '#/components/schemas/CircuitBreakerMeta' - type: array - x-go-name: CircuitBreaker - do_not_track_endpoints: - items: - $ref: '#/components/schemas/TrackEndpointMeta' - type: array - x-go-name: DoNotTrackEndpoints - hard_timeouts: - items: - $ref: '#/components/schemas/HardTimeoutMeta' - type: array - x-go-name: HardTimeouts - ignored: - items: - $ref: '#/components/schemas/EndPointMeta' - type: array - x-go-name: Ignored - internal: - items: - $ref: '#/components/schemas/InternalMeta' - type: array - x-go-name: Internal - method_transforms: - items: - $ref: '#/components/schemas/MethodTransformMeta' - type: array - x-go-name: MethodTransforms - size_limits: - items: - $ref: '#/components/schemas/RequestSizeMeta' - type: array - x-go-name: SizeLimit - track_endpoints: - items: - $ref: '#/components/schemas/TrackEndpointMeta' - type: array - x-go-name: TrackEndpoints - transform: - items: - $ref: '#/components/schemas/TemplateMeta' - type: array - x-go-name: Transform - transform_headers: - items: - $ref: '#/components/schemas/HeaderInjectionMeta' - type: array - x-go-name: TransformHeader - transform_jq: - items: - $ref: '#/components/schemas/TransformJQMeta' - type: array - x-go-name: TransformJQ - transform_jq_response: - items: - $ref: '#/components/schemas/TransformJQMeta' - type: array - x-go-name: TransformJQResponse - transform_response: - items: - $ref: '#/components/schemas/TemplateMeta' - type: array - 
x-go-name: TransformResponse - transform_response_headers: - items: - $ref: '#/components/schemas/HeaderInjectionMeta' - type: array - x-go-name: TransformResponseHeader - url_rewrites: - items: - $ref: '#/components/schemas/URLRewriteMeta' - type: array - x-go-name: URLRewrite - validate_json: - items: - $ref: '#/components/schemas/ValidatePathMeta' - type: array - x-go-name: ValidateJSON - virtual: - items: - $ref: '#/components/schemas/VirtualMeta' - type: array - x-go-name: Virtual - white_list: - items: - $ref: '#/components/schemas/EndPointMeta' - type: array - x-go-name: WhiteList - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - GlobalRateLimit: - properties: - per: - format: double - type: number - x-go-name: Per - rate: - format: double - type: number - x-go-name: Rate - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - HardTimeoutMeta: - properties: - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - timeout: - format: int64 - type: integer - x-go-name: TimeOut - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - HashType: - type: string - x-go-package: github.com/TykTechnologies/tyk/user - HeaderInjectionMeta: - properties: - act_on: - type: boolean - x-go-name: ActOnResponse - add_headers: - additionalProperties: - type: string - type: object - x-go-name: AddHeaders - delete_headers: - items: - type: string - type: array - x-go-name: DeleteHeaders - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - HostCheckObject: - properties: - body: - type: string - x-go-name: Body - headers: - additionalProperties: - type: string - type: object - x-go-name: Headers - method: - type: string - x-go-name: Method - url: - type: string - x-go-name: CheckURL - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - IdExtractorSource: - type: string - 
x-go-package: github.com/TykTechnologies/tyk/apidef - IdExtractorType: - type: string - x-go-package: github.com/TykTechnologies/tyk/apidef - InternalMeta: - properties: - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - MethodTransformMeta: - properties: - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - to_method: - type: string - x-go-name: ToMethod - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - MiddlewareDefinition: - properties: - name: - type: string - x-go-name: Name - path: - type: string - x-go-name: Path - require_session: - type: boolean - x-go-name: RequireSession - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - MiddlewareDriver: - type: string - x-go-package: github.com/TykTechnologies/tyk/apidef - MiddlewareIdExtractor: - properties: - extract_from: - $ref: '#/components/schemas/IdExtractorSource' - extract_with: - $ref: '#/components/schemas/IdExtractorType' - extractor_config: - additionalProperties: - type: object - type: object - x-go-name: ExtractorConfig - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - MiddlewareSection: - properties: - auth_check: - $ref: '#/components/schemas/MiddlewareDefinition' - driver: - $ref: '#/components/schemas/MiddlewareDriver' - id_extractor: - $ref: '#/components/schemas/MiddlewareIdExtractor' - post: - items: - $ref: '#/components/schemas/MiddlewareDefinition' - type: array - x-go-name: Post - post_key_auth: - items: - $ref: '#/components/schemas/MiddlewareDefinition' - type: array - x-go-name: PostKeyAuth - pre: - items: - $ref: '#/components/schemas/MiddlewareDefinition' - type: array - x-go-name: Pre - response: - items: - $ref: '#/components/schemas/MiddlewareDefinition' - type: array - x-go-name: Response - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - NewClientRequest: - 
description: >- - NewClientRequest is an outward facing JSON object translated from osin - OAuthClients - properties: - api_id: - type: string - x-go-name: APIID - client_id: - type: string - x-go-name: ClientID - description: - type: string - x-go-name: Description - meta_data: - type: object - x-go-name: MetaData - policy_id: - type: string - x-go-name: PolicyID - redirect_uri: - type: string - x-go-name: ClientRedirectURI - secret: - type: string - x-go-name: ClientSecret - type: object - x-go-package: github.com/TykTechnologies/tyk - NotificationsManager: - description: 'TODO: Make this more generic' - properties: - oauth_on_keychange_url: - type: string - x-go-name: OAuthKeyChangeURL - shared_secret: - type: string - x-go-name: SharedSecret - title: >- - NotificationsManager handles sending notifications to OAuth endpoints to - notify the provider of key changes. - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - OAuthClientToken: - properties: - code: - type: string - x-go-name: Token - expires: - format: int64 - type: integer - x-go-name: Expires - type: object - x-go-package: github.com/TykTechnologies/tyk - OIDProviderConfig: - properties: - client_ids: - additionalProperties: - type: string - type: object - x-go-name: ClientIDs - issuer: - type: string - x-go-name: Issuer - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - ObjectId: - description: 'http://www.mongodb.org/display/DOCS/Object+IDs' - title: >- - ObjectId is a unique ID identifying a BSON value. It must be exactly 12 - bytes - - long. MongoDB objects by default have such a property set in their "_id" - - property. 
- type: string - x-go-package: github.com/TykTechnologies/tyk/vendor/gopkg.in/mgo.v2/bson - OpenIDOptions: - properties: - providers: - items: - $ref: '#/components/schemas/OIDProviderConfig' - type: array - x-go-name: Providers - segregate_by_client: - type: boolean - x-go-name: SegregateByClient - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - Regexp: - description: Regexp is a wrapper around regexp.Regexp but with caching - properties: - FromCache: - type: boolean - type: object - x-go-package: github.com/TykTechnologies/tyk/regexp - RequestInputType: - type: string - x-go-package: github.com/TykTechnologies/tyk/apidef - RequestSizeMeta: - properties: - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - size_limit: - format: int64 - type: integer - x-go-name: SizeLimit - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - ResponseProcessor: - properties: - name: - type: string - x-go-name: Name - options: - type: object - x-go-name: Options - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - RoutingTrigger: - properties: - 'on': - $ref: '#/components/schemas/RoutingTriggerOnType' - options: - $ref: '#/components/schemas/RoutingTriggerOptions' - rewrite_to: - type: string - x-go-name: RewriteTo - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - RoutingTriggerOnType: - type: string - x-go-package: github.com/TykTechnologies/tyk/apidef - RoutingTriggerOptions: - properties: - header_matches: - additionalProperties: - $ref: '#/components/schemas/StringRegexMap' - type: object - x-go-name: HeaderMatches - path_part_matches: - additionalProperties: - $ref: '#/components/schemas/StringRegexMap' - type: object - x-go-name: PathPartMatches - payload_matches: - $ref: '#/components/schemas/StringRegexMap' - query_val_matches: - additionalProperties: - $ref: '#/components/schemas/StringRegexMap' - type: object - x-go-name: QueryValMatches - 
request_context_matches: - additionalProperties: - $ref: '#/components/schemas/StringRegexMap' - type: object - x-go-name: RequestContextMatches - session_meta_matches: - additionalProperties: - $ref: '#/components/schemas/StringRegexMap' - type: object - x-go-name: SessionMetaMatches - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - ServiceDiscoveryConfiguration: - properties: - cache_timeout: - format: int64 - type: integer - x-go-name: CacheTimeout - data_path: - type: string - x-go-name: DataPath - endpoint_returns_list: - type: boolean - x-go-name: EndpointReturnsList - parent_data_path: - type: string - x-go-name: ParentDataPath - port_data_path: - type: string - x-go-name: PortDataPath - query_endpoint: - type: string - x-go-name: QueryEndpoint - target_path: - type: string - x-go-name: TargetPath - use_discovery_service: - type: boolean - x-go-name: UseDiscoveryService - use_nested_query: - type: boolean - x-go-name: UseNestedQuery - use_target_list: - type: boolean - x-go-name: UseTargetList - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - SessionProviderCode: - type: string - x-go-package: github.com/TykTechnologies/tyk/apidef - SessionProviderMeta: - properties: - meta: - additionalProperties: - type: object - type: object - x-go-name: Meta - name: - $ref: '#/components/schemas/SessionProviderCode' - storage_engine: - $ref: '#/components/schemas/StorageEngineCode' - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - SessionState: - description: >- - There's a data structure that's based on this and it's used for Protocol - Buffer support, make sure to update - "coprocess/proto/coprocess_session_state.proto" and generate the - bindings using: cd coprocess/proto && ./update_bindings.sh - properties: - tags: - items: - type: string - type: array - x-go-name: Tags - access_rights: - additionalProperties: - $ref: '#/components/schemas/AccessDefinition' - type: object - x-go-name: AccessRights - 
alias: - type: string - x-go-name: Alias - allowance: - format: double - type: number - x-go-name: Allowance - apply_policies: - items: - type: string - type: array - x-go-name: ApplyPolicies - apply_policy_id: - type: string - x-go-name: ApplyPolicyID - basic_auth_data: - properties: - hash_type: - $ref: '#/components/schemas/HashType' - password: - type: string - x-go-name: Password - type: object - x-go-name: BasicAuthData - certificate: - type: string - x-go-name: Certificate - data_expires: - format: int64 - type: integer - x-go-name: DataExpires - enable_detail_recording: - type: boolean - x-go-name: EnableDetailedRecording - expires: - format: int64 - type: integer - x-go-name: Expires - hmac_enabled: - type: boolean - x-go-name: HMACEnabled - hmac_string: - type: string - x-go-name: HmacSecret - id_extractor_deadline: - format: int64 - type: integer - x-go-name: IdExtractorDeadline - is_inactive: - type: boolean - x-go-name: IsInactive - jwt_data: - properties: - secret: - type: string - x-go-name: Secret - type: object - x-go-name: JWTData - last_check: - format: int64 - type: integer - x-go-name: LastCheck - last_updated: - type: string - x-go-name: LastUpdated - meta_data: - additionalProperties: - type: object - type: object - x-go-name: MetaData - monitor: - properties: - trigger_limits: - items: - format: double - type: number - type: array - x-go-name: TriggerLimits - type: object - x-go-name: Monitor - oauth_client_id: - type: string - x-go-name: OauthClientID - oauth_keys: - additionalProperties: - type: string - type: object - x-go-name: OauthKeys - org_id: - type: string - x-go-name: OrgID - per: - format: double - type: number - x-go-name: Per - quota_max: - format: int64 - type: integer - x-go-name: QuotaMax - quota_remaining: - format: int64 - type: integer - x-go-name: QuotaRemaining - quota_renewal_rate: - format: int64 - type: integer - x-go-name: QuotaRenewalRate - quota_renews: - format: int64 - type: integer - x-go-name: QuotaRenews - 
rate: - format: double - type: number - x-go-name: Rate - session_lifetime: - format: int64 - type: integer - x-go-name: SessionLifetime - throttle_interval: - format: double - type: number - x-go-name: ThrottleInterval - throttle_retry_limit: - format: int64 - type: integer - x-go-name: ThrottleRetryLimit - title: >- - SessionState objects represent a current API session, mainly used for - rate limiting. - type: object - x-go-package: github.com/TykTechnologies/tyk/user - SignatureConfig: - properties: - algorithm: - type: string - x-go-name: Algorithm - allowed_clock_skew: - format: int64 - type: integer - x-go-name: AllowedClockSkew - error_code: - format: int64 - type: integer - x-go-name: ErrorCode - error_message: - type: string - x-go-name: ErrorMessage - header: - type: string - x-go-name: Header - secret: - type: string - x-go-name: Secret - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - StorageEngineCode: - type: string - x-go-package: github.com/TykTechnologies/tyk/apidef - StringRegexMap: - properties: - match_rx: - type: string - x-go-name: MatchPattern - reverse: - type: boolean - x-go-name: Reverse - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - TemplateData: - properties: - enable_session: - type: boolean - x-go-name: EnableSession - input_type: - $ref: '#/components/schemas/RequestInputType' - template_mode: - $ref: '#/components/schemas/TemplateMode' - template_source: - type: string - x-go-name: TemplateSource - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - TemplateMeta: - properties: - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - template_data: - $ref: '#/components/schemas/TemplateData' - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - TemplateMode: - type: string - x-go-package: github.com/TykTechnologies/tyk/apidef - TrackEndpointMeta: - properties: - method: - type: string - x-go-name: Method - path: - type: 
string - x-go-name: Path - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - TransformJQMeta: - properties: - filter: - type: string - x-go-name: Filter - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - URLRewriteMeta: - properties: - MatchRegexp: - $ref: '#/components/schemas/Regexp' - match_pattern: - type: string - x-go-name: MatchPattern - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - rewrite_to: - type: string - x-go-name: RewriteTo - triggers: - items: - $ref: '#/components/schemas/RoutingTrigger' - type: array - x-go-name: Triggers - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - ValidatePathMeta: - properties: - error_response_code: - description: >- - Allows override of default 422 Unprocessible Entity response code - for validation errors. - format: int64 - type: integer - x-go-name: ErrorResponseCode - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - schema: - additionalProperties: - type: object - type: object - x-go-name: Schema - schema_b64: - type: string - x-go-name: SchemaB64 - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - VersionInfo: - properties: - paths: - properties: - black_list: - items: - type: string - type: array - x-go-name: BlackList - ignored: - items: - type: string - type: array - x-go-name: Ignored - white_list: - items: - type: string - type: array - x-go-name: WhiteList - type: object - x-go-name: Paths - expires: - type: string - x-go-name: Expires - extended_paths: - $ref: '#/components/schemas/ExtendedPathsSet' - global_headers: - additionalProperties: - type: string - type: object - x-go-name: GlobalHeaders - global_headers_remove: - items: - type: string - type: array - x-go-name: GlobalHeadersRemove - global_size_limit: - format: int64 - type: integer - x-go-name: GlobalSizeLimit - 
name: - type: string - x-go-name: Name - override_target: - type: string - x-go-name: OverrideTarget - use_extended_paths: - type: boolean - x-go-name: UseExtendedPaths - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - VirtualMeta: - properties: - function_source_type: - type: string - x-go-name: FunctionSourceType - function_source_uri: - type: string - x-go-name: FunctionSourceURI - method: - type: string - x-go-name: Method - path: - type: string - x-go-name: Path - proxy_on_error: - type: boolean - x-go-name: ProxyOnError - response_function_name: - type: string - x-go-name: ResponseFunctionName - use_session: - type: boolean - x-go-name: UseSession - type: object - x-go-package: github.com/TykTechnologies/tyk/apidef - apiAllKeys: - description: apiAllKeys represents a list of keys in the memory store - properties: - keys: - items: - type: string - type: array - x-go-name: APIKeys - type: object - x-go-package: github.com/TykTechnologies/tyk - apiModifyKeySuccess: - description: apiModifyKeySuccess represents when a Key modification was successful - properties: - action: - type: string - x-go-name: Action - key: - description: 'in:body' - type: string - x-go-name: Key - key_hash: - type: string - x-go-name: KeyHash - status: - type: string - x-go-name: Status - type: object - x-go-package: github.com/TykTechnologies/tyk - apiStatusMessage: - description: apiStatusMessage represents an API status message - properties: - message: - description: Response details - type: string - x-go-name: Message - status: - type: string - x-go-name: Status - type: object - x-go-package: github.com/TykTechnologies/tyk - securitySchemes: - api_key: - in: header - name: X-Tyk-Authorization - type: apiKey -security: - - api_key: [] diff --git a/test/dns.go b/test/dns.go deleted file mode 100644 index b3a828a9a5df..000000000000 --- a/test/dns.go +++ /dev/null @@ -1,225 +0,0 @@ -package test - -import ( - "context" - "fmt" - "net" - "reflect" - "regexp" - 
"strings" - - "time" - - "sync" - - "github.com/miekg/dns" -) - -var ( - muDefaultResolver sync.RWMutex - DomainsToAddresses = map[string][]string{ - "host1.local.": {"127.0.0.1"}, - "host2.local.": {"127.0.0.1"}, - "host3.local.": {"127.0.0.1"}, - } - DomainsToIgnore = []string{ - "redis.", - "tyk-redis.", - "mongo.", // For dashboard integration tests - "tyk-mongo.", - } -) - -type dnsMockHandler struct { - domainsToAddresses map[string][]string - domainsToErrors map[string]int - - muDomainsToAddresses sync.RWMutex -} - -func (d *dnsMockHandler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { - msg := dns.Msg{} - msg.SetReply(r) - switch r.Question[0].Qtype { - case dns.TypeA: - msg.Authoritative = true - domain := msg.Question[0].Name - - d.muDomainsToAddresses.RLock() - defer d.muDomainsToAddresses.RUnlock() - - if rcode, ok := d.domainsToErrors[domain]; ok { - m := new(dns.Msg) - m.SetRcode(r, rcode) - w.WriteMsg(m) - return - } - - for _, ignore := range DomainsToIgnore { - if strings.HasPrefix(domain, ignore) { - resolver := &net.Resolver{} - ipAddrs, err := resolver.LookupIPAddr(context.Background(), domain) - if err != nil { - m := new(dns.Msg) - m.SetRcode(r, dns.RcodeServerFailure) - w.WriteMsg(m) - return - } - msg.Answer = append(msg.Answer, &dns.A{ - Hdr: dns.RR_Header{Name: domain, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, - A: ipAddrs[0].IP, - }) - w.WriteMsg(&msg) - return - } - } - - addresses, ok := d.domainsToAddresses[domain] - if !ok { - // ^ start of line - // localhost\. match literally - // ()* match between 0 and unlimited times - // [[:alnum:]]+\. match single character in [a-zA-Z0-9] minimum one time and ending in . 
literally - reg := regexp.MustCompile(`^localhost\.([[:alnum:]]+\.)*`) - if matched := reg.MatchString(domain); !matched { - panic(fmt.Sprintf("domain not mocked: %s", domain)) - } - - addresses = []string{"127.0.0.1"} - } - - for _, addr := range addresses { - msg.Answer = append(msg.Answer, &dns.A{ - Hdr: dns.RR_Header{Name: domain, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, - A: net.ParseIP(addr), - }) - } - } - w.WriteMsg(&msg) -} - -type DnsMockHandle struct { - id string - mockServer *dns.Server - ShutdownDnsMock func() error -} - -func (h *DnsMockHandle) PushDomains(domainsMap map[string][]string, domainsErrorMap map[string]int) func() { - handler := h.mockServer.Handler.(*dnsMockHandler) - handler.muDomainsToAddresses.Lock() - defer handler.muDomainsToAddresses.Unlock() - - dta := handler.domainsToAddresses - dte := handler.domainsToErrors - - prevDta := map[string][]string{} - prevDte := map[string]int{} - - for key, value := range dta { - prevDta[key] = value - } - - for key, value := range dte { - prevDte[key] = value - } - - pullDomainsFunc := func() { - handler := h.mockServer.Handler.(*dnsMockHandler) - handler.muDomainsToAddresses.Lock() - defer handler.muDomainsToAddresses.Unlock() - - handler.domainsToAddresses = prevDta - handler.domainsToErrors = prevDte - } - - for key, ips := range domainsMap { - addr, ok := dta[key] - if !ok { - dta[key] = ips - } else { - dta[key] = append(addr, ips...) - } - } - - for key, rCode := range domainsErrorMap { - dte[key] = rCode - } - - return pullDomainsFunc -} - -// InitDNSMock initializes dns server on udp:0 address and replaces net.DefaultResolver in order -// to route all dns queries within tests to this server. -// InitDNSMock returns handle, which can be used to add/remove dns query mock responses or initialization error. 
-func InitDNSMock(domainsMap map[string][]string, domainsErrorMap map[string]int) (*DnsMockHandle, error) { - addr, _ := net.ResolveUDPAddr("udp", ":0") - conn, err := net.ListenUDP("udp", addr) - if err != nil { - return &DnsMockHandle{}, err - } - - startResultChannel := make(chan error) - started := func() { - startResultChannel <- nil - } - - mockServer := &dns.Server{PacketConn: conn, NotifyStartedFunc: started} - handle := &DnsMockHandle{id: time.Now().String(), mockServer: mockServer} - - dnsMux := &dnsMockHandler{muDomainsToAddresses: sync.RWMutex{}} - - if domainsMap != nil { - dnsMux.domainsToAddresses = domainsMap - } else { - dnsMux.domainsToAddresses = DomainsToAddresses - } - - if domainsErrorMap != nil { - dnsMux.domainsToErrors = domainsErrorMap - } - - mockServer.Handler = dnsMux - - go func() { - startResultChannel <- mockServer.ActivateAndServe() - }() - - err = <-startResultChannel - if err != nil { - close(startResultChannel) - return handle, err - } - - muDefaultResolver.RLock() - defaultResolver := net.DefaultResolver - muDefaultResolver.RUnlock() - mockResolver := &net.Resolver{ - PreferGo: true, - Dial: func(ctx context.Context, network, address string) (net.Conn, error) { - d := net.Dialer{} - - //Use write lock to prevent unsafe d.DialContext update of net.DefaultResolver - muDefaultResolver.Lock() - defer muDefaultResolver.Unlock() - return d.DialContext(ctx, network, mockServer.PacketConn.LocalAddr().String()) - }, - } - - muDefaultResolver.Lock() - net.DefaultResolver = mockResolver - muDefaultResolver.Unlock() - - handle.ShutdownDnsMock = func() error { - muDefaultResolver.Lock() - net.DefaultResolver = defaultResolver - muDefaultResolver.Unlock() - - return mockServer.Shutdown() - } - - return handle, nil -} - -func IsDnsRecordsAddrsEqualsTo(itemAddrs, addrs []string) bool { - return reflect.DeepEqual(itemAddrs, addrs) -} diff --git a/test/goplugins/test_goplugin.go b/test/goplugins/test_goplugin.go deleted file mode 100644 index 
2ede4e9ae219..000000000000 --- a/test/goplugins/test_goplugin.go +++ /dev/null @@ -1,75 +0,0 @@ -package main - -import ( - "encoding/json" - "net/http" - - "github.com/TykTechnologies/tyk/ctx" - "github.com/TykTechnologies/tyk/user" -) - -// MyPluginPre checks if session is NOT present, adds custom header -// with initial URI path and will be used as "pre" custom MW -func MyPluginPre(rw http.ResponseWriter, r *http.Request) { - session := ctx.GetSession(r) - if session != nil { - rw.WriteHeader(http.StatusInternalServerError) - return - } - - rw.Header().Add("X-Initial-URI", r.URL.RequestURI()) -} - -// MyPluginAuthCheck does custom auth and will be used as -// "auth_check" custom MW -func MyPluginAuthCheck(rw http.ResponseWriter, r *http.Request) { - // perform auth (only one token "abc" is allowed) - token := r.Header.Get("Authorization") - if token != "abc" { - rw.Header().Add("X-Auth-Result", "failed") - rw.WriteHeader(http.StatusForbidden) - return - } - - // create session - session := &user.SessionState{ - OrgID: "default", - Alias: "abc-session", - } - ctx.SetSession(r, session, token, true) - - rw.Header().Add("X-Auth-Result", "OK") -} - -// MyPluginPostKeyAuth checks if session is present, adds custom header with session-alias -// and will be used as "post_key_auth" custom MW -func MyPluginPostKeyAuth(rw http.ResponseWriter, r *http.Request) { - session := ctx.GetSession(r) - if session == nil { - rw.Header().Add("X-Session-Alias", "not found") - rw.WriteHeader(http.StatusInternalServerError) - return - } - - rw.Header().Add("X-Session-Alias", session.Alias) -} - -// MyPluginPost prepares and sends reply, will be used as "post" custom MW -func MyPluginPost(rw http.ResponseWriter, r *http.Request) { - - replyData := map[string]interface{}{ - "message": "post message", - } - - jsonData, err := json.Marshal(replyData) - if err != nil { - rw.WriteHeader(http.StatusInternalServerError) - return - } - - rw.Header().Set("Content-Type", "application/json") - 
rw.WriteHeader(http.StatusOK) - rw.Write(jsonData) -} - -func main() {} diff --git a/test/http.go b/test/http.go index 54ad8952f5db..f60876e1844f 100644 --- a/test/http.go +++ b/test/http.go @@ -7,28 +7,21 @@ import ( "io" "io/ioutil" "net/http" - "net/http/httptest" - "net/url" "strings" - "testing" "time" ) type TestCase struct { - Method string `json:",omitempty"` - Path string `json:",omitempty"` - BaseURL string `json:",omitempty"` + Method, Path string `json:",omitempty"` Domain string `json:",omitempty"` - Proto string `json:",omitempty"` Code int `json:",omitempty"` Data interface{} `json:",omitempty"` Headers map[string]string `json:",omitempty"` PathParams map[string]string `json:",omitempty"` - FormParams map[string]string `json:",omitempty"` Cookies []*http.Cookie `json:",omitempty"` Delay time.Duration `json:",omitempty"` BodyMatch string `json:",omitempty"` - BodyMatchFunc func([]byte) bool `json:",omitempty"` + BodyMatchFunc func([]byte) bool `json:",omitempty"` BodyNotMatch string `json:",omitempty"` HeadersMatch map[string]string `json:",omitempty"` HeadersNotMatch map[string]string `json:",omitempty"` @@ -41,9 +34,8 @@ type TestCase struct { ControlRequest bool `json:",omitempty"` } -func AssertResponse(resp *http.Response, tc *TestCase) error { +func AssertResponse(resp *http.Response, tc TestCase) error { body, _ := ioutil.ReadAll(resp.Body) - resp.Body = ioutil.NopCloser(bytes.NewBuffer(body)) defer resp.Body.Close() if tc.Code != 0 && resp.StatusCode != tc.Code { @@ -62,10 +54,6 @@ func AssertResponse(resp *http.Response, tc *TestCase) error { return fmt.Errorf("Response body did not pass BodyMatchFunc: %s", string(body)) } - if tc.Proto != "" && tc.Proto != resp.Proto { - return fmt.Errorf("Expected protocol `%s` got `%s`.", tc.Proto, resp.Proto) - } - for k, v := range tc.HeadersMatch { if resp.Header.Get(k) != v { return fmt.Errorf("Response header `%s` expected `%s` instead `%s`. 
%v", k, v, resp.Header.Get(k), resp.Header) @@ -98,7 +86,7 @@ func AssertResponse(resp *http.Response, tc *TestCase) error { return nil } -func ReqBodyReader(body interface{}) io.Reader { +func reqBodyReader(body interface{}) io.Reader { switch x := body.(type) { case []byte: return bytes.NewReader(x) @@ -117,39 +105,12 @@ func ReqBodyReader(body interface{}) io.Reader { } } -func NewRequest(tc *TestCase) (req *http.Request, err error) { +func NewRequest(tc TestCase) (req *http.Request) { if tc.Method == "" { tc.Method = "GET" } - if tc.Path == "" { - tc.Path = "/" - } - - if tc.Domain == "" { - tc.Domain = "127.0.0.1" - } - - if tc.Client == nil { - tc.Client = &http.Client{} - } - - uri := tc.Path - if tc.BaseURL != "" { - uri = tc.BaseURL + tc.Path - } - - if strings.HasPrefix(uri, "http") { - uri = strings.Replace(uri, "[::]", tc.Domain, 1) - uri = strings.Replace(uri, "127.0.0.1", tc.Domain, 1) - - req, err = http.NewRequest(tc.Method, uri, ReqBodyReader(tc.Data)) - if err != nil { - return - } - } else { - req = httptest.NewRequest(tc.Method, uri, ReqBodyReader(tc.Data)) - } + req, _ = http.NewRequest(tc.Method, tc.Path, reqBodyReader(tc.Data)) for k, v := range tc.Headers { req.Header.Add(k, v) @@ -159,157 +120,5 @@ func NewRequest(tc *TestCase) (req *http.Request, err error) { req.AddCookie(c) } - formParams := url.Values{} - for k, v := range tc.FormParams { - formParams.Add(k, v) - } - req.PostForm = formParams - req.Form = formParams - - return req, nil -} - -// nopCloser is just like ioutil's, but here to let us re-read the same -// buffer inside by moving position to the start every time we done with reading -type nopCloser struct { - io.ReadSeeker -} - -// Read just a wrapper around real Read which also moves position to the start if we get EOF -// to have it ready for next read-cycle -func (n nopCloser) Read(p []byte) (int, error) { - num, err := n.ReadSeeker.Read(p) - if err == io.EOF { // move to start to have it ready for next read cycle - 
n.Seek(0, io.SeekStart) - } - return num, err -} - -// Close is a no-op Close -func (n nopCloser) Close() error { - return nil -} - -func copyBody(body io.ReadCloser) io.ReadCloser { - // check if body was already read and converted into our nopCloser - if nc, ok := body.(nopCloser); ok { - // seek to the beginning to have it ready for next read - nc.Seek(0, io.SeekStart) - return body - } - - // body is http's io.ReadCloser - let's close it after we read data - defer body.Close() - - // body is http's io.ReadCloser - read it up until EOF - var bodyRead bytes.Buffer - io.Copy(&bodyRead, body) - - // use seek-able reader for further body usage - reusableBody := bytes.NewReader(bodyRead.Bytes()) - - return nopCloser{reusableBody} -} - -func copyResponse(r *http.Response) *http.Response { - if r.Body != nil { - r.Body = copyBody(r.Body) - } - return r -} - -type HTTPTestRunner struct { - Do func(*http.Request, *TestCase) (*http.Response, error) - Assert func(*http.Response, *TestCase) error - RequestBuilder func(*TestCase) (*http.Request, error) -} - -func (r HTTPTestRunner) Run(t testing.TB, testCases ...TestCase) (*http.Response, error) { - var lastResponse *http.Response - var lastError error - - if r.Do == nil { - panic("Request runner not implemented") - } - - if r.Assert == nil { - r.Assert = AssertResponse - } - - if r.RequestBuilder == nil { - r.RequestBuilder = NewRequest - } - - for ti, tc := range testCases { - req, err := r.RequestBuilder(&tc) - if err != nil { - t.Errorf("[%d] Request build error: %s", ti, err.Error()) - continue - } - lastResponse, lastError = r.Do(req, &tc) - tcJSON, _ := json.Marshal(tc) - - if lastError != nil { - if tc.ErrorMatch != "" { - if !strings.Contains(lastError.Error(), tc.ErrorMatch) { - t.Errorf("[%d] Expect error `%s` to contain `%s`. %s", ti, lastError.Error(), tc.ErrorMatch, string(tcJSON)) - } - } else { - t.Errorf("[%d] Connection error: %s. 
%s", ti, lastError.Error(), string(tcJSON)) - } - continue - } else if tc.ErrorMatch != "" { - t.Error("Expect error.", string(tcJSON)) - continue - } - - respCopy := copyResponse(lastResponse) - if lastError = r.Assert(respCopy, &tc); lastError != nil { - t.Errorf("[%d] %s. %s\n", ti, lastError.Error(), string(tcJSON)) - } - - delay := tc.Delay - - if delay > 0 { - time.Sleep(delay) - } - } - - return lastResponse, lastError -} - -func HttpHandlerRunner(handler http.HandlerFunc) func(*http.Request, *TestCase) (*http.Response, error) { - return func(r *http.Request, _ *TestCase) (*http.Response, error) { - rec := httptest.NewRecorder() - handler(rec, r) - return rec.Result(), nil - } -} - -func TestHttpHandler(t testing.TB, handle http.HandlerFunc, testCases ...TestCase) { - runner := HTTPTestRunner{ - Do: HttpHandlerRunner(handle), - } - runner.Run(t, testCases...) -} - -func HttpServerRequestBuilder(baseURL string) func(tc *TestCase) (*http.Request, error) { - return func(tc *TestCase) (*http.Request, error) { - tc.BaseURL = baseURL - return NewRequest(tc) - } -} - -func HttpServerRunner() func(*http.Request, *TestCase) (*http.Response, error) { - return func(r *http.Request, tc *TestCase) (*http.Response, error) { - return tc.Client.Do(r) - } -} - -func TestHttpServer(t testing.TB, baseURL string, testCases ...TestCase) { - runner := HTTPTestRunner{ - Do: HttpServerRunner(), - RequestBuilder: HttpServerRequestBuilder(baseURL), - } - runner.Run(t, testCases...) + return req } diff --git a/trace/handler.go b/trace/handler.go deleted file mode 100644 index dc688914e91a..000000000000 --- a/trace/handler.go +++ /dev/null @@ -1,13 +0,0 @@ -package trace - -import "net/http" - -// Handle returns a http.Handler with root opentracting setup. This should be -// the topmost handler. 
-func Handle(service string, h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - span, req := Root(service, r) - defer span.Finish() - h.ServeHTTP(w, req) - }) -} diff --git a/trace/jaeger/config.go b/trace/jaeger/config.go deleted file mode 100644 index 76168f9eac7b..000000000000 --- a/trace/jaeger/config.go +++ /dev/null @@ -1,24 +0,0 @@ -package jaeger - -import ( - "github.com/uber/jaeger-client-go/config" - "gopkg.in/yaml.v2" -) - -// Load returns jaeger configuration from opts. Please see jaeger configuration -// for details about the key value pairs -// -// https://github.com/jaegertracing/jaeger-client-go/blob/master/config/config.go#L37 -func Load(opts map[string]interface{}) (*config.Configuration, error) { - // The object opts is loaded from json. Instead of decoding every single value - // by had we marshal to then fro yaml. - // - // This is possible because the tags are the same for both json and yaml. - b, err := yaml.Marshal(opts) - if err != nil { - return nil, err - } - var c config.Configuration - err = yaml.Unmarshal(b, &c) - return &c, nil -} diff --git a/trace/jaeger/config_test.go b/trace/jaeger/config_test.go deleted file mode 100644 index e7d6ce853c56..000000000000 --- a/trace/jaeger/config_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package jaeger - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/uber/jaeger-client-go" - "github.com/uber/jaeger-client-go/config" -) - -const sampleConfig = `{ - "serviceName": "your_service_name", - "disabled": false, - "rpc_metrics": false, - "tags": null, - "sampler": { - "type": "const", - "param": 1, - "samplingServerURL": "", - "maxOperations": 0, - "samplingRefreshInterval": 0 - }, - "reporter": { - "queueSize": 0, - "BufferFlushInterval": 0, - "logSpans": true, - "localAgentHostPort": "", - "collectorEndpoint": "", - "user": "", - "password": "" - }, - "headers": null, - "baggage_restrictions": null, - "throttler": null -}` - -func 
TestLoad(t *testing.T) { - cfg := config.Configuration{ - ServiceName: "your_service_name", - Sampler: &config.SamplerConfig{ - Type: jaeger.SamplerTypeConst, - Param: 1, - }, - Reporter: &config.ReporterConfig{ - LogSpans: true, - }, - } - var o map[string]interface{} - err := json.Unmarshal([]byte(sampleConfig), &o) - if err != nil { - t.Fatal(err) - } - loadedConfig, err := Load(o) - if err != nil { - t.Fatal(err) - } - a := []interface{}{ - cfg.ServiceName, cfg.Disabled, - cfg.RPCMetrics, cfg.Tags, cfg.Sampler, - cfg.Reporter, cfg.Headers, cfg.BaggageRestrictions, - } - b := []interface{}{ - loadedConfig.ServiceName, loadedConfig.Disabled, - loadedConfig.RPCMetrics, loadedConfig.Tags, - loadedConfig.Sampler, loadedConfig.Reporter, - loadedConfig.Headers, loadedConfig.BaggageRestrictions, - } - if !reflect.DeepEqual(a, b) { - t.Errorf("expected %v\n got %v\n", cfg, loadedConfig) - } -} diff --git a/trace/jaeger/jaeger.go b/trace/jaeger/jaeger.go deleted file mode 100644 index 0f942b3b5187..000000000000 --- a/trace/jaeger/jaeger.go +++ /dev/null @@ -1,48 +0,0 @@ -package jaeger - -import ( - "io" - - "github.com/opentracing/opentracing-go" - "github.com/uber/jaeger-client-go/config" -) - -// Name is the name of this tracer. -const Name = "jaeger" - -type Trace struct { - opentracing.Tracer - io.Closer -} - -func (Trace) Name() string { - return Name -} - -type Logger interface { - Errorf(msg string, args ...interface{}) - Infof(msg string, args ...interface{}) -} - -type wrapLogger struct { - Logger -} - -func (w wrapLogger) Error(msg string) { - w.Errorf("%s", msg) -} - -// Init returns a implementation of tyk.Tracer using jaeger client. 
-func Init(service string, opts map[string]interface{}, log Logger) (*Trace, error) { - cfg, err := Load(opts) - if service != "" { - cfg.ServiceName = service - } - tr, cls, err := cfg.NewTracer( - config.Logger(&wrapLogger{Logger: log}), - ) - if err != nil { - return nil, err - } - return &Trace{Tracer: tr, Closer: cls}, nil -} diff --git a/trace/log.go b/trace/log.go deleted file mode 100644 index a7bad0d751f7..000000000000 --- a/trace/log.go +++ /dev/null @@ -1,46 +0,0 @@ -package trace - -import ( - "context" - "fmt" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/log" -) - -// Logrus implements a subset of logrus api to reduce friction when we want -// to log both on opentracing and on logrus. -type Logrus interface { - Debug(args ...interface{}) - Error(args ...interface{}) - Warning(args ...interface{}) - Info(args ...interface{}) -} - -// Debug creates debug log on both logrus and span. -func Debug(ctx context.Context, logrus Logrus, args ...interface{}) { - logrus.Debug(args...) - Log(ctx, log.String("DEBUG", fmt.Sprint(args...))) -} - -func Error(ctx context.Context, logrus Logrus, args ...interface{}) { - logrus.Error(args...) - Log(ctx, log.String("ERROR", fmt.Sprint(args...))) -} - -func Warning(ctx context.Context, logrus Logrus, args ...interface{}) { - logrus.Warning(args...) - Log(ctx, log.String("WARN", fmt.Sprint(args...))) -} - -// Log tries to check if there is a span in ctx and adds logs fields on the span. -func Log(ctx context.Context, fields ...log.Field) { - if span := opentracing.SpanFromContext(ctx); span != nil { - span.LogFields(fields...) - } -} - -func Info(ctx context.Context, logrus Logrus, args ...interface{}) { - logrus.Info(args...) 
- Log(ctx, log.String("INFO", fmt.Sprint(args...))) -} diff --git a/trace/manager.go b/trace/manager.go deleted file mode 100644 index 9bfd2128bb25..000000000000 --- a/trace/manager.go +++ /dev/null @@ -1,263 +0,0 @@ -package trace - -import ( - "context" - "errors" - "net/http" - "strconv" - "sync" - "sync/atomic" - - "github.com/TykTechnologies/tyk/request" - "github.com/opentracing/opentracing-go" -) - -var ErrManagerDisabled = errors.New("trace: trace is diabled") - -// we use a global manager to avoid manual management as for our use case we -// only deal with on tracing server at a time. -var manager = NewManager(nil) - -// serviceID key used to store the service name in request context.Context. -type serviceID = struct{} - -// SetServiceID returns context with service assigned to it. -func SetServiceID(ctx context.Context, service string) context.Context { - return context.WithValue(ctx, serviceID{}, service) -} - -// GetServiceID returns service name attched to context returns an empty string -// if the service name key is not found. -func GetServiceID(ctx context.Context) string { - if v := ctx.Value(serviceID{}); v != nil { - return v.(string) - } - return "" -} - -// Logger defines api for logging messages by the OpenTracer struct. This is a -// workaround to avoid trying this to logrus -type Logger interface { - Errorf(format string, args ...interface{}) - Info(args ...interface{}) - Infof(format string, args ...interface{}) -} - -// OpenTracer manages initializing,storage and retrieving on multiple tracers -// based on service names. -type OpenTracer struct { - mu sync.RWMutex - services map[string]Tracer - log Logger - enabled atomic.Value - config Config -} - -type Config struct { - Name string - Opts map[string]interface{} -} - -// NewManager returns a new opentrace manager. If log is not nil it will be used -// to log errors and info by the manager. 
-func NewManager(log Logger) *OpenTracer { - return &OpenTracer{log: log, services: make(map[string]Tracer)} -} - -// Get returns a tracer for a given service, it returns a NoopTracer if there is -// no tracer for the service found. -func (o *OpenTracer) Get(service string) Tracer { - o.mu.RLock() - t, ok := o.services[service] - o.mu.RUnlock() - if !ok { - if o.log != nil { - o.log.Info(service, "not found") - } - return NoopTracer{} - } - return t -} - -// Get returns a tracer stored on the global trace manager. -func Get(service string) Tracer { - return manager.Get(service) -} - -// GetOk like Get but instead of returning NoopTracer for missing tracer it -// returns nil and false when the service tracer wasn't found. -func (o *OpenTracer) GetOk(service string) (Tracer, bool) { - o.mu.RLock() - t, ok := o.services[service] - o.mu.RUnlock() - return t, ok -} - -// Set saves tr using service as key on o. -func (o *OpenTracer) Set(service string, tr Tracer) { - o.mu.Lock() - o.services[service] = tr - o.mu.Unlock() -} - -// Close calls Close on the active tracer. -func (o *OpenTracer) Close() error { - o.mu.RLock() - for _, v := range o.services { - if err := v.Close(); err != nil { - return err - } - } - o.mu.RUnlock() - o.mu.Lock() - o.services = make(map[string]Tracer) - o.mu.Unlock() - return nil -} - -// Close calls Close on the global tace manager. -func Close() error { - return manager.Close() -} - -// IsEnabled returns true if the manager is enabled. -func (o *OpenTracer) IsEnabled() bool { - ok := o.enabled.Load() - if ok != nil { - return ok.(bool) - } - return false -} - -// IsEnabled returns true if the global trace manager is enabled. -func IsEnabled() bool { - return manager.IsEnabled() -} - -// Enable sets o to enabled state. -func (o *OpenTracer) Enable() { - o.enabled.Store(true) -} - -// Enable sets the global manager to enabled. -func Enable() { - manager.Enable() -} - -// Disable sets o to disabled state. 
-func (o *OpenTracer) Disable() { - o.enabled.Store(false) -} - -// Disable disables the global trace manager. -func Disable() { - manager.Disable() -} - -// SetLogger sets log as the default logger for o. -func (o *OpenTracer) SetLogger(log Logger) { - o.mu.Lock() - o.log = log - o.mu.Unlock() -} - -// AddTracer initializes a tracer based on the configuration stored in o for the -// given service name and caches. This does donthing when there is already a -// tracer for the given service. -func (o *OpenTracer) AddTracer(service string) error { - _, ok := o.GetOk(service) - if !ok { - tr, err := Init(o.config.Name, service, o.config.Opts, o.log) - if err != nil { - if o.log != nil { - o.log.Errorf("%v", err) - } - return err - } - o.Set(service, tr) - } - return nil -} - -// AddTracer initialize a tracer for the service. -func AddTracer(service string) error { - if !manager.IsEnabled() { - return ErrManagerDisabled - } - return manager.AddTracer(service) -} - -func SetLogger(log Logger) { - manager.SetLogger(log) -} - -func (o *OpenTracer) SetupTracing(name string, opts map[string]interface{}) { - o.config.Name = name - o.config.Opts = opts -} - -func SetupTracing(name string, opts map[string]interface{}) { - manager.SetupTracing(name, opts) - manager.Enable() -} - -func Root(service string, r *http.Request) (opentracing.Span, *http.Request) { - tr := Get(service) - mainCtx, err := Extract(tr, r.Header) - tags := opentracing.Tags{ - "from_ip": request.RealIP(r), - "method": r.Method, - "endpoint": r.URL.Path, - "raw_url": r.URL.String(), - "size": strconv.Itoa(int(r.ContentLength)), - } - if err != nil { - // TODO log this error? - // We just create a new span here so the log should be a warning. 
- span, ctx := opentracing.StartSpanFromContextWithTracer(r.Context(), - tr, - service, tags) - return span, r.WithContext(SetServiceID(ctx, service)) - } - span, ctx := opentracing.StartSpanFromContextWithTracer(r.Context(), - tr, - service, - opentracing.ChildOf(mainCtx), tags) - return span, r.WithContext(SetServiceID(ctx, service)) -} - -// Span creates a new span for the given ops. If tracing is disabled in this ctx -// then a noop span is created and the same ctx is returned. -// -// Note that the returned context contains the returned span as active span. So -// any spans created form the returned context will be children of the returned -// span. -func Span(ctx context.Context, ops string, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) { - return opentracing.StartSpanFromContextWithTracer(ctx, - Get(GetServiceID(ctx)), - ops, opts...) -} - -func Extract(tr Tracer, h http.Header) (opentracing.SpanContext, error) { - return tr.Extract( - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(h), - ) -} - -func ExtractFromContext(ctx context.Context, h http.Header) (opentracing.SpanContext, error) { - return Extract(Get(GetServiceID(ctx)), h) -} - -func Inject(service string, span opentracing.Span, h http.Header) error { - tr := Get(service) - return tr.Inject( - span.Context(), - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(h), - ) -} - -func InjectFromContext(ctx context.Context, span opentracing.Span, h http.Header) error { - return Inject(GetServiceID(ctx), span, h) -} diff --git a/trace/openzipkin/config.go b/trace/openzipkin/config.go deleted file mode 100644 index e2d626dd2fb4..000000000000 --- a/trace/openzipkin/config.go +++ /dev/null @@ -1,31 +0,0 @@ -package openzipkin - -import "encoding/json" - -type Config struct { - Reporter Reporter `json:"reporter"` - Sampler Sampler `json:"sampler"` -} - -type Reporter struct { - URL string `json:"url"` - BatchSize int `json:"batch_size"` - MaxBacklog int 
`json:"max_backlog"` -} - -type Sampler struct { - Name string `json:"name"` - Rate float64 `json:"rate"` - Salt int64 `json:"salt"` - Mod uint64 `json:"mod"` -} - -func Load(opts map[string]interface{}) (*Config, error) { - b, err := json.Marshal(opts) - if err != nil { - return nil, err - } - var c Config - err = json.Unmarshal(b, &c) - return &c, nil -} diff --git a/trace/openzipkin/zipkin.go b/trace/openzipkin/zipkin.go deleted file mode 100644 index 91d3b99800f5..000000000000 --- a/trace/openzipkin/zipkin.go +++ /dev/null @@ -1,293 +0,0 @@ -package openzipkin - -import ( - "errors" - "fmt" - "strings" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/log" - "github.com/openzipkin/zipkin-go" - "github.com/openzipkin/zipkin-go/model" - "github.com/openzipkin/zipkin-go/propagation/b3" - "github.com/openzipkin/zipkin-go/reporter" - "github.com/openzipkin/zipkin-go/reporter/http" -) - -var _ opentracing.Tracer = (*zipkinTracer)(nil) -var _ opentracing.SpanContext = (*spanContext)(nil) -var _ opentracing.Span = (*Span)(nil) - -const Name = "zipkin" - -type Span struct { - span zipkin.Span - tr *zipkinTracer -} - -func (s Span) Context() opentracing.SpanContext { - return spanContext{s.span.Context()} -} - -func (s Span) Finish() { - s.span.Finish() -} - -func (s Span) FinishWithOptions(opts opentracing.FinishOptions) { - s.span.Finish() -} - -func (s Span) SetOperationName(operationName string) opentracing.Span { - s.span.SetName(operationName) - return s -} - -func (s Span) SetTag(key string, value interface{}) opentracing.Span { - s.span.Tag(key, fmt.Sprint(value)) - return s -} - -func (s Span) LogFields(fields ...log.Field) { - now := time.Now() - lg := &logEncoder{h: func(key string, value interface{}) { - s.span.Annotate(now, fmt.Sprintf("%s %s", key, value)) - }} - for _, field := range fields { - field.Marshal(lg) - } -} - -type logEncoder struct { - h func(string, interface{}) -} - -func (e *logEncoder) emit(key 
string, value interface{}) { - if e.h != nil { - e.h(key, value) - } -} -func (e *logEncoder) EmitString(key, value string) { e.emit(key, value) } -func (e *logEncoder) EmitBool(key string, value bool) { e.emit(key, value) } -func (e *logEncoder) EmitInt(key string, value int) { e.emit(key, value) } -func (e *logEncoder) EmitInt32(key string, value int32) { e.emit(key, value) } -func (e *logEncoder) EmitInt64(key string, value int64) { e.emit(key, value) } -func (e *logEncoder) EmitUint32(key string, value uint32) { e.emit(key, value) } -func (e *logEncoder) EmitUint64(key string, value uint64) { e.emit(key, value) } -func (e *logEncoder) EmitFloat32(key string, value float32) { e.emit(key, value) } -func (e *logEncoder) EmitFloat64(key string, value float64) { e.emit(key, value) } -func (e *logEncoder) EmitObject(key string, value interface{}) { e.emit(key, value) } -func (e *logEncoder) EmitLazyLogger(value log.LazyLogger) {} - -func (s Span) LogKV(alternatingKeyValues ...interface{}) {} -func (s Span) SetBaggageItem(restrictedKey, value string) opentracing.Span { return s } -func (Span) BaggageItem(restrictedKey string) string { return "" } -func (s Span) Tracer() opentracing.Tracer { return s.tr } -func (s Span) LogEvent(event string) {} -func (s Span) LogEventWithPayload(event string, payload interface{}) {} -func (s Span) Log(data opentracing.LogData) {} - -type spanContext struct { - model.SpanContext -} - -func (spanContext) ForeachBaggageItem(handler func(k, v string) bool) {} - -type extractor interface { - extract(carrier interface{}) (spanContext, error) -} - -var emptyContext spanContext - -func extractHTTPHeader(carrier interface{}) (spanContext, error) { - c, ok := carrier.(opentracing.HTTPHeadersCarrier) - if !ok { - return emptyContext, opentracing.ErrInvalidCarrier - } - var ( - traceIDHeader string - spanIDHeader string - parentSpanIDHeader string - sampledHeader string - flagsHeader string - singleHeader string - ) - err := 
c.ForeachKey(func(key, val string) error { - switch strings.ToLower(key) { - case b3.TraceID: - traceIDHeader = val - case b3.SpanID: - spanIDHeader = val - case b3.ParentSpanID: - parentSpanIDHeader = val - case b3.Sampled: - sampledHeader = val - case b3.Flags: - flagsHeader = val - case b3.Context: - singleHeader = val - } - return nil - }) - if err != nil { - return emptyContext, err - } - if singleHeader != "" { - ctx, err := b3.ParseSingleHeader(singleHeader) - if err != nil { - return emptyContext, err - } - return spanContext{*ctx}, nil - } - ctx, err := b3.ParseHeaders( - traceIDHeader, spanIDHeader, parentSpanIDHeader, - sampledHeader, flagsHeader, - ) - if err != nil { - return emptyContext, err - } - return spanContext{*ctx}, nil -} - -type extractorFn func(carrier interface{}) (spanContext, error) - -func (fn extractorFn) extract(carrier interface{}) (spanContext, error) { - return fn(carrier) -} - -type injector interface { - inject(ctx spanContext, carrier interface{}) error -} - -func injectHTTPHeaders(ctx spanContext, carrier interface{}) error { - c, ok := carrier.(opentracing.HTTPHeadersCarrier) - if !ok { - return opentracing.ErrInvalidCarrier - } - if ctx == emptyContext { - return nil - } - c.Set(b3.Context, b3.BuildSingleHeader(ctx.SpanContext)) - return nil -} - -type injectorFn func(ctx spanContext, carrier interface{}) error - -func (fn injectorFn) inject(ctx spanContext, carrier interface{}) error { - return fn(ctx, carrier) -} - -type zipkinTracer struct { - zip *zipkin.Tracer - extractors map[interface{}]extractor - injectors map[interface{}]injector -} - -func NewTracer(zip *zipkin.Tracer) *zipkinTracer { - return &zipkinTracer{ - zip: zip, - extractors: map[interface{}]extractor{ - opentracing.HTTPHeaders: extractorFn(extractHTTPHeader), - }, - injectors: map[interface{}]injector{ - opentracing.HTTPHeaders: injectorFn(injectHTTPHeaders), - }, - } -} - -func (z *zipkinTracer) StartSpan(operationName string, opts 
...opentracing.StartSpanOption) opentracing.Span { - var o []zipkin.SpanOption - if len(opts) > 0 { - var os opentracing.StartSpanOptions - for _, opt := range opts { - opt.Apply(&os) - } - if len(os.Tags) > 0 { - t := make(map[string]string) - for k, v := range os.Tags { - t[k] = fmt.Sprint(v) - } - o = append(o, zipkin.Tags(t)) - } - for _, ref := range os.References { - switch ref.Type { - case opentracing.ChildOfRef: - sp := ref.ReferencedContext.(spanContext) - o = append(o, zipkin.Parent( - sp.SpanContext, - )) - } - } - } - sp := z.zip.StartSpan(operationName, o...) - return Span{tr: z, span: sp} -} - -func (z *zipkinTracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { - if x, ok := z.extractors[format]; ok { - return x.extract(carrier) - } - return nil, opentracing.ErrUnsupportedFormat -} - -func (z *zipkinTracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error { - c, ok := ctx.(spanContext) - if !ok { - return opentracing.ErrInvalidSpanContext - } - if x, ok := z.injectors[format]; ok { - return x.inject(c, carrier) - } - return opentracing.ErrUnsupportedFormat -} - -type Tracer struct { - opentracing.Tracer - reporter.Reporter -} - -func (Tracer) Name() string { - return Name -} - -func Init(service string, opts map[string]interface{}) (*Tracer, error) { - c, err := Load(opts) - if err != nil { - return nil, err - } - if c.Reporter.URL == "" { - return nil, errors.New("zipkin: missing url") - } - r := http.NewReporter(c.Reporter.URL) - endpoint, err := zipkin.NewEndpoint(service, "") - if err != nil { - return nil, err - } - sampler, err := getSampler(c.Sampler) - if err != nil { - return nil, err - } - tr, err := zipkin.NewTracer(r, - zipkin.WithLocalEndpoint(endpoint), - zipkin.WithSampler(sampler), - ) - if err != nil { - return nil, err - } - return &Tracer{Tracer: NewTracer(tr), Reporter: r}, nil -} - -func getSampler(s Sampler) (zipkin.Sampler, error) { - if s.Name == "" { - 
return zipkin.AlwaysSample, nil - } - switch s.Name { - case "boundary": - return zipkin.NewBoundarySampler(s.Rate, s.Salt) - case "count": - return zipkin.NewCountingSampler(s.Rate) - case "mod": - return zipkin.NewModuloSampler(s.Mod), nil - } - return nil, fmt.Errorf("zipkin: unknown sampler %s", s.Name) -} diff --git a/trace/trace.go b/trace/trace.go deleted file mode 100644 index c3037af84292..000000000000 --- a/trace/trace.go +++ /dev/null @@ -1,41 +0,0 @@ -package trace - -import ( - "io" - - "github.com/TykTechnologies/tyk/trace/jaeger" - "github.com/TykTechnologies/tyk/trace/openzipkin" - "github.com/opentracing/opentracing-go" -) - -type Tracer interface { - Name() string - opentracing.Tracer - io.Closer -} - -// NoopTracer wraps opentracing.NoopTracer to satisfy Tracer interface. -type NoopTracer struct { - opentracing.NoopTracer -} - -// Close implements io.Closer interface by doing nothing. -func (n NoopTracer) Close() error { - return nil -} - -func (n NoopTracer) Name() string { - return "NoopTracer" -} - -// Init returns a tracer for a given name. 
-func Init(name string, service string, opts map[string]interface{}, logger Logger) (Tracer, error) { - switch name { - case jaeger.Name: - return jaeger.Init(service, opts, logger) - case openzipkin.Name: - return openzipkin.Init(service, opts) - default: - return NoopTracer{}, nil - } -} diff --git a/trace/trace_test.go b/trace/trace_test.go deleted file mode 100644 index 34f21ebd60bd..000000000000 --- a/trace/trace_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package trace - -import "testing" - -func TestInit(t *testing.T) { - t.Run("returns noop tracer when no match", func(ts *testing.T) { - o, err := Init("noop", "noop", nil, nil) - if err != nil { - ts.Fatal("expected err to be nil") - } - _ = o.(NoopTracer) - }) -} diff --git a/gateway/tracing.go b/tracing.go similarity index 90% rename from gateway/tracing.go rename to tracing.go index a963b5b47a5e..51b295149514 100644 --- a/gateway/tracing.go +++ b/tracing.go @@ -1,4 +1,4 @@ -package gateway +package main import ( "bytes" @@ -29,21 +29,25 @@ func (tr *traceHttpRequest) toRequest() *http.Request { return r } -// TraceRequest is for tracing an HTTP request -// swagger:model TraceRequest +// Tracing HTTP request +// +// swagger:model type traceRequest struct { Request *traceHttpRequest `json:"request"` Spec *apidef.APIDefinition `json:"spec"` } -// TraceResponse is for tracing an HTTP response -// swagger:model TraceResponse +// Tracing HTTP response +// +// swagger:model type traceResponse struct { Message string `json:"message"` Response string `json:"response"` Logs string `json:"logs"` } +// swagger:operation POST /trace trace trace +// // Tracing request // Used to test API definition by sending sample request, // and analysisng output of both response and logs @@ -108,7 +112,7 @@ func traceHandler(w http.ResponseWriter, r *http.Request) { loader := &APIDefinitionLoader{} spec := loader.MakeSpec(traceReq.Spec, logrus.NewEntry(logger)) - chainObj := processSpec(spec, nil, &redisStore, &redisOrgStore, 
&healthStore, &rpcAuthStore, &rpcOrgStore, subrouter, logrus.NewEntry(logger)) + chainObj := processSpec(spec, nil, redisStore, redisOrgStore, healthStore, rpcAuthStore, rpcOrgStore, subrouter, logrus.NewEntry(logger)) spec.middlewareChain = chainObj.ThisHandler if chainObj.ThisHandler == nil { diff --git a/tyk.conf.example b/tyk.conf.example index 64673c54eabe..537ffbdab1e8 100644 --- a/tyk.conf.example +++ b/tyk.conf.example @@ -21,11 +21,6 @@ "type": "", "ignored_ips": [] }, - "dns_cache": { - "enabled": false, - "ttl": 3600, - "check_interval": 60 - }, "optimisations_use_async_session_write": true, "allow_master_keys": false, "policies": { diff --git a/user/session.go b/user/session.go index abb2010611e3..8f6529994b76 100644 --- a/user/session.go +++ b/user/session.go @@ -1,8 +1,6 @@ package user import ( - "crypto/md5" - "fmt" "time" "github.com/TykTechnologies/tyk/config" @@ -48,8 +46,6 @@ type AccessDefinition struct { // SessionState objects represent a current API session, mainly used for rate limiting. // There's a data structure that's based on this and it's used for Protocol Buffer support, make sure to update "coprocess/proto/coprocess_session_state.proto" and generate the bindings using: cd coprocess/proto && ./update_bindings.sh -// -// swagger:model type SessionState struct { LastCheck int64 `json:"last_check" msg:"last_check"` Allowance float64 `json:"allowance" msg:"allowance"` @@ -96,10 +92,6 @@ type SessionState struct { keyHash string } -func (s *SessionState) MD5Hash() string { - return fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%+v", s)))) -} - func (s *SessionState) KeyHash() string { if s.keyHash == "" { panic("KeyHash cache not found. 
You should call `SetKeyHash` before.") diff --git a/bin/ci-benchmark.sh b/utils/ci-benchmark.sh similarity index 100% rename from bin/ci-benchmark.sh rename to utils/ci-benchmark.sh diff --git a/utils/ci-test.sh b/utils/ci-test.sh new file mode 100755 index 000000000000..2243d9de3a93 --- /dev/null +++ b/utils/ci-test.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +set -e + +MATRIX=( + "-tags 'coprocess python'" + "-tags 'coprocess grpc'" +) + +# print a command and execute it +show() { + echo "$@" >&2 + eval "$@" +} + +fatal() { + echo "$@" >&2 + exit 1 +} + +PKGS="$(go list ./... | grep -v /vendor/)" + +i=0 + +go get -t + +# need to do per-pkg because go test doesn't support a single coverage +# profile for multiple pkgs +for pkg in $PKGS; do + for opts in "${MATRIX[@]}"; do + show go test -timeout 2m -v -coverprofile=test-$i.cov $opts $pkg \ + || fatal "go test errored" + let i++ || true + done +done + +if [[ ! $LATEST_GO ]]; then + echo "Skipping race, checks, and coverage report" + exit 0 +fi + +go test -race $PKGS || fatal "go test -race failed" + +for opts in "${MATRIX[@]}"; do + show go vet $opts $PKGS || fatal "go vet errored" +done + +# Includes all top-level files and dirs that don't start with a dot +# (hidden). Also excludes all of vendor/. 
+GOFILES=$(find * -name '*.go' -not -path 'vendor/*') + +FMT_FILES="$(gofmt -s -l $GOFILES)" +if [[ -n $FMT_FILES ]]; then + fatal "Run 'gofmt -s -w' on these files:\n$FMT_FILES" +fi + +IMP_FILES="$(goimports -local github.com/TykTechnologies -l $GOFILES)" +if [[ -n $IMP_FILES ]]; then + fatal "Run 'goimports -local github.com/TykTechnologies -w' on these files:\n$IMP_FILES" +fi diff --git a/bin/dist_build.sh b/utils/dist_build.sh similarity index 100% rename from bin/dist_build.sh rename to utils/dist_build.sh diff --git a/bin/dist_push.sh b/utils/dist_push.sh similarity index 100% rename from bin/dist_push.sh rename to utils/dist_push.sh diff --git a/bin/set-version.sh b/utils/set-version.sh similarity index 100% rename from bin/set-version.sh rename to utils/set-version.sh diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md deleted file mode 100644 index b888e20abaa5..000000000000 --- a/vendor/github.com/Masterminds/semver/CHANGELOG.md +++ /dev/null @@ -1,86 +0,0 @@ -# 1.4.2 (2018-04-10) - -## Changed -- #72: Updated the docs to point to vert for a console appliaction -- #71: Update the docs on pre-release comparator handling - -## Fixed -- #70: Fix the handling of pre-releases and the 0.0.0 release edge case - -# 1.4.1 (2018-04-02) - -## Fixed -- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) - -# 1.4.0 (2017-10-04) - -## Changed -- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) - -# 1.3.1 (2017-07-10) - -## Fixed -- Fixed #57: number comparisons in prerelease sometimes inaccurate - -# 1.3.0 (2017-05-02) - -## Added -- #45: Added json (un)marshaling support (thanks @mh-cbon) -- Stability marker. 
See https://masterminds.github.io/stability/ - -## Fixed -- #51: Fix handling of single digit tilde constraint (thanks @dgodd) - -## Changed -- #55: The godoc icon moved from png to svg - -# 1.2.3 (2017-04-03) - -## Fixed -- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * - -# Release 1.2.2 (2016-12-13) - -## Fixed -- #34: Fixed issue where hyphen range was not working with pre-release parsing. - -# Release 1.2.1 (2016-11-28) - -## Fixed -- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" - properly. - -# Release 1.2.0 (2016-11-04) - -## Added -- #20: Added MustParse function for versions (thanks @adamreese) -- #15: Added increment methods on versions (thanks @mh-cbon) - -## Fixed -- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and - might not satisfy the intended compatibility. The change here ignores pre-releases - on constraint checks (e.g., ~ or ^) when a pre-release is not part of the - constraint. For example, `^1.2.3` will ignore pre-releases while - `^1.2.3-alpha` will include them. - -# Release 1.1.1 (2016-06-30) - -## Changed -- Issue #9: Speed up version comparison performance (thanks @sdboyer) -- Issue #8: Added benchmarks (thanks @sdboyer) -- Updated Go Report Card URL to new location -- Updated Readme to add code snippet formatting (thanks @mh-cbon) -- Updating tagging to v[SemVer] structure for compatibility with other tools. - -# Release 1.1.0 (2016-03-11) - -- Issue #2: Implemented validation to provide reasons a versions failed a - constraint. - -# Release 1.0.1 (2015-12-31) - -- Fixed #1: * constraint failing on valid versions. 
- -# Release 1.0.0 (2015-10-20) - -- Initial release diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt deleted file mode 100644 index 9ff7da9c48b6..000000000000 --- a/vendor/github.com/Masterminds/semver/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014-2019, Matt Butcher and Matt Farina - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile deleted file mode 100644 index a7a1b4e36de9..000000000000 --- a/vendor/github.com/Masterminds/semver/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -.PHONY: setup -setup: - go get -u gopkg.in/alecthomas/gometalinter.v1 - gometalinter.v1 --install - -.PHONY: test -test: validate lint - @echo "==> Running tests" - go test -v - -.PHONY: validate -validate: - @echo "==> Running static validations" - @gometalinter.v1 \ - --disable-all \ - --enable deadcode \ - --severity deadcode:error \ - --enable gofmt \ - --enable gosimple \ - --enable ineffassign \ - --enable misspell \ - --enable vet \ - --tests \ - --vendor \ - --deadline 60s \ - ./... || exit_code=1 - -.PHONY: lint -lint: - @echo "==> Running linters" - @gometalinter.v1 \ - --disable-all \ - --enable golint \ - --vendor \ - --deadline 60s \ - ./... || : diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md deleted file mode 100644 index af845f12dc8f..000000000000 --- a/vendor/github.com/Masterminds/semver/README.md +++ /dev/null @@ -1,186 +0,0 @@ -# SemVer - -The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to: - -* Parse semantic versions -* Sort semantic versions -* Check if a semantic version fits within a set of constraints -* Optionally work with a `v` prefix - -[![Stability: -Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) -[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) - -If you are looking for a command line tool for version comparisons please see -[vert](https://github.com/Masterminds/vert) which uses this library. - -## Parsing Semantic Versions - -To parse a semantic version use the `NewVersion` function. For example, - -```go - v, err := semver.NewVersion("1.2.3-beta.1+build345") -``` - -If there is an error the version wasn't parseable. The version object has methods -to get the parts of the version, compare it to other versions, convert the -version back into a string, and get the original string. For more details -please see the [documentation](https://godoc.org/github.com/Masterminds/semver). - -## Sorting Semantic Versions - -A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/) -package from the standard library. 
For example, - -```go - raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} - vs := make([]*semver.Version, len(raw)) - for i, r := range raw { - v, err := semver.NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - vs[i] = v - } - - sort.Sort(semver.Collection(vs)) -``` - -## Checking Version Constraints - -Checking a version against version constraints is one of the most featureful -parts of the package. - -```go - c, err := semver.NewConstraint(">= 1.2.3") - if err != nil { - // Handle constraint not being parseable. - } - - v, _ := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - // Check if the version meets the constraints. The a variable will be true. - a := c.Check(v) -``` - -## Basic Comparisons - -There are two elements to the comparisons. First, a comparison string is a list -of comma separated and comparisons. These are then separated by || separated or -comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a -comparison that's greater than or equal to 1.2 and less than 3.0.0 or is -greater than or equal to 4.2.3. - -The basic comparisons are: - -* `=`: equal (aliased to no operator) -* `!=`: not equal -* `>`: greater than -* `<`: less than -* `>=`: greater than or equal to -* `<=`: less than or equal to - -## Working With Pre-release Versions - -Pre-releases, for those not familiar with them, are used for software releases -prior to stable or generally available releases. Examples of pre-releases include -development, alpha, beta, and release candidate releases. A pre-release may be -a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the -order of precidence, pre-releases come before their associated releases. In this -example `1.2.3-beta.1 < 1.2.3`. - -According to the Semantic Version specification pre-releases may not be -API compliant with their release counterpart. 
It says, - -> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. - -SemVer comparisons without a pre-release comparator will skip pre-release versions. -For example, `>=1.2.3` will skip pre-releases when looking at a list of releases -while `>=1.2.3-0` will evaluate and find pre-releases. - -The reason for the `0` as a pre-release version in the example comparison is -because pre-releases can only contain ASCII alphanumerics and hyphens (along with -`.` separators), per the spec. Sorting happens in ASCII sort order, again per the spec. The lowest character is a `0` in ASCII sort order (see an [ASCII Table](http://www.asciitable.com/)) - -Understanding ASCII sort ordering is important because A-Z comes before a-z. That -means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case -sensitivity doesn't apply here. This is due to ASCII sort ordering which is what -the spec specifies. - -## Hyphen Range Comparisons - -There are multiple methods to handle ranges and the first is hyphens ranges. -These look like: - -* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` -* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` - -## Wildcards In Comparisons - -The `x`, `X`, and `*` characters can be used as a wildcard character. This works -for all comparison operators. When used on the `=` operator it falls -back to the pack level comparison (see tilde below). For example, - -* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -* `>= 1.2.x` is equivalent to `>= 1.2.0` -* `<= 2.x` is equivalent to `< 3` -* `*` is equivalent to `>= 0.0.0` - -## Tilde Range Comparisons (Patch) - -The tilde (`~`) comparison operator is for patch level ranges when a minor -version is specified and major level changes when the minor number is missing. 
-For example, - -* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` -* `~1` is equivalent to `>= 1, < 2` -* `~2.3` is equivalent to `>= 2.3, < 2.4` -* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -* `~1.x` is equivalent to `>= 1, < 2` - -## Caret Range Comparisons (Major) - -The caret (`^`) comparison operator is for major level changes. This is useful -when comparisons of API versions as a major change is API breaking. For example, - -* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` -* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` -* `^2.3` is equivalent to `>= 2.3, < 3` -* `^2.x` is equivalent to `>= 2.0.0, < 3` - -# Validation - -In addition to testing a version against a constraint, a version can be validated -against a constraint. When validation fails a slice of errors containing why a -version didn't meet the constraint is returned. For example, - -```go - c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") - if err != nil { - // Handle constraint not being parseable. - } - - v, _ := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - - // Validate a version against a constraint. - a, msgs := c.Validate(v) - // a is false - for _, m := range msgs { - fmt.Println(m) - - // Loops over the errors which would read - // "1.3 is greater than 1.2.3" - // "1.3 is less than 1.4" - } -``` - -# Contribute - -If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) -or [create a pull request](https://github.com/Masterminds/semver/pulls). 
diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml deleted file mode 100644 index b2778df15a40..000000000000 --- a/vendor/github.com/Masterminds/semver/appveyor.yml +++ /dev/null @@ -1,44 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\semver -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -install: - - go version - - go env - - go get -u gopkg.in/alecthomas/gometalinter.v1 - - set PATH=%PATH%;%GOPATH%\bin - - gometalinter.v1.exe --install - -build_script: - - go install -v ./... - -test_script: - - "gometalinter.v1 \ - --disable-all \ - --enable deadcode \ - --severity deadcode:error \ - --enable gofmt \ - --enable gosimple \ - --enable ineffassign \ - --enable misspell \ - --enable vet \ - --tests \ - --vendor \ - --deadline 60s \ - ./... || exit_code=1" - - "gometalinter.v1 \ - --disable-all \ - --enable golint \ - --vendor \ - --deadline 60s \ - ./... || :" - - go test -v - -deploy: off diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go deleted file mode 100644 index a78235895fdc..000000000000 --- a/vendor/github.com/Masterminds/semver/collection.go +++ /dev/null @@ -1,24 +0,0 @@ -package semver - -// Collection is a collection of Version instances and implements the sort -// interface. See the sort package for more details. -// https://golang.org/pkg/sort/ -type Collection []*Version - -// Len returns the length of a collection. The number of Version instances -// on the slice. -func (c Collection) Len() int { - return len(c) -} - -// Less is needed for the sort interface to compare two Version objects on the -// slice. If checks if one is less than the other. -func (c Collection) Less(i, j int) bool { - return c[i].LessThan(c[j]) -} - -// Swap is needed for the sort interface to replace the Version objects -// at two different positions in the slice. 
-func (c Collection) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go deleted file mode 100644 index 2f3d7793afd2..000000000000 --- a/vendor/github.com/Masterminds/semver/constraints.go +++ /dev/null @@ -1,406 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "regexp" - "strings" -) - -// Constraints is one or more constraint that a semantic version can be -// checked against. -type Constraints struct { - constraints [][]*constraint -} - -// NewConstraint returns a Constraints instance that a Version instance can -// be checked against. If there is a parse error it will be returned. -func NewConstraint(c string) (*Constraints, error) { - - // Rewrite - ranges into a comparison operation. - c = rewriteRange(c) - - ors := strings.Split(c, "||") - or := make([][]*constraint, len(ors)) - for k, v := range ors { - cs := strings.Split(v, ",") - result := make([]*constraint, len(cs)) - for i, s := range cs { - pc, err := parseConstraint(s) - if err != nil { - return nil, err - } - - result[i] = pc - } - or[k] = result - } - - o := &Constraints{constraints: or} - return o, nil -} - -// Check tests if a version satisfies the constraints. -func (cs Constraints) Check(v *Version) bool { - // loop over the ORs and check the inner ANDs - for _, o := range cs.constraints { - joy := true - for _, c := range o { - if !c.check(v) { - joy = false - break - } - } - - if joy { - return true - } - } - - return false -} - -// Validate checks if a version satisfies a constraint. If not a slice of -// reasons for the failure are returned in addition to a bool. 
-func (cs Constraints) Validate(v *Version) (bool, []error) { - // loop over the ORs and check the inner ANDs - var e []error - for _, o := range cs.constraints { - joy := true - for _, c := range o { - if !c.check(v) { - em := fmt.Errorf(c.msg, v, c.orig) - e = append(e, em) - joy = false - } - } - - if joy { - return true, []error{} - } - } - - return false, e -} - -var constraintOps map[string]cfunc -var constraintMsg map[string]string -var constraintRegex *regexp.Regexp - -func init() { - constraintOps = map[string]cfunc{ - "": constraintTildeOrEqual, - "=": constraintTildeOrEqual, - "!=": constraintNotEqual, - ">": constraintGreaterThan, - "<": constraintLessThan, - ">=": constraintGreaterThanEqual, - "=>": constraintGreaterThanEqual, - "<=": constraintLessThanEqual, - "=<": constraintLessThanEqual, - "~": constraintTilde, - "~>": constraintTilde, - "^": constraintCaret, - } - - constraintMsg = map[string]string{ - "": "%s is not equal to %s", - "=": "%s is not equal to %s", - "!=": "%s is equal to %s", - ">": "%s is less than or equal to %s", - "<": "%s is greater than or equal to %s", - ">=": "%s is less than %s", - "=>": "%s is less than %s", - "<=": "%s is greater than %s", - "=<": "%s is greater than %s", - "~": "%s does not have same major and minor version as %s", - "~>": "%s does not have same major and minor version as %s", - "^": "%s does not have same major version as %s", - } - - ops := make([]string, 0, len(constraintOps)) - for k := range constraintOps { - ops = append(ops, regexp.QuoteMeta(k)) - } - - constraintRegex = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - strings.Join(ops, "|"), - cvRegex)) - - constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( - `\s*(%s)\s+-\s+(%s)\s*`, - cvRegex, cvRegex)) -} - -// An individual constraint -type constraint struct { - // The callback function for the restraint. It performs the logic for - // the constraint. 
- function cfunc - - msg string - - // The version used in the constraint check. For example, if a constraint - // is '<= 2.0.0' the con a version instance representing 2.0.0. - con *Version - - // The original parsed version (e.g., 4.x from != 4.x) - orig string - - // When an x is used as part of the version (e.g., 1.x) - minorDirty bool - dirty bool - patchDirty bool -} - -// Check if a version meets the constraint -func (c *constraint) check(v *Version) bool { - return c.function(v, c) -} - -type cfunc func(v *Version, c *constraint) bool - -func parseConstraint(c string) (*constraint, error) { - m := constraintRegex.FindStringSubmatch(c) - if m == nil { - return nil, fmt.Errorf("improper constraint: %s", c) - } - - ver := m[2] - orig := ver - minorDirty := false - patchDirty := false - dirty := false - if isX(m[3]) { - ver = "0.0.0" - dirty = true - } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { - minorDirty = true - dirty = true - ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) - } else if isX(strings.TrimPrefix(m[5], ".")) { - dirty = true - patchDirty = true - ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) - } - - con, err := NewVersion(ver) - if err != nil { - - // The constraintRegex should catch any regex parsing errors. So, - // we should never get here. - return nil, errors.New("constraint Parser Error") - } - - cs := &constraint{ - function: constraintOps[m[1]], - msg: constraintMsg[m[1]], - con: con, - orig: orig, - minorDirty: minorDirty, - patchDirty: patchDirty, - dirty: dirty, - } - return cs, nil -} - -// Constraint functions -func constraintNotEqual(v *Version, c *constraint) bool { - if c.dirty { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. 
- if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if c.con.Major() != v.Major() { - return true - } - if c.con.Minor() != v.Minor() && !c.minorDirty { - return true - } else if c.minorDirty { - return false - } - - return false - } - - return !v.Equal(c.con) -} - -func constraintGreaterThan(v *Version, c *constraint) bool { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - return v.Compare(c.con) == 1 -} - -func constraintLessThan(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if !c.dirty { - return v.Compare(c.con) < 0 - } - - if v.Major() > c.con.Major() { - return false - } else if v.Minor() > c.con.Minor() && !c.minorDirty { - return false - } - - return true -} - -func constraintGreaterThanEqual(v *Version, c *constraint) bool { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - return v.Compare(c.con) >= 0 -} - -func constraintLessThanEqual(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. 
- if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if !c.dirty { - return v.Compare(c.con) <= 0 - } - - if v.Major() > c.con.Major() { - return false - } else if v.Minor() > c.con.Minor() && !c.minorDirty { - return false - } - - return true -} - -// ~*, ~>* --> >= 0.0.0 (any) -// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 -// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 -// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 -// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 -// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 -func constraintTilde(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if v.LessThan(c.con) { - return false - } - - // ~0.0.0 is a special case where all constraints are accepted. It's - // equivalent to >= 0.0.0. - if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && - !c.minorDirty && !c.patchDirty { - return true - } - - if v.Major() != c.con.Major() { - return false - } - - if v.Minor() != c.con.Minor() && !c.minorDirty { - return false - } - - return true -} - -// When there is a .x (dirty) status it automatically opts in to ~. Otherwise -// it's a straight = -func constraintTildeOrEqual(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. 
- if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if c.dirty { - c.msg = constraintMsg["~"] - return constraintTilde(v, c) - } - - return v.Equal(c.con) -} - -// ^* --> (any) -// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0 -// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0 -// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0 -// ^1.2.3 --> >=1.2.3, <2.0.0 -// ^1.2.0 --> >=1.2.0, <2.0.0 -func constraintCaret(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if v.LessThan(c.con) { - return false - } - - if v.Major() != c.con.Major() { - return false - } - - return true -} - -var constraintRangeRegex *regexp.Regexp - -const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + - `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` - -func isX(x string) bool { - switch x { - case "x", "*", "X": - return true - default: - return false - } -} - -func rewriteRange(i string) string { - m := constraintRangeRegex.FindAllStringSubmatch(i, -1) - if m == nil { - return i - } - o := i - for _, v := range m { - t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) - o = strings.Replace(o, v[0], t, 1) - } - - return o -} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go deleted file mode 100644 index 6a6c24c6d6e2..000000000000 --- a/vendor/github.com/Masterminds/semver/doc.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. 
- -Specifically it provides the ability to: - - * Parse semantic versions - * Sort semantic versions - * Check if a semantic version fits within a set of constraints - * Optionally work with a `v` prefix - -Parsing Semantic Versions - -To parse a semantic version use the `NewVersion` function. For example, - - v, err := semver.NewVersion("1.2.3-beta.1+build345") - -If there is an error the version wasn't parseable. The version object has methods -to get the parts of the version, compare it to other versions, convert the -version back into a string, and get the original string. For more details -please see the documentation at https://godoc.org/github.com/Masterminds/semver. - -Sorting Semantic Versions - -A set of versions can be sorted using the `sort` package from the standard library. -For example, - - raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} - vs := make([]*semver.Version, len(raw)) - for i, r := range raw { - v, err := semver.NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - vs[i] = v - } - - sort.Sort(semver.Collection(vs)) - -Checking Version Constraints - -Checking a version against version constraints is one of the most featureful -parts of the package. - - c, err := semver.NewConstraint(">= 1.2.3") - if err != nil { - // Handle constraint not being parseable. - } - - v, err := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - // Check if the version meets the constraints. The a variable will be true. - a := c.Check(v) - -Basic Comparisons - -There are two elements to the comparisons. First, a comparison string is a list -of comma separated and comparisons. These are then separated by || separated or -comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a -comparison that's greater than or equal to 1.2 and less than 3.0.0 or is -greater than or equal to 4.2.3. 
- -The basic comparisons are: - - * `=`: equal (aliased to no operator) - * `!=`: not equal - * `>`: greater than - * `<`: less than - * `>=`: greater than or equal to - * `<=`: less than or equal to - -Hyphen Range Comparisons - -There are multiple methods to handle ranges and the first is hyphens ranges. -These look like: - - * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` - * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` - -Wildcards In Comparisons - -The `x`, `X`, and `*` characters can be used as a wildcard character. This works -for all comparison operators. When used on the `=` operator it falls -back to the pack level comparison (see tilde below). For example, - - * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` - * `>= 1.2.x` is equivalent to `>= 1.2.0` - * `<= 2.x` is equivalent to `<= 3` - * `*` is equivalent to `>= 0.0.0` - -Tilde Range Comparisons (Patch) - -The tilde (`~`) comparison operator is for patch level ranges when a minor -version is specified and major level changes when the minor number is missing. -For example, - - * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` - * `~1` is equivalent to `>= 1, < 2` - * `~2.3` is equivalent to `>= 2.3, < 2.4` - * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` - * `~1.x` is equivalent to `>= 1, < 2` - -Caret Range Comparisons (Major) - -The caret (`^`) comparison operator is for major level changes. This is useful -when comparisons of API versions as a major change is API breaking. 
For example, - - * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` - * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` - * `^2.3` is equivalent to `>= 2.3, < 3` - * `^2.x` is equivalent to `>= 2.0.0, < 3` -*/ -package semver diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go deleted file mode 100644 index fec61aa5abc5..000000000000 --- a/vendor/github.com/Masterminds/semver/version.go +++ /dev/null @@ -1,421 +0,0 @@ -package semver - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -// The compiled version of the regex created at init() is cached here so it -// only needs to be created once. -var versionRegex *regexp.Regexp -var validPrereleaseRegex *regexp.Regexp - -var ( - // ErrInvalidSemVer is returned a version is found to be invalid when - // being parsed. - ErrInvalidSemVer = errors.New("Invalid Semantic Version") - - // ErrInvalidMetadata is returned when the metadata is an invalid format - ErrInvalidMetadata = errors.New("Invalid Metadata string") - - // ErrInvalidPrerelease is returned when the pre-release is an invalid format - ErrInvalidPrerelease = errors.New("Invalid Prerelease string") -) - -// SemVerRegex is the regular expression used to parse a semantic version. -const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + - `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` - -// ValidPrerelease is the regular expression which validates -// both prerelease and metadata values. -const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$` - -// Version represents a single semantic version. 
-type Version struct { - major, minor, patch int64 - pre string - metadata string - original string -} - -func init() { - versionRegex = regexp.MustCompile("^" + SemVerRegex + "$") - validPrereleaseRegex = regexp.MustCompile(ValidPrerelease) -} - -// NewVersion parses a given version and returns an instance of Version or -// an error if unable to parse the version. -func NewVersion(v string) (*Version, error) { - m := versionRegex.FindStringSubmatch(v) - if m == nil { - return nil, ErrInvalidSemVer - } - - sv := &Version{ - metadata: m[8], - pre: m[5], - original: v, - } - - var temp int64 - temp, err := strconv.ParseInt(m[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - sv.major = temp - - if m[2] != "" { - temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - sv.minor = temp - } else { - sv.minor = 0 - } - - if m[3] != "" { - temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - sv.patch = temp - } else { - sv.patch = 0 - } - - return sv, nil -} - -// MustParse parses a given version and panics on error. -func MustParse(v string) *Version { - sv, err := NewVersion(v) - if err != nil { - panic(err) - } - return sv -} - -// String converts a Version object to a string. -// Note, if the original version contained a leading v this version will not. -// See the Original() method to retrieve the original value. Semantic Versions -// don't contain a leading v per the spec. Instead it's optional on -// implementation. 
-func (v *Version) String() string { - var buf bytes.Buffer - - fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) - if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) - } - if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) - } - - return buf.String() -} - -// Original returns the original value passed in to be parsed. -func (v *Version) Original() string { - return v.original -} - -// Major returns the major version. -func (v *Version) Major() int64 { - return v.major -} - -// Minor returns the minor version. -func (v *Version) Minor() int64 { - return v.minor -} - -// Patch returns the patch version. -func (v *Version) Patch() int64 { - return v.patch -} - -// Prerelease returns the pre-release version. -func (v *Version) Prerelease() string { - return v.pre -} - -// Metadata returns the metadata on the version. -func (v *Version) Metadata() string { - return v.metadata -} - -// originalVPrefix returns the original 'v' prefix if any. -func (v *Version) originalVPrefix() string { - - // Note, only lowercase v is supported as a prefix by the parser. - if v.original != "" && v.original[:1] == "v" { - return v.original[:1] - } - return "" -} - -// IncPatch produces the next patch version. -// If the current version does not have prerelease/metadata information, -// it unsets metadata and prerelease values, increments patch number. -// If the current version has any of prerelease or metadata information, -// it unsets both values and keeps curent patch value -func (v Version) IncPatch() Version { - vNext := v - // according to http://semver.org/#spec-item-9 - // Pre-release versions have a lower precedence than the associated normal version. - // according to http://semver.org/#spec-item-10 - // Build metadata SHOULD be ignored when determining version precedence. 
- if v.pre != "" { - vNext.metadata = "" - vNext.pre = "" - } else { - vNext.metadata = "" - vNext.pre = "" - vNext.patch = v.patch + 1 - } - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// IncMinor produces the next minor version. -// Sets patch to 0. -// Increments minor number. -// Unsets metadata. -// Unsets prerelease status. -func (v Version) IncMinor() Version { - vNext := v - vNext.metadata = "" - vNext.pre = "" - vNext.patch = 0 - vNext.minor = v.minor + 1 - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// IncMajor produces the next major version. -// Sets patch to 0. -// Sets minor to 0. -// Increments major number. -// Unsets metadata. -// Unsets prerelease status. -func (v Version) IncMajor() Version { - vNext := v - vNext.metadata = "" - vNext.pre = "" - vNext.patch = 0 - vNext.minor = 0 - vNext.major = v.major + 1 - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// SetPrerelease defines the prerelease value. -// Value must not include the required 'hypen' prefix. -func (v Version) SetPrerelease(prerelease string) (Version, error) { - vNext := v - if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) { - return vNext, ErrInvalidPrerelease - } - vNext.pre = prerelease - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext, nil -} - -// SetMetadata defines metadata value. -// Value must not include the required 'plus' prefix. -func (v Version) SetMetadata(metadata string) (Version, error) { - vNext := v - if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) { - return vNext, ErrInvalidMetadata - } - vNext.metadata = metadata - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext, nil -} - -// LessThan tests if one version is less than another one. 
-func (v *Version) LessThan(o *Version) bool { - return v.Compare(o) < 0 -} - -// GreaterThan tests if one version is greater than another one. -func (v *Version) GreaterThan(o *Version) bool { - return v.Compare(o) > 0 -} - -// Equal tests if two versions are equal to each other. -// Note, versions can be equal with different metadata since metadata -// is not considered part of the comparable version. -func (v *Version) Equal(o *Version) bool { - return v.Compare(o) == 0 -} - -// Compare compares this version to another one. It returns -1, 0, or 1 if -// the version smaller, equal, or larger than the other version. -// -// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is -// lower than the version without a prerelease. -func (v *Version) Compare(o *Version) int { - // Compare the major, minor, and patch version for differences. If a - // difference is found return the comparison. - if d := compareSegment(v.Major(), o.Major()); d != 0 { - return d - } - if d := compareSegment(v.Minor(), o.Minor()); d != 0 { - return d - } - if d := compareSegment(v.Patch(), o.Patch()); d != 0 { - return d - } - - // At this point the major, minor, and patch versions are the same. - ps := v.pre - po := o.Prerelease() - - if ps == "" && po == "" { - return 0 - } - if ps == "" { - return 1 - } - if po == "" { - return -1 - } - - return comparePrerelease(ps, po) -} - -// UnmarshalJSON implements JSON.Unmarshaler interface. -func (v *Version) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - temp, err := NewVersion(s) - if err != nil { - return err - } - v.major = temp.major - v.minor = temp.minor - v.patch = temp.patch - v.pre = temp.pre - v.metadata = temp.metadata - v.original = temp.original - temp = nil - return nil -} - -// MarshalJSON implements JSON.Marshaler interface. 
-func (v *Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -func compareSegment(v, o int64) int { - if v < o { - return -1 - } - if v > o { - return 1 - } - - return 0 -} - -func comparePrerelease(v, o string) int { - - // split the prelease versions by their part. The separator, per the spec, - // is a . - sparts := strings.Split(v, ".") - oparts := strings.Split(o, ".") - - // Find the longer length of the parts to know how many loop iterations to - // go through. - slen := len(sparts) - olen := len(oparts) - - l := slen - if olen > slen { - l = olen - } - - // Iterate over each part of the prereleases to compare the differences. - for i := 0; i < l; i++ { - // Since the lentgh of the parts can be different we need to create - // a placeholder. This is to avoid out of bounds issues. - stemp := "" - if i < slen { - stemp = sparts[i] - } - - otemp := "" - if i < olen { - otemp = oparts[i] - } - - d := comparePrePart(stemp, otemp) - if d != 0 { - return d - } - } - - // Reaching here means two versions are of equal value but have different - // metadata (the part following a +). They are not identical in string form - // but the version comparison finds them to be equal. - return 0 -} - -func comparePrePart(s, o string) int { - // Fastpath if they are equal - if s == o { - return 0 - } - - // When s or o are empty we can use the other in an attempt to determine - // the response. - if s == "" { - if o != "" { - return -1 - } - return 1 - } - - if o == "" { - if s != "" { - return 1 - } - return -1 - } - - // When comparing strings "99" is greater than "103". To handle - // cases like this we need to detect numbers and compare them. 
- - oi, n1 := strconv.ParseInt(o, 10, 64) - si, n2 := strconv.ParseInt(s, 10, 64) - - // The case where both are strings compare the strings - if n1 != nil && n2 != nil { - if s > o { - return 1 - } - return -1 - } else if n1 != nil { - // o is a string and s is a number - return -1 - } else if n2 != nil { - // s is a string and o is a number - return 1 - } - // Both are numbers - if si > oi { - return 1 - } - return -1 - -} diff --git a/vendor/github.com/TykTechnologies/redigocluster/rediscluster/rediscluster.go b/vendor/github.com/TykTechnologies/redigocluster/rediscluster/rediscluster.go index f3f35a89d9d9..e6b0f94a2acd 100644 --- a/vendor/github.com/TykTechnologies/redigocluster/rediscluster/rediscluster.go +++ b/vendor/github.com/TykTechnologies/redigocluster/rediscluster/rediscluster.go @@ -5,7 +5,6 @@ import "strconv" import "errors" import "math/rand" import "os" -import "sync" import "github.com/TykTechnologies/logrus" import iMap "github.com/TykTechnologies/concurrent-map" @@ -20,11 +19,9 @@ type RedisCluster struct { Handles ConcurrentMap // map[string]*RedisHandle Slots iMap.ConcurrentMap //map[uint16]string RefreshTableASAP bool - singleRedisMode bool + SingleRedisMode bool poolConfig PoolConfig Debug bool - - muSingleRedisMode sync.RWMutex } type ClusterTransaction struct { @@ -34,13 +31,13 @@ type ClusterTransaction struct { func NewRedisCluster(seed_redii []map[string]string, poolConfig PoolConfig, debug bool) RedisCluster { cluster := RedisCluster{ - RefreshTableASAP: false, - singleRedisMode: !poolConfig.IsCluster, - SeedHosts: NewCmap(), //make(map[string]bool), - Handles: NewCmap(), //make(map[string]*RedisHandle), - Slots: iMap.New(), // make(map[uint16]string), - poolConfig: poolConfig, - Debug: debug, + RefreshTableASAP: false, + SingleRedisMode: !poolConfig.IsCluster, + SeedHosts: NewCmap(), //make(map[string]bool), + Handles: NewCmap(), //make(map[string]*RedisHandle), + Slots: iMap.New(), // make(map[uint16]string), + poolConfig: 
poolConfig, + Debug: debug, } if cluster.Debug { @@ -61,14 +58,14 @@ func NewRedisCluster(seed_redii []map[string]string, poolConfig PoolConfig, debu cluster_enabled := cluster.hasClusterEnabled(node) if cluster_enabled == false { if cluster.SeedHosts.Count() == 1 { - cluster.SetSingleRedisMode(true) + cluster.SingleRedisMode = true } else { log.Fatal(errors.New("Multiple Seed Hosts Given, But Cluster Support Disabled in Redis")) } } } - if cluster.SingleRedisMode() == false { + if cluster.SingleRedisMode == false { cluster.populateSlotsCache() } return cluster @@ -88,7 +85,7 @@ func (self *RedisCluster) hasClusterEnabled(node *RedisHandle) bool { // contact the startup nodes and try to fetch the hash slots -> instances // map in order to initialize the Slots map. func (self *RedisCluster) populateSlotsCache() { - if self.SingleRedisMode() == true { + if self.SingleRedisMode == true { return } if self.Debug { @@ -161,7 +158,7 @@ func (self *RedisCluster) switchToSingleModeIfNeeded() { for item := range self.Handles.Iter() { cluster_enabled := self.hasClusterEnabled(item.Val.(*RedisHandle)) if cluster_enabled == false { - self.SetSingleRedisMode(true) + self.SingleRedisMode = true } } } @@ -178,20 +175,6 @@ func (self *RedisCluster) addRedisHandleIfNeeded(addr string) *RedisHandle { return item.(*RedisHandle) } -//Goroutine safe setter for SingleRedisMode field -func (self *RedisCluster) SetSingleRedisMode(newValue bool) { - self.muSingleRedisMode.Lock() - self.singleRedisMode = newValue - self.muSingleRedisMode.Unlock() -} - -//Goroutine safe getter for SingleRedisMode field -func (self *RedisCluster) SingleRedisMode() bool { - self.muSingleRedisMode.RLock() - defer self.muSingleRedisMode.RUnlock() - return self.singleRedisMode -} - func (self *RedisCluster) KeyForRequest(cmd string, args ...interface{}) string { cmd = strings.ToLower(cmd) if cmd == "info" || @@ -334,7 +317,7 @@ func (self *RedisCluster) SendClusterTransaction(cmds []ClusterTransaction) (rep // 
forward onto first redis in the handle // if we are set to single mode - if self.SingleRedisMode() == true { + if self.SingleRedisMode == true { // for _, handle := range self.Handles { for item := range self.Handles.Iter() { log.Debug("Running transaction...") @@ -344,7 +327,7 @@ func (self *RedisCluster) SendClusterTransaction(cmds []ClusterTransaction) (rep if self.RefreshTableASAP == true { self.HandleTableRefresh() - if self.SingleRedisMode() == true { + if self.SingleRedisMode == true { // for _, handle := range self.Handles { for item := range self.Handles.Iter() { return item.Val.(*RedisHandle).DoTransaction(cmds) @@ -451,7 +434,7 @@ func (self *RedisCluster) SendClusterPipeline(cmds []ClusterTransaction) (reply // forward onto first redis in the handle // if we are set to single mode - if self.SingleRedisMode() == true { + if self.SingleRedisMode == true { // for _, handle := range self.Handles { for item := range self.Handles.Iter() { log.Debug("Running pipline...") @@ -461,7 +444,7 @@ func (self *RedisCluster) SendClusterPipeline(cmds []ClusterTransaction) (reply if self.RefreshTableASAP == true { self.HandleTableRefresh() - if self.SingleRedisMode() == true { + if self.SingleRedisMode == true { // for _, handle := range self.Handles { for item := range self.Handles.Iter() { return item.Val.(*RedisHandle).DoPipeline(cmds) @@ -568,14 +551,14 @@ func (self *RedisCluster) SendClusterCommand(flush bool, cmd string, args ...int // forward onto first redis in the handle // if we are set to single mode - if self.SingleRedisMode() == true { + if self.SingleRedisMode == true { return self.handleSingleMode(flush, cmd, args...) } if self.RefreshTableASAP == true { self.HandleTableRefresh() // in case we realized we were now in Single Mode - if self.SingleRedisMode() == true { + if self.SingleRedisMode == true { return self.handleSingleMode(flush, cmd, args...) 
} } @@ -708,7 +691,7 @@ func (self *RedisCluster) SetRefreshNeeded() { func (self *RedisCluster) HandleForKey(key string) *RedisHandle { // forward onto first redis in the handle // if we are set to single mode - if self.SingleRedisMode() == true { + if self.SingleRedisMode == true { // for _, handle := range self.Handles { for item := range self.Handles.Iter() { return item.Val.(*RedisHandle) diff --git a/vendor/github.com/aokoli/goutils/CHANGELOG.md b/vendor/github.com/aokoli/goutils/CHANGELOG.md deleted file mode 100644 index d700ec47f2b8..000000000000 --- a/vendor/github.com/aokoli/goutils/CHANGELOG.md +++ /dev/null @@ -1,8 +0,0 @@ -# 1.0.1 (2017-05-31) - -## Fixed -- #21: Fix generation of alphanumeric strings (thanks @dbarranco) - -# 1.0.0 (2014-04-30) - -- Initial release. diff --git a/vendor/github.com/aokoli/goutils/LICENSE.txt b/vendor/github.com/aokoli/goutils/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aokoli/goutils/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aokoli/goutils/README.md b/vendor/github.com/aokoli/goutils/README.md deleted file mode 100644 index 163ffe72a82d..000000000000 --- a/vendor/github.com/aokoli/goutils/README.md +++ /dev/null @@ -1,70 +0,0 @@ -GoUtils -=========== -[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) -[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) - - -GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some -string manipulation libraries of Java Apache Commons. 
GoUtils includes the following Java Apache Commons classes: -* WordUtils -* RandomStringUtils -* StringUtils (partial implementation) - -## Installation -If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: - - go get github.com/Masterminds/goutils - -If you do not have Go set up on your system, please follow the [Go installation directions from the documenation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. - - -## Documentation -GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) - - -## Usage -The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 1: A goutils function which returns no errors - fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" - - } -Some functions return errors mainly due to illegal arguements used as parameters. The code example below illustrates how to deal with function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 2: A goutils function which returns an error - rand1, err1 := goutils.Random (-1, 0, 0, true, true) - - if err1 != nil { - fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) - } else { - fmt.Println(rand1) - } - - } - -## License -GoUtils is licensed under the Apache License, Version 2.0. 
Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. - -## Issue Reporting -Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues - -## Website -* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/aokoli/goutils/appveyor.yml b/vendor/github.com/aokoli/goutils/appveyor.yml deleted file mode 100644 index 657564a8474d..000000000000 --- a/vendor/github.com/aokoli/goutils/appveyor.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\goutils -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -build: off - -install: - - go version - - go env - -test_script: - - go test -v - -deploy: off diff --git a/vendor/github.com/aokoli/goutils/cryptorandomstringutils.go b/vendor/github.com/aokoli/goutils/cryptorandomstringutils.go deleted file mode 100644 index 177dd865848b..000000000000 --- a/vendor/github.com/aokoli/goutils/cryptorandomstringutils.go +++ /dev/null @@ -1,251 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "crypto/rand" - "fmt" - "math" - "math/big" - "regexp" - "unicode" -) - -/* -CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. 
-Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomNonAlphaNumeric(count int) (string, error) { - return CryptoRandomAlphaNumericCustom(count, false, false) -} - -/* -CryptoRandomAscii creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAscii(count int) (string, error) { - return CryptoRandom(count, 32, 127, false, false) -} - -/* -CryptoRandomNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomNumeric(count int) (string, error) { - return CryptoRandom(count, 0, 0, false, true) -} - -/* -CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. 
- -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphabetic(count int) (string, error) { - return CryptoRandom(count, 0, 0, true, false) -} - -/* -CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphaNumeric(count int) (string, error) { - if count == 0 { - return "", nil - } - RandomString, err := CryptoRandom(count, 0, 0, true, true) - if err != nil { - return "", fmt.Errorf("Error: %s", err) - } - match, err := regexp.MatchString("([0-9]+)", RandomString) - if err != nil { - panic(err) - } - - if !match { - //Get the position between 0 and the length of the string-1 to insert a random number - position := getCryptoRandomInt(count) - //Insert a random number between [0-9] in the position - RandomString = RandomString[:position] + string('0' + getCryptoRandomInt(10)) + RandomString[position + 1:] - return RandomString, err - } - return RandomString, err - -} - -/* -CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. 
- -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { - return CryptoRandom(count, 0, 0, letters, numbers) -} - -/* -CryptoRandom creates a random string based on a variety of options, using using golang's crypto/rand source of randomness. -If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, -unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. -If chars is not nil, characters stored in chars that are between start and end are chosen. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode int) to start at - end - the position in set of chars (ASCII/Unicode int) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. 
- -Returns: - string - the random string - error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -*/ -func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { - if count == 0 { - return "", nil - } else if count < 0 { - err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") - return "", err - } - if chars != nil && len(chars) == 0 { - err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") - return "", err - } - - if start == 0 && end == 0 { - if chars != nil { - end = len(chars) - } else { - if !letters && !numbers { - end = math.MaxInt32 - } else { - end = 'z' + 1 - start = ' ' - } - } - } else { - if end <= start { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) - return "", err - } - - if chars != nil && end > len(chars) { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) - return "", err - } - } - - buffer := make([]rune, count) - gap := end - start - - // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 - // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 - - for count != 0 { - count-- - var ch rune - if chars == nil { - ch = rune(getCryptoRandomInt(gap) + int64(start)) - } else { - ch = chars[getCryptoRandomInt(gap) + int64(start)] - } - - if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { - if ch >= 56320 && ch <= 57343 { // low surrogate range - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = ch - count-- - // Insert high surrogate - buffer[count] = rune(55296 + getCryptoRandomInt(128)) - } - } else if ch >= 55296 && ch <= 56191 { // High surrogates 
range (Partial) - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = rune(56320 + getCryptoRandomInt(128)) - count-- - // Insert high surrogate - buffer[count] = ch - } - } else if ch >= 56192 && ch <= 56319 { - // private high surrogate, skip it - count++ - } else { - // not one of the surrogates* - buffer[count] = ch - } - } else { - count++ - } - } - return string(buffer), nil -} - -func getCryptoRandomInt(count int) int64 { - nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count))) - if err != nil { - panic(err) - } - return nBig.Int64() -} diff --git a/vendor/github.com/aokoli/goutils/randomstringutils.go b/vendor/github.com/aokoli/goutils/randomstringutils.go deleted file mode 100644 index 1364e0cafdf7..000000000000 --- a/vendor/github.com/aokoli/goutils/randomstringutils.go +++ /dev/null @@ -1,268 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "fmt" - "math" - "math/rand" - "regexp" - "time" - "unicode" -) - -// RANDOM provides the time-based seed used to generate random numbers -var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) - -/* -RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). 
- -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNonAlphaNumeric(count int) (string, error) { - return RandomAlphaNumericCustom(count, false, false) -} - -/* -RandomAscii creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAscii(count int) (string, error) { - return Random(count, 32, 127, false, false) -} - -/* -RandomNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNumeric(count int) (string, error) { - return Random(count, 0, 0, false, true) -} - -/* -RandomAlphabetic creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) 
-*/ -func RandomAlphabetic(count int) (string, error) { - return Random(count, 0, 0, true, false) -} - -/* -RandomAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphaNumeric(count int) (string, error) { - RandomString, err := Random(count, 0, 0, true, true) - if err != nil { - return "", fmt.Errorf("Error: %s", err) - } - match, err := regexp.MatchString("([0-9]+)", RandomString) - if err != nil { - panic(err) - } - - if !match { - //Get the position between 0 and the length of the string-1 to insert a random number - position := rand.Intn(count) - //Insert a random number between [0-9] in the position - RandomString = RandomString[:position] + string('0'+rand.Intn(10)) + RandomString[position+1:] - return RandomString, err - } - return RandomString, err - -} - -/* -RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { - return Random(count, 0, 0, letters, numbers) -} - -/* -Random creates a random string based on a variety of options, using default source of randomness. 
-This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but -instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode int) to start at - end - the position in set of chars (ASCII/Unicode int) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { - return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) -} - -/* -RandomSeed creates a random string based on a variety of options, using supplied source of randomness. -If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, -unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. -If chars is not nil, characters stored in chars that are between start and end are chosen. -This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance -with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. 
- -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode decimals) to start at - end - the position in set of chars (ASCII/Unicode decimals) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - random - a source of randomness. - -Returns: - string - the random string - error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -*/ -func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { - - if count == 0 { - return "", nil - } else if count < 0 { - err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") - return "", err - } - if chars != nil && len(chars) == 0 { - err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") - return "", err - } - - if start == 0 && end == 0 { - if chars != nil { - end = len(chars) - } else { - if !letters && !numbers { - end = math.MaxInt32 - } else { - end = 'z' + 1 - start = ' ' - } - } - } else { - if end <= start { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) - return "", err - } - - if chars != nil && end > len(chars) { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) - return "", err - } - } - - buffer := make([]rune, count) - gap := end - start - - // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 - // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 - - for count != 0 { - count-- - var ch rune - if chars == nil { - ch = 
rune(random.Intn(gap) + start) - } else { - ch = chars[random.Intn(gap)+start] - } - - if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { - if ch >= 56320 && ch <= 57343 { // low surrogate range - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = ch - count-- - // Insert high surrogate - buffer[count] = rune(55296 + random.Intn(128)) - } - } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = rune(56320 + random.Intn(128)) - count-- - // Insert high surrogate - buffer[count] = ch - } - } else if ch >= 56192 && ch <= 56319 { - // private high surrogate, skip it - count++ - } else { - // not one of the surrogates* - buffer[count] = ch - } - } else { - count++ - } - } - return string(buffer), nil -} diff --git a/vendor/github.com/aokoli/goutils/stringutils.go b/vendor/github.com/aokoli/goutils/stringutils.go deleted file mode 100644 index 5037c4516baf..000000000000 --- a/vendor/github.com/aokoli/goutils/stringutils.go +++ /dev/null @@ -1,224 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "bytes" - "fmt" - "strings" - "unicode" -) - -// Typically returned by functions where a searched item cannot be found -const INDEX_NOT_FOUND = -1 - -/* -Abbreviate abbreviates a string using ellipses. 
This will turn the string "Now is the time for all good men" into "Now is the time for..." - -Specifically, the algorithm is as follows: - - - If str is less than maxWidth characters long, return it. - - Else abbreviate it to (str[0:maxWidth - 3] + "..."). - - If maxWidth is less than 4, return an illegal argument error. - - In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func Abbreviate(str string, maxWidth int) (string, error) { - return AbbreviateFull(str, 0, maxWidth) -} - -/* -AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." -This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not -necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear -somewhere in the result. -In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - offset - left edge of source string - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { - if str == "" { - return "", nil - } - if maxWidth < 4 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") - return "", err - } - if len(str) <= maxWidth { - return str, nil - } - if offset > len(str) { - offset = len(str) - } - if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 - offset = len(str) - (maxWidth - 3) - } - abrevMarker := "..." 
- if offset <= 4 { - return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; - } - if maxWidth < 7 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") - return "", err - } - if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 - abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) - return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); - } - return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); -} - -/* -DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). -It returns the string without whitespaces. - -Parameter: - str - the string to delete whitespace from, may be nil - -Returns: - the string without whitespaces -*/ -func DeleteWhiteSpace(str string) string { - if str == "" { - return str - } - sz := len(str) - var chs bytes.Buffer - count := 0 - for i := 0; i < sz; i++ { - ch := rune(str[i]) - if !unicode.IsSpace(ch) { - chs.WriteRune(ch) - count++ - } - } - if count == sz { - return str - } - return chs.String() -} - -/* -IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. - -Parameters: - str1 - the first string - str2 - the second string - -Returns: - the index where str1 and str2 begin to differ; -1 if they are equal -*/ -func IndexOfDifference(str1 string, str2 string) int { - if str1 == str2 { - return INDEX_NOT_FOUND - } - if IsEmpty(str1) || IsEmpty(str2) { - return 0 - } - var i int - for i = 0; i < len(str1) && i < len(str2); i++ { - if rune(str1[i]) != rune(str2[i]) { - break - } - } - if i < len(str2) || i < len(str1) { - return i - } - return INDEX_NOT_FOUND -} - -/* -IsBlank checks if a string is whitespace or empty (""). 
Observe the following behavior: - - goutils.IsBlank("") = true - goutils.IsBlank(" ") = true - goutils.IsBlank("bob") = false - goutils.IsBlank(" bob ") = false - -Parameter: - str - the string to check - -Returns: - true - if the string is whitespace or empty ("") -*/ -func IsBlank(str string) bool { - strLen := len(str) - if str == "" || strLen == 0 { - return true - } - for i := 0; i < strLen; i++ { - if unicode.IsSpace(rune(str[i])) == false { - return false - } - } - return true -} - -/* -IndexOf returns the index of the first instance of sub in str, with the search beginning from the -index start point specified. -1 is returned if sub is not present in str. - -An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. -A start position greater than the string length returns -1. - -Parameters: - str - the string to check - sub - the substring to find - start - the start position; negative treated as zero - -Returns: - the first index where the sub string was found (always >= start) -*/ -func IndexOf(str string, sub string, start int) int { - - if start < 0 { - start = 0 - } - - if len(str) < start { - return INDEX_NOT_FOUND - } - - if IsEmpty(str) || IsEmpty(sub) { - return INDEX_NOT_FOUND - } - - partialIndex := strings.Index(str[start:len(str)], sub) - if partialIndex == -1 { - return INDEX_NOT_FOUND - } - return partialIndex + start -} - -// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. -func IsEmpty(str string) bool { - return len(str) == 0 -} diff --git a/vendor/github.com/aokoli/goutils/wordutils.go b/vendor/github.com/aokoli/goutils/wordutils.go deleted file mode 100644 index 034cad8e2107..000000000000 --- a/vendor/github.com/aokoli/goutils/wordutils.go +++ /dev/null @@ -1,357 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package goutils provides utility functions to manipulate strings in various ways. -The code snippets below show examples of how to use goutils. Some functions return -errors while others do not, so usage would vary as a result. - -Example: - - package main - - import ( - "fmt" - "github.com/aokoli/goutils" - ) - - func main() { - - // EXAMPLE 1: A goutils function which returns no errors - fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" - - - - // EXAMPLE 2: A goutils function which returns an error - rand1, err1 := goutils.Random (-1, 0, 0, true, true) - - if err1 != nil { - fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) - } else { - fmt.Println(rand1) - } - } -*/ -package goutils - -import ( - "bytes" - "strings" - "unicode" -) - -// VERSION indicates the current version of goutils -const VERSION = "1.0.0" - -/* -Wrap wraps a single line of text, identifying words by ' '. -New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. -Leading spaces on a new line are stripped. Trailing spaces are not stripped. - -Parameters: - str - the string to be word wrapped - wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 - -Returns: - a line with newlines inserted -*/ -func Wrap(str string, wrapLength int) string { - return WrapCustom(str, wrapLength, "", false) -} - -/* -WrapCustom wraps a single line of text, identifying words by ' '. -Leading spaces on a new line are stripped. 
Trailing spaces are not stripped. - -Parameters: - str - the string to be word wrapped - wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 - newLineStr - the string to insert for a new line, "" uses '\n' - wrapLongWords - true if long words (such as URLs) should be wrapped - -Returns: - a line with newlines inserted -*/ -func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { - - if str == "" { - return "" - } - if newLineStr == "" { - newLineStr = "\n" // TODO Assumes "\n" is seperator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons - } - if wrapLength < 1 { - wrapLength = 1 - } - - inputLineLength := len(str) - offset := 0 - - var wrappedLine bytes.Buffer - - for inputLineLength-offset > wrapLength { - - if rune(str[offset]) == ' ' { - offset++ - continue - } - - end := wrapLength + offset + 1 - spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset - - if spaceToWrapAt >= offset { - // normal word (not longer than wrapLength) - wrappedLine.WriteString(str[offset:spaceToWrapAt]) - wrappedLine.WriteString(newLineStr) - offset = spaceToWrapAt + 1 - - } else { - // long word or URL - if wrapLongWords { - end := wrapLength + offset - // long words are wrapped one line at a time - wrappedLine.WriteString(str[offset:end]) - wrappedLine.WriteString(newLineStr) - offset += wrapLength - } else { - // long words aren't wrapped, just extended beyond limit - end := wrapLength + offset - index := strings.IndexRune(str[end:len(str)], ' ') - if index == -1 { - wrappedLine.WriteString(str[offset:len(str)]) - offset = inputLineLength - } else { - spaceToWrapAt = index + end - wrappedLine.WriteString(str[offset:spaceToWrapAt]) - wrappedLine.WriteString(newLineStr) - offset = spaceToWrapAt + 1 - } - } - } - } - - wrappedLine.WriteString(str[offset:len(str)]) - - return wrappedLine.String() - -} - -/* -Capitalize capitalizes all the delimiter separated words in 
a string. Only the first letter of each word is changed. -To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). -The delimiters represent a set of characters understood to separate words. The first string character -and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". -Capitalization uses the Unicode title case, normally equivalent to upper case. - -Parameters: - str - the string to capitalize - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter - -Returns: - capitalized string -*/ -func Capitalize(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - - buffer := []rune(str) - capitalizeNext := true - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if isDelimiter(ch, delimiters...) { - capitalizeNext = true - } else if capitalizeNext { - buffer[i] = unicode.ToTitle(ch) - capitalizeNext = false - } - } - return string(buffer) - -} - -/* -CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a -titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood -to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. -Capitalization uses the Unicode title case, normally equivalent to upper case. 
- -Parameters: - str - the string to capitalize fully - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter - -Returns: - capitalized string -*/ -func CapitalizeFully(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - str = strings.ToLower(str) - return Capitalize(str, delimiters...) -} - -/* -Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. -The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter -character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). - -Parameters: - str - the string to uncapitalize fully - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter - -Returns: - uncapitalized string -*/ -func Uncapitalize(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - - buffer := []rune(str) - uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if isDelimiter(ch, delimiters...) { - uncapitalizeNext = true - } else if uncapitalizeNext { - buffer[i] = unicode.ToLower(ch) - uncapitalizeNext = false - } - } - return string(buffer) -} - -/* -SwapCase swaps the case of a string using a word based algorithm. 
- -Conversion algorithm: - - Upper case character converts to Lower case - Title case character converts to Lower case - Lower case character after Whitespace or at start converts to Title case - Other Lower case character converts to Upper case - Whitespace is defined by unicode.IsSpace(char). - -Parameters: - str - the string to swap case - -Returns: - the changed string -*/ -func SwapCase(str string) string { - if str == "" { - return str - } - buffer := []rune(str) - - whitespace := true - - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if unicode.IsUpper(ch) { - buffer[i] = unicode.ToLower(ch) - whitespace = false - } else if unicode.IsTitle(ch) { - buffer[i] = unicode.ToLower(ch) - whitespace = false - } else if unicode.IsLower(ch) { - if whitespace { - buffer[i] = unicode.ToTitle(ch) - whitespace = false - } else { - buffer[i] = unicode.ToUpper(ch) - } - } else { - whitespace = unicode.IsSpace(ch) - } - } - return string(buffer) -} - -/* -Initials extracts the initial letters from each word in the string. The first letter of the string and all first -letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters -parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpacea(char). An empty delimiter array returns an empty string. - -Parameters: - str - the string to get initials from - delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimeter -Returns: - string of initial letters -*/ -func Initials(str string, delimiters ...rune) string { - if str == "" { - return str - } - if delimiters != nil && len(delimiters) == 0 { - return "" - } - strLen := len(str) - var buf bytes.Buffer - lastWasGap := true - for i := 0; i < strLen; i++ { - ch := rune(str[i]) - - if isDelimiter(ch, delimiters...) 
{ - lastWasGap = true - } else if lastWasGap { - buf.WriteRune(ch) - lastWasGap = false - } - } - return buf.String() -} - -// private function (lower case func name) -func isDelimiter(ch rune, delimiters ...rune) bool { - if delimiters == nil { - return unicode.IsSpace(ch) - } - for _, delimiter := range delimiters { - if ch == delimiter { - return true - } - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index 63b0f08bef2a..d9aa3c42d666 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -186,6 +186,7 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } + // x -= 0x80 << 63 // Always zero. return 0, errOverflow diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c09aaf..000000000000 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. 
-func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 3abfed2cff04..c27d35f866bb 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -37,9 +37,27 @@ package proto import ( "errors" + "fmt" "reflect" ) +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + var ( // errRepeatedHasNil is the error returned if Marshal is called with // a struct with a repeated field containing a nil element. diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index f9b6e41b3c10..d4db5a1c1457 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -246,8 +246,7 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { return false } - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) + m1, m2 := e1.value, e2.value if m1 == nil && m2 == nil { // Both have only encoded form. 
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index fa88add30a41..816a3b9d6c09 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -185,25 +185,9 @@ type Extension struct { // extension will have only enc set. When such an extension is // accessed using GetExtension (or GetExtensions) desc and value // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. + desc *ExtensionDesc value interface{} - - // enc is the raw bytes for the extension field. - enc []byte + enc []byte } // SetRawExtension is for testing only. @@ -350,7 +334,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // descriptors with the same field number. return nil, errors.New("proto: descriptor conflict") } - return extensionAsLegacyType(e.value), nil + return e.value, nil } if extension.ExtensionType == nil { @@ -365,11 +349,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // Remember the decoded version and drop the encoded version. // That way it is safe to mutate what we return. 
- e.value = extensionAsStorageType(v) + e.value = v e.desc = extension e.enc = nil emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil + return e.value, nil } // defaultExtensionValue returns the default value for extension. @@ -504,7 +488,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + return errors.New("proto: bad extension value type") } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension @@ -516,7 +500,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + extmap[extension.Field] = Extension{desc: extension, value: value} return nil } @@ -557,51 +541,3 @@ func RegisterExtension(desc *ExtensionDesc) { func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } - -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. 
- switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - } - return v -} - -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - } - return v -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index fdd328bb7f54..0e2191b8ada3 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -265,6 +265,7 @@ package proto import ( "encoding/json" + "errors" "fmt" "log" "reflect" @@ -273,66 +274,7 @@ import ( "sync" ) -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. 
-type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") // Message is implemented by generated protocol buffer messages. type Message interface { @@ -341,6 +283,26 @@ type Message interface { ProtoMessage() } +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. 
+type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; @@ -940,19 +902,13 @@ func isProto3Zero(v reflect.Value) bool { return false } -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true // InternalMessageInfo is a type used internally by generated .pb.go files. // This type is not intended to be used by non-generated code. 
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index f48a756761ea..3b6ca41d5e55 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -36,7 +36,13 @@ package proto */ import ( + "bytes" + "encoding/json" "errors" + "fmt" + "reflect" + "sort" + "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -139,9 +145,46 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + + case map[int32]Extension: + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, + } + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + + default: + return nil, errors.New("proto: not an extension map") + } +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. 
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { +func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -179,3 +222,93 @@ func unmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. 
+ continue + } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index 94fa9194a882..b6cad90834b3 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -79,13 +79,10 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. 
-func toAddrPointer(i *interface{}, isptr, deref bool) pointer { +func toAddrPointer(i *interface{}, isptr bool) pointer { v := reflect.ValueOf(*i) u := reflect.New(v.Type()) u.Elem().Set(v) - if deref { - u = u.Elem() - } return pointer{v: u} } diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index dbfffe071b82..d55a335d9453 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -85,21 +85,16 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { +func toAddrPointer(i *interface{}, isptr bool) pointer { // Super-tricky - read or get the address of data word of interface value. if isptr { // The interface is of pointer type, thus it is a direct interface. // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } // valToPointer converts v to a pointer. v must be of pointer type. 
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 79668ff5c5ca..f710adab0924 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -139,7 +139,7 @@ type Properties struct { Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field + proto3 bool // whether this is known to be a proto3 field; set for []byte only oneof bool // whether this is a oneof field Default string // default value @@ -148,9 +148,9 @@ type Properties struct { stype reflect.Type // set for struct types only sprop *StructProperties // set for struct types only - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only } // String formats the properties in the protobuf struct field tag style. @@ -275,16 +275,16 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc case reflect.Map: p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} vtype := p.mtype.Elem() if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { // The value type is not a message (*T) or bytes ([]byte), // so we need encoders for the pointer to this type. 
vtype = reflect.PtrTo(vtype) } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } if p.stype != nil { @@ -334,6 +334,9 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { + if collectStats { + stats.Chit++ + } return sprop } @@ -343,20 +346,17 @@ func GetProperties(t reflect.Type) *StructProperties { return sprop } -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } return prop } + if collectStats { + stats.Cmiss++ + } prop := new(StructProperties) // in case of recursive protos, fill this in now. @@ -391,14 +391,13 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { // Re-order prop.order. sort.Sort(prop) - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) } - if len(oots) > 0 { + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + _, _, _, oots = om.XXX_OneofFuncs() + // Interpret oneof metadata. 
prop.OneofTypes = make(map[string]*OneofProperties) for _, oot := range oots { diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go index 5cb11fa955e4..0f212b3029d2 100644 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -87,7 +87,6 @@ type marshalElemInfo struct { sizer sizer marshaler marshaler isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr } var ( @@ -232,7 +231,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte return b, err } - var err, errLater error + var err, errreq error // The old marshaler encodes extensions at beginning. if u.extensions.IsValid() { e := ptr.offset(u.extensions).toExtensions() @@ -253,13 +252,11 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte } } for _, f := range u.fields { - if f.required { + if f.required && errreq == nil { if ptr.offset(f.field).getPointer().isNil() { // Required field is not set. // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } + errreq = &RequiredNotSetError{f.name} continue } } @@ -272,21 +269,14 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte if err1, ok := err.(*RequiredNotSetError); ok { // Required field in submessage is not set. // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} + if errreq == nil { + errreq = &RequiredNotSetError{f.name + "." 
+ err1.field} } continue } if err == errRepeatedHasNil { err = errors.New("proto: repeated field " + f.name + " has nil element") } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } return b, err } } @@ -294,7 +284,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte s := *ptr.offset(u.unrecognized).toBytes() b = append(b, s...) } - return b, errLater + return b, errreq } // computeMarshalInfo initializes the marshal info. @@ -321,11 +311,8 @@ func (u *marshalInfo) computeMarshalInfo() { // get oneof implementers var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() } n := t.NumField() @@ -411,22 +398,13 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { panic("tag is not an integer") } wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } e = &marshalElemInfo{ wiretag: uint64(tag)<<3 | wt, tagsize: SizeVarint(uint64(tag) << 3), sizer: sizer, marshaler: marshaler, isptr: t.Kind() == reflect.Ptr, - deref: deref, } // update cache @@ -461,7 +439,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. 
+ fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. fi.isPointer = true fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) @@ -489,6 +467,10 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI } } +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + // wiretype returns the wire encoding of the type. func wiretype(encoding string) uint64 { switch encoding { @@ -548,7 +530,6 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma packed := false proto3 := false - validateUTF8 := true for i := 2; i < len(tags); i++ { if tags[i] == "packed" { packed = true @@ -557,7 +538,6 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma proto3 = true } } - validateUTF8 = validateUTF8 && proto3 switch t.Kind() { case reflect.Bool: @@ -755,18 +735,6 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma } return sizeFloat64Value, appendFloat64Value case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } if pointer { return sizeStringPtr, appendStringPtr } @@ -2015,105 +1983,52 @@ func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byt return b, nil } func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool v := *ptr.toString() if !utf8.ValidString(v) { - invalidUTF8 = true + return nil, errInvalidUTF8 } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } return b, nil } -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { v := *ptr.toString() if v == "" { return b, nil } if !utf8.ValidString(v) { - invalidUTF8 = true + return nil, errInvalidUTF8 } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) 
- if invalidUTF8 { - return b, errInvalidUTF8 - } return b, nil } -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { p := *ptr.toStringPtr() if p == nil { return b, nil } v := *p if !utf8.ValidString(v) { - invalidUTF8 = true + return nil, errInvalidUTF8 } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } return b, nil } -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { s := *ptr.toStringSlice() for _, v := range s { if !utf8.ValidString(v) { - invalidUTF8 = true + return nil, errInvalidUTF8 } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) } - if invalidUTF8 { - return b, errInvalidUTF8 - } return b, nil } func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { @@ -2192,8 +2107,7 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { s := ptr.getPointerSlice() - var err error - var nerr nonFatal + var err, errreq error for _, v := range s { if v.isNil() { return b, errRepeatedHasNil @@ -2201,14 +2115,22 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { b = appendVarint(b, wiretag) // start group b, err = u.marshal(b, v, deterministic) b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. 
+ if errreq == nil { + errreq = err + } + continue + } if err == ErrNil { err = errRepeatedHasNil } return b, err } } - return b, nerr.E + return b, errreq } } @@ -2252,8 +2174,7 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { s := ptr.getPointerSlice() - var err error - var nerr nonFatal + var err, errreq error for _, v := range s { if v.isNil() { return b, errRepeatedHasNil @@ -2263,14 +2184,22 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { b = appendVarint(b, uint64(siz)) b, err = u.marshal(b, v, deterministic) - if !nerr.Merge(err) { + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } if err == ErrNil { err = errRepeatedHasNil } return b, err } } - return b, nerr.E + return b, errreq } } @@ -2294,33 +2223,14 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { // value. // Key cannot be pointer-typed. valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } return func(ptr pointer, tagsize int) int { m := ptr.asPointerTo(t).Elem() // the map n := 0 for _, k := range m.MapKeys() { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) n += siz + SizeVarint(uint64(siz)) + tagsize } @@ -2333,26 +2243,24 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { if len(keys) > 1 && deterministic { sort.Sort(mapKeys(keys)) } - - var nerr nonFatal for _, k := range keys { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) b = appendVarint(b, uint64(siz)) b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { + if err != nil { return b, err } b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + if err != nil && err != ErrNil { // allow nil value in map return b, err } } - return b, nerr.E + return b, nil } } @@ -2408,7 +2316,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { // the last time this function was called. 
ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, ei.tagsize) } mu.Unlock() @@ -2425,7 +2333,6 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de defer mu.Unlock() var err error - var nerr nonFatal // Fast-path for common cases: zero or one extensions. // Don't bother sorting the keys. @@ -2443,13 +2350,13 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { + if err != nil { return b, err } } - return b, nerr.E + return b, nil } // Sort the keys to provide a deterministic encoding. @@ -2474,13 +2381,13 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { + if err != nil { return b, err } } - return b, nerr.E + return b, nil } // message set format is: @@ -2519,7 +2426,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, 1) // message, tag = 3 (size=1) } mu.Unlock() @@ -2537,7 +2444,6 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de defer mu.Unlock() var err error - var nerr nonFatal // Fast-path for common cases: zero or one extensions. // Don't bother sorting the keys. 
@@ -2562,14 +2468,14 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { + if err != nil { return b, err } b = append(b, 1<<3|WireEndGroup) } - return b, nerr.E + return b, nil } // Sort the keys to provide a deterministic encoding. @@ -2600,14 +2506,14 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { + if err != nil { return b, err } } - return b, nerr.E + return b, nil } // sizeV1Extensions computes the size of encoded data for a V1-API extension field. @@ -2630,7 +2536,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, ei.tagsize) } return n @@ -2650,7 +2556,6 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ sort.Ints(keys) var err error - var nerr nonFatal for _, k := range keys { e := m[int32(k)] if e.value == nil || e.desc == nil { @@ -2665,13 +2570,13 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { + if err != nil { return b, err } } - return b, nerr.E + return b, nil } // newMarshaler is the interface representing objects that can marshal themselves. 
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go index acee2fc52964..55f0340a3fde 100644 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -97,8 +97,6 @@ type unmarshalFieldInfo struct { // if a required field, contains a single set bit at this field's index in the required field list. reqMask uint64 - - name string // name of the field, for error reporting } var ( @@ -136,10 +134,10 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { u.computeUnmarshalInfo() } if u.isMessageSet { - return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } - var reqMask uint64 // bitmask of required fields we've seen. - var errLater error + var reqMask uint64 // bitmask of required fields we've seen. + var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage. for len(b) > 0 { // Read tag and wire type. // Special case 1 and 2 byte varints. @@ -178,20 +176,11 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { if r, ok := err.(*RequiredNotSetError); ok { // Remember this error, but keep parsing. We need to produce // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } + rnse = r reqMask |= f.reqMask continue } if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } return err } // Fragments with bad wire type are treated as unknown fields. @@ -250,16 +239,20 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { emap[int32(tag)] = e } } - if reqMask != u.reqMask && errLater == nil { + if rnse != nil { + // A required field of a submessage/group is missing. 
Return that error. + return rnse + } + if reqMask != u.reqMask { // A required field of this message is missing. for _, n := range u.reqFields { if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} + return &RequiredNotSetError{n} } reqMask >>= 1 } } - return errLater + return nil } // computeUnmarshalInfo fills in u with information for use @@ -358,52 +351,43 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { } // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) + u.setTag(tag, toField(&f), unmarshal, reqMask) } // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + if fn.IsValid() { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1] + tag, err := strconv.Atoi(tagstr) + if err != nil { + panic("protobuf tag field not an integer: " + tagstr) + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(tag, of.field, unmarshal, 0) + } } } - } // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") if fn.IsValid() { if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { panic("a message with extensions, but no extensions field in " + t.Name()) @@ -417,7 +401,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") + }, 0) // Set mask for required field check. u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? 
for len(u.dense) <= tag { @@ -459,17 +442,11 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { tagArray := strings.Split(tags, ",") encoding := tagArray[0] name := "unknown" - proto3 := false - validateUTF8 := true for _, tag := range tagArray[3:] { if strings.HasPrefix(tag, "name=") { name = tag[5:] } - if tag == "proto3" { - proto3 = true - } } - validateUTF8 = validateUTF8 && proto3 // Figure out packaging (pointer, slice, or both) slice := false @@ -617,15 +594,6 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { } return unmarshalBytesValue case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } if pointer { return unmarshalStringPtr } @@ -1480,6 +1448,9 @@ func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } *f.toString() = v return b[x:], nil } @@ -1497,49 +1468,14 @@ func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v if !utf8.ValidString(v) { - return 
b[x:], errInvalidUTF8 + return nil, errInvalidUTF8 } + *f.toStringPtr() = &v return b[x:], nil } -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { if w != WireBytes { return b, errInternalBadWireType } @@ -1552,31 +1488,11 @@ func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) - *f.toStringPtr() = &v if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF + return nil, errInvalidUTF8 } - v := string(b[:x]) s := f.toStringSlice() *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } return b[x:], nil } @@ -1758,7 +1674,6 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { // Maps will be somewhat slow. Oh well. // Read key and value from data. - var nerr nonFatal k := reflect.New(kt) v := reflect.New(vt) for len(b) > 0 { @@ -1779,7 +1694,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { err = errInternalBadWireType // skip unknown tag } - if nerr.Merge(err) { + if err == nil { continue } if err != errInternalBadWireType { @@ -1802,7 +1717,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { // Insert into map. m.SetMapIndex(k.Elem(), v.Elem()) - return r, nerr.E + return r, nil } } @@ -1828,16 +1743,15 @@ func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshal // Unmarshal data into holder. // We unmarshal into the first field of the holder object. 
var err error - var nerr nonFatal b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { + if err != nil { return nil, err } // Write pointer to holder into target field. f.asPointerTo(ityp).Elem().Set(v) - return b, nerr.E + return b, nil } } @@ -1950,7 +1864,7 @@ func encodeVarint(b []byte, x uint64) []byte { // If there is an error, it returns 0,0. func decodeVarint(b []byte) (uint64, int) { var x, y uint64 - if len(b) == 0 { + if len(b) <= 0 { goto bad } x = uint64(b[0]) diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 1aaee725b45b..2205fdaadf84 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -353,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -370,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { + if err := tm.writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index bb55a3af2769..0685bae36d50 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -630,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { if err := p.consumeToken(":"); err != nil { return err } - if err := p.readAny(key, props.MapKeyProp); err != nil { + if err := p.readAny(key, props.mkeyprop); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } case 
"value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { return err } - if err := p.readAny(val, props.MapValProp); err != nil { + if err := p.readAny(val, props.mvalprop); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f136b..000000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f6bcd0..000000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc68268d900..000000000000 --- a/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index 9d92c11f16f5..000000000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). 
- -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9d3092..000000000000 --- a/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. 
-// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. -func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9af8ce..000000000000 --- a/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. 
-package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod deleted file mode 100644 index fc84cd79d4c7..000000000000 --- a/vendor/github.com/google/uuid/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index b17461631511..000000000000 --- a/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) - h.Write(data) - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. 
It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 7f9e0c6c0e38..000000000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err == nil { - *uuid = id - } - return err -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b0619f..000000000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. 
-func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index 24b78edc9071..000000000000 --- a/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. -func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcddbd6e8..000000000000 --- a/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. 
-// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index f326b54db37a..000000000000 --- a/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. 
Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go deleted file mode 100644 index e6ef06cdc87a..000000000000 --- a/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. 
- if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. 
-// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c737806e..000000000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. -func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index 524404cc5227..000000000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
-func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
-func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. 
-func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. 
-func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 199a1ac65403..000000000000 --- a/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - copy(uuid[10:], nodeID[:]) - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 9ad1abad02d3..000000000000 --- a/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() (UUID, error) { - return NewRandomFromReader(rander) -} - -func NewRandomFromReader(r io.Reader) (UUID, error) { - var uuid UUID - _, err := io.ReadFull(r, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} - diff --git a/vendor/github.com/hashicorp/hil/LICENSE b/vendor/github.com/hashicorp/hil/LICENSE deleted file mode 100644 index 82b4de97c7e3..000000000000 --- a/vendor/github.com/hashicorp/hil/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. 
“Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. 
“Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. 
such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. 
You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. 
- Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md deleted file mode 100644 index 186ed2518c8f..000000000000 --- a/vendor/github.com/hashicorp/hil/README.md +++ /dev/null @@ -1,102 +0,0 @@ -# HIL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil) - -HIL (HashiCorp Interpolation Language) is a lightweight embedded language used -primarily for configuration interpolation. The goal of HIL is to make a simple -language for interpolations in the various configurations of HashiCorp tools. - -HIL is built to interpolate any string, but is in use by HashiCorp primarily -with [HCL](https://github.com/hashicorp/hcl). HCL is _not required_ in any -way for use with HIL. - -HIL isn't meant to be a general purpose language. It was built for basic -configuration interpolations. Therefore, you can't currently write functions, -have conditionals, set intermediary variables, etc. within HIL itself. It is -possible some of these may be added later but the right use case must exist. - -## Why? - -Many of our tools have support for something similar to templates, but -within the configuration itself. The most prominent requirement was in -[Terraform](https://github.com/hashicorp/terraform) where we wanted the -configuration to be able to reference values from elsewhere in the -configuration. Example: - - foo = "hi ${var.world}" - -We originally used a full templating language for this, but found it -was too heavy weight. Additionally, many full languages required bindings -to C (and thus the usage of cgo) which we try to avoid to make cross-compilation -easier. We then moved to very basic regular expression based -string replacement, but found the need for basic arithmetic and function -calls resulting in overly complex regular expressions. 
- -Ultimately, we wrote our own mini-language within Terraform itself. As -we built other projects such as [Nomad](https://nomadproject.io) and -[Otto](https://ottoproject.io), the need for basic interpolations arose -again. - -Thus HIL was born. It is extracted from Terraform, cleaned up, and -better tested for general purpose use. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammer is listed here. - -Code begins within `${` and `}`. Outside of this, text is treated -literally. For example, `foo` is a valid HIL program that is just the -string "foo", but `foo ${bar}` is an HIL program that is the string "foo " -concatened with the value of `bar`. For the remainder of the syntax -docs, we'll assume you're within `${}`. - - * Identifiers are any text in the format of `[a-zA-Z0-9-.]`. Example - identifiers: `foo`, `var.foo`, `foo-bar`. - - * Strings are double quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Numbers are assumed to be base 10. If you prefix a number with 0x, - it is treated as a hexadecimal. If it is prefixed with 0, it is - treated as an octal. Numbers can be in scientific notation: "1e10". - - * Unary `-` can be used for negative numbers. Example: `-10` or `-0.2` - - * Boolean values: `true`, `false` - - * The following arithmetic operations are allowed: +, -, *, /, %. - - * Function calls are in the form of `name(arg1, arg2, ...)`. Example: - `add(1, 5)`. Arguments can be any valid HIL expression, example: - `add(1, var.foo)` or even nested function calls: - `add(1, get("some value"))`. - - * Within strings, further interpolations can be opened with `${}`. - Example: `"Hello ${nested}"`. A full example including the - original `${}` (remember this list assumes were inside of one - already) could be: `foo ${func("hello ${var.foo}")}`. - -## Language Changes - -We've used this mini-language in Terraform for years. 
For backwards compatibility -reasons, we're unlikely to make an incompatible change to the language but -we're not currently making that promise, either. - -The internal API of this project may very well change as we evolve it -to work with more of our projects. We recommend using some sort of dependency -management solution with this package. - -## Future Changes - -The following changes are already planned to be made at some point: - - * Richer types: lists, maps, etc. - - * Convert to a more standard Go parser structure similar to HCL. This - will improve our error messaging as well as allow us to have automatic - formatting. - - * Allow interpolations to result in more types than just a string. While - within the interpolation basic types are honored, the result is always - a string. diff --git a/vendor/github.com/hashicorp/hil/appveyor.yml b/vendor/github.com/hashicorp/hil/appveyor.yml deleted file mode 100644 index feaf7a34e226..000000000000 --- a/vendor/github.com/hashicorp/hil/appveyor.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: "build-{branch}-{build}" -image: Visual Studio 2015 -clone_folder: c:\gopath\src\github.com\hashicorp\hil -environment: - GOPATH: c:\gopath -init: - - git config --global core.autocrlf true -install: -- cmd: >- - echo %Path% - - go version - - go env - - go get -d -v -t ./... -build_script: -- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic.go b/vendor/github.com/hashicorp/hil/ast/arithmetic.go deleted file mode 100644 index 94dc24f89f0d..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/arithmetic.go +++ /dev/null @@ -1,43 +0,0 @@ -package ast - -import ( - "bytes" - "fmt" -) - -// Arithmetic represents a node where the result is arithmetic of -// two or more operands in the order given. 
-type Arithmetic struct { - Op ArithmeticOp - Exprs []Node - Posx Pos -} - -func (n *Arithmetic) Accept(v Visitor) Node { - for i, expr := range n.Exprs { - n.Exprs[i] = expr.Accept(v) - } - - return v(n) -} - -func (n *Arithmetic) Pos() Pos { - return n.Posx -} - -func (n *Arithmetic) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *Arithmetic) String() string { - var b bytes.Buffer - for _, expr := range n.Exprs { - b.WriteString(fmt.Sprintf("%s", expr)) - } - - return b.String() -} - -func (n *Arithmetic) Type(Scope) (Type, error) { - return TypeInt, nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go deleted file mode 100644 index 18880c604738..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go +++ /dev/null @@ -1,24 +0,0 @@ -package ast - -// ArithmeticOp is the operation to use for the math. -type ArithmeticOp int - -const ( - ArithmeticOpInvalid ArithmeticOp = 0 - - ArithmeticOpAdd ArithmeticOp = iota - ArithmeticOpSub - ArithmeticOpMul - ArithmeticOpDiv - ArithmeticOpMod - - ArithmeticOpLogicalAnd - ArithmeticOpLogicalOr - - ArithmeticOpEqual - ArithmeticOpNotEqual - ArithmeticOpLessThan - ArithmeticOpLessThanOrEqual - ArithmeticOpGreaterThan - ArithmeticOpGreaterThanOrEqual -) diff --git a/vendor/github.com/hashicorp/hil/ast/ast.go b/vendor/github.com/hashicorp/hil/ast/ast.go deleted file mode 100644 index c6350f8bbae3..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/ast.go +++ /dev/null @@ -1,99 +0,0 @@ -package ast - -import ( - "fmt" -) - -// Node is the interface that all AST nodes must implement. -type Node interface { - // Accept is called to dispatch to the visitors. It must return the - // resulting Node (which might be different in an AST transform). - Accept(Visitor) Node - - // Pos returns the position of this node in some source. - Pos() Pos - - // Type returns the type of this node for the given context. 
- Type(Scope) (Type, error) -} - -// Pos is the starting position of an AST node -type Pos struct { - Column, Line int // Column/Line number, starting at 1 - Filename string // Optional source filename, if known -} - -func (p Pos) String() string { - if p.Filename == "" { - return fmt.Sprintf("%d:%d", p.Line, p.Column) - } else { - return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column) - } -} - -// InitPos is an initiaial position value. This should be used as -// the starting position (presets the column and line to 1). -var InitPos = Pos{Column: 1, Line: 1} - -// Visitors are just implementations of this function. -// -// The function must return the Node to replace this node with. "nil" is -// _not_ a valid return value. If there is no replacement, the original node -// should be returned. We build this replacement directly into the visitor -// pattern since AST transformations are a common and useful tool and -// building it into the AST itself makes it required for future Node -// implementations and very easy to do. -// -// Note that this isn't a true implementation of the visitor pattern, which -// generally requires proper type dispatch on the function. However, -// implementing this basic visitor pattern style is still very useful even -// if you have to type switch. -type Visitor func(Node) Node - -//go:generate stringer -type=Type - -// Type is the type of any value. -type Type uint32 - -const ( - TypeInvalid Type = 0 - TypeAny Type = 1 << iota - TypeBool - TypeString - TypeInt - TypeFloat - TypeList - TypeMap - - // This is a special type used by Terraform to mark "unknown" values. - // It is impossible for this type to be introduced into your HIL programs - // unless you explicitly set a variable to this value. In that case, - // any operation including the variable will return "TypeUnknown" as the - // type. 
- TypeUnknown -) - -func (t Type) Printable() string { - switch t { - case TypeInvalid: - return "invalid type" - case TypeAny: - return "any type" - case TypeBool: - return "type bool" - case TypeString: - return "type string" - case TypeInt: - return "type int" - case TypeFloat: - return "type float" - case TypeList: - return "type list" - case TypeMap: - return "type map" - case TypeUnknown: - return "type unknown" - default: - return "unknown type" - } -} diff --git a/vendor/github.com/hashicorp/hil/ast/call.go b/vendor/github.com/hashicorp/hil/ast/call.go deleted file mode 100644 index 0557011022fc..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/call.go +++ /dev/null @@ -1,47 +0,0 @@ -package ast - -import ( - "fmt" - "strings" -) - -// Call represents a function call. -type Call struct { - Func string - Args []Node - Posx Pos -} - -func (n *Call) Accept(v Visitor) Node { - for i, a := range n.Args { - n.Args[i] = a.Accept(v) - } - - return v(n) -} - -func (n *Call) Pos() Pos { - return n.Posx -} - -func (n *Call) String() string { - args := make([]string, len(n.Args)) - for i, arg := range n.Args { - args[i] = fmt.Sprintf("%s", arg) - } - - return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", ")) -} - -func (n *Call) Type(s Scope) (Type, error) { - f, ok := s.LookupFunc(n.Func) - if !ok { - return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func) - } - - return f.ReturnType, nil -} - -func (n *Call) GoString() string { - return fmt.Sprintf("*%#v", *n) -} diff --git a/vendor/github.com/hashicorp/hil/ast/conditional.go b/vendor/github.com/hashicorp/hil/ast/conditional.go deleted file mode 100644 index be48f89d46fc..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/conditional.go +++ /dev/null @@ -1,36 +0,0 @@ -package ast - -import ( - "fmt" -) - -type Conditional struct { - CondExpr Node - TrueExpr Node - FalseExpr Node - Posx Pos -} - -// Accept passes the given visitor to the child nodes in this order: -// CondExpr, TrueExpr, 
FalseExpr. It then finally passes itself to the visitor. -func (n *Conditional) Accept(v Visitor) Node { - n.CondExpr = n.CondExpr.Accept(v) - n.TrueExpr = n.TrueExpr.Accept(v) - n.FalseExpr = n.FalseExpr.Accept(v) - - return v(n) -} - -func (n *Conditional) Pos() Pos { - return n.Posx -} - -func (n *Conditional) Type(Scope) (Type, error) { - // This is not actually a useful value; the type checker ignores - // this function when analyzing conditionals, just as with Arithmetic. - return TypeInt, nil -} - -func (n *Conditional) GoString() string { - return fmt.Sprintf("*%#v", *n) -} diff --git a/vendor/github.com/hashicorp/hil/ast/index.go b/vendor/github.com/hashicorp/hil/ast/index.go deleted file mode 100644 index 860c25fd24d0..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/index.go +++ /dev/null @@ -1,76 +0,0 @@ -package ast - -import ( - "fmt" - "strings" -) - -// Index represents an indexing operation into another data structure -type Index struct { - Target Node - Key Node - Posx Pos -} - -func (n *Index) Accept(v Visitor) Node { - n.Target = n.Target.Accept(v) - n.Key = n.Key.Accept(v) - return v(n) -} - -func (n *Index) Pos() Pos { - return n.Posx -} - -func (n *Index) String() string { - return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key) -} - -func (n *Index) Type(s Scope) (Type, error) { - variableAccess, ok := n.Target.(*VariableAccess) - if !ok { - return TypeInvalid, fmt.Errorf("target is not a variable") - } - - variable, ok := s.LookupVar(variableAccess.Name) - if !ok { - return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name) - } - - switch variable.Type { - case TypeList: - return n.typeList(variable, variableAccess.Name) - case TypeMap: - return n.typeMap(variable, variableAccess.Name) - default: - return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type) - } -} - -func (n *Index) typeList(variable Variable, variableName string) (Type, error) { - // We assume type 
checking has already determined that this is a list - list := variable.Value.([]Variable) - - return VariableListElementTypesAreHomogenous(variableName, list) -} - -func (n *Index) typeMap(variable Variable, variableName string) (Type, error) { - // We assume type checking has already determined that this is a map - vmap := variable.Value.(map[string]Variable) - - return VariableMapValueTypesAreHomogenous(variableName, vmap) -} - -func reportTypes(typesFound map[Type]struct{}) string { - stringTypes := make([]string, len(typesFound)) - i := 0 - for k, _ := range typesFound { - stringTypes[0] = k.String() - i++ - } - return strings.Join(stringTypes, ", ") -} - -func (n *Index) GoString() string { - return fmt.Sprintf("*%#v", *n) -} diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go deleted file mode 100644 index da6014fee2ba..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/literal.go +++ /dev/null @@ -1,88 +0,0 @@ -package ast - -import ( - "fmt" - "reflect" -) - -// LiteralNode represents a single literal value, such as "foo" or -// 42 or 3.14159. Based on the Type, the Value can be safely cast. -type LiteralNode struct { - Value interface{} - Typex Type - Posx Pos -} - -// NewLiteralNode returns a new literal node representing the given -// literal Go value, which must correspond to one of the primitive types -// supported by HIL. Lists and maps cannot currently be constructed via -// this function. -// -// If an inappropriately-typed value is provided, this function will -// return an error. The main intended use of this function is to produce -// "synthetic" literals from constants in code, where the value type is -// well known at compile time. To easily store these in global variables, -// see also MustNewLiteralNode. 
-func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) { - goType := reflect.TypeOf(value) - var hilType Type - - switch goType.Kind() { - case reflect.Bool: - hilType = TypeBool - case reflect.Int: - hilType = TypeInt - case reflect.Float64: - hilType = TypeFloat - case reflect.String: - hilType = TypeString - default: - return nil, fmt.Errorf("unsupported literal node type: %T", value) - } - - return &LiteralNode{ - Value: value, - Typex: hilType, - Posx: pos, - }, nil -} - -// MustNewLiteralNode wraps NewLiteralNode and panics if an error is -// returned, thus allowing valid literal nodes to be easily assigned to -// global variables. -func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode { - node, err := NewLiteralNode(value, pos) - if err != nil { - panic(err) - } - return node -} - -func (n *LiteralNode) Accept(v Visitor) Node { - return v(n) -} - -func (n *LiteralNode) Pos() Pos { - return n.Posx -} - -func (n *LiteralNode) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *LiteralNode) String() string { - return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value) -} - -func (n *LiteralNode) Type(Scope) (Type, error) { - return n.Typex, nil -} - -// IsUnknown returns true either if the node's value is itself unknown -// of if it is a collection containing any unknown elements, deeply. -func (n *LiteralNode) IsUnknown() bool { - return IsUnknown(Variable{ - Type: n.Typex, - Value: n.Value, - }) -} diff --git a/vendor/github.com/hashicorp/hil/ast/output.go b/vendor/github.com/hashicorp/hil/ast/output.go deleted file mode 100644 index 1e27f970b33b..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/output.go +++ /dev/null @@ -1,78 +0,0 @@ -package ast - -import ( - "bytes" - "fmt" -) - -// Output represents the root node of all interpolation evaluations. 
If the -// output only has one expression which is either a TypeList or TypeMap, the -// Output can be type-asserted to []interface{} or map[string]interface{} -// respectively. Otherwise the Output evaluates as a string, and concatenates -// the evaluation of each expression. -type Output struct { - Exprs []Node - Posx Pos -} - -func (n *Output) Accept(v Visitor) Node { - for i, expr := range n.Exprs { - n.Exprs[i] = expr.Accept(v) - } - - return v(n) -} - -func (n *Output) Pos() Pos { - return n.Posx -} - -func (n *Output) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *Output) String() string { - var b bytes.Buffer - for _, expr := range n.Exprs { - b.WriteString(fmt.Sprintf("%s", expr)) - } - - return b.String() -} - -func (n *Output) Type(s Scope) (Type, error) { - // Special case no expressions for backward compatibility - if len(n.Exprs) == 0 { - return TypeString, nil - } - - // Special case a single expression of types list or map - if len(n.Exprs) == 1 { - exprType, err := n.Exprs[0].Type(s) - if err != nil { - return TypeInvalid, err - } - switch exprType { - case TypeList: - return TypeList, nil - case TypeMap: - return TypeMap, nil - } - } - - // Otherwise ensure all our expressions are strings - for index, expr := range n.Exprs { - exprType, err := expr.Type(s) - if err != nil { - return TypeInvalid, err - } - // We only look for things we know we can't coerce with an implicit conversion func - if exprType == TypeList || exprType == TypeMap { - return TypeInvalid, fmt.Errorf( - "multi-expression HIL outputs may only have string inputs: %d is type %s", - index, exprType) - } - } - - return TypeString, nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/scope.go b/vendor/github.com/hashicorp/hil/ast/scope.go deleted file mode 100644 index 7a975d99930e..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/scope.go +++ /dev/null @@ -1,90 +0,0 @@ -package ast - -import ( - "fmt" - "reflect" -) - -// Scope is the interface used to 
look up variables and functions while -// evaluating. How these functions/variables are defined are up to the caller. -type Scope interface { - LookupFunc(string) (Function, bool) - LookupVar(string) (Variable, bool) -} - -// Variable is a variable value for execution given as input to the engine. -// It records the value of a variables along with their type. -type Variable struct { - Value interface{} - Type Type -} - -// NewVariable creates a new Variable for the given value. This will -// attempt to infer the correct type. If it can't, an error will be returned. -func NewVariable(v interface{}) (result Variable, err error) { - switch v := reflect.ValueOf(v); v.Kind() { - case reflect.String: - result.Type = TypeString - default: - err = fmt.Errorf("Unknown type: %s", v.Kind()) - } - - result.Value = v - return -} - -// String implements Stringer on Variable, displaying the type and value -// of the Variable. -func (v Variable) String() string { - return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value) -} - -// Function defines a function that can be executed by the engine. -// The type checker will validate that the proper types will be called -// to the callback. -type Function struct { - // ArgTypes is the list of types in argument order. These are the - // required arguments. - // - // ReturnType is the type of the returned value. The Callback MUST - // return this type. - ArgTypes []Type - ReturnType Type - - // Variadic, if true, says that this function is variadic, meaning - // it takes a variable number of arguments. In this case, the - // VariadicType must be set. - Variadic bool - VariadicType Type - - // Callback is the function called for a function. The argument - // types are guaranteed to match the spec above by the type checker. - // The length of the args is strictly == len(ArgTypes) unless Varidiac - // is true, in which case its >= len(ArgTypes). 
- Callback func([]interface{}) (interface{}, error) -} - -// BasicScope is a simple scope that looks up variables and functions -// using a map. -type BasicScope struct { - FuncMap map[string]Function - VarMap map[string]Variable -} - -func (s *BasicScope) LookupFunc(n string) (Function, bool) { - if s == nil { - return Function{}, false - } - - v, ok := s.FuncMap[n] - return v, ok -} - -func (s *BasicScope) LookupVar(n string) (Variable, bool) { - if s == nil { - return Variable{}, false - } - - v, ok := s.VarMap[n] - return v, ok -} diff --git a/vendor/github.com/hashicorp/hil/ast/stack.go b/vendor/github.com/hashicorp/hil/ast/stack.go deleted file mode 100644 index bd2bc157862a..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/stack.go +++ /dev/null @@ -1,25 +0,0 @@ -package ast - -// Stack is a stack of Node. -type Stack struct { - stack []Node -} - -func (s *Stack) Len() int { - return len(s.stack) -} - -func (s *Stack) Push(n Node) { - s.stack = append(s.stack, n) -} - -func (s *Stack) Pop() Node { - x := s.stack[len(s.stack)-1] - s.stack[len(s.stack)-1] = nil - s.stack = s.stack[:len(s.stack)-1] - return x -} - -func (s *Stack) Reset() { - s.stack = nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/type_string.go b/vendor/github.com/hashicorp/hil/ast/type_string.go deleted file mode 100644 index 1f51a98dd549..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/type_string.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by "stringer -type=Type"; DO NOT EDIT - -package ast - -import "fmt" - -const ( - _Type_name_0 = "TypeInvalid" - _Type_name_1 = "TypeAny" - _Type_name_2 = "TypeBool" - _Type_name_3 = "TypeString" - _Type_name_4 = "TypeInt" - _Type_name_5 = "TypeFloat" - _Type_name_6 = "TypeList" - _Type_name_7 = "TypeMap" - _Type_name_8 = "TypeUnknown" -) - -var ( - _Type_index_0 = [...]uint8{0, 11} - _Type_index_1 = [...]uint8{0, 7} - _Type_index_2 = [...]uint8{0, 8} - _Type_index_3 = [...]uint8{0, 10} - _Type_index_4 = [...]uint8{0, 7} - 
_Type_index_5 = [...]uint8{0, 9} - _Type_index_6 = [...]uint8{0, 8} - _Type_index_7 = [...]uint8{0, 7} - _Type_index_8 = [...]uint8{0, 11} -) - -func (i Type) String() string { - switch { - case i == 0: - return _Type_name_0 - case i == 2: - return _Type_name_1 - case i == 4: - return _Type_name_2 - case i == 8: - return _Type_name_3 - case i == 16: - return _Type_name_4 - case i == 32: - return _Type_name_5 - case i == 64: - return _Type_name_6 - case i == 128: - return _Type_name_7 - case i == 256: - return _Type_name_8 - default: - return fmt.Sprintf("Type(%d)", i) - } -} diff --git a/vendor/github.com/hashicorp/hil/ast/unknown.go b/vendor/github.com/hashicorp/hil/ast/unknown.go deleted file mode 100644 index d6ddaecc78e7..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/unknown.go +++ /dev/null @@ -1,30 +0,0 @@ -package ast - -// IsUnknown reports whether a variable is unknown or contains any value -// that is unknown. This will recurse into lists and maps and so on. -func IsUnknown(v Variable) bool { - // If it is unknown itself, return true - if v.Type == TypeUnknown { - return true - } - - // If it is a container type, check the values - switch v.Type { - case TypeList: - for _, el := range v.Value.([]Variable) { - if IsUnknown(el) { - return true - } - } - case TypeMap: - for _, el := range v.Value.(map[string]Variable) { - if IsUnknown(el) { - return true - } - } - default: - } - - // Not a container type or survive the above checks - return false -} diff --git a/vendor/github.com/hashicorp/hil/ast/variable_access.go b/vendor/github.com/hashicorp/hil/ast/variable_access.go deleted file mode 100644 index 4c1362d75315..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/variable_access.go +++ /dev/null @@ -1,36 +0,0 @@ -package ast - -import ( - "fmt" -) - -// VariableAccess represents a variable access. 
-type VariableAccess struct { - Name string - Posx Pos -} - -func (n *VariableAccess) Accept(v Visitor) Node { - return v(n) -} - -func (n *VariableAccess) Pos() Pos { - return n.Posx -} - -func (n *VariableAccess) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -func (n *VariableAccess) String() string { - return fmt.Sprintf("Variable(%s)", n.Name) -} - -func (n *VariableAccess) Type(s Scope) (Type, error) { - v, ok := s.LookupVar(n.Name) - if !ok { - return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name) - } - - return v.Type, nil -} diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go deleted file mode 100644 index 06bd18de2ac2..000000000000 --- a/vendor/github.com/hashicorp/hil/ast/variables_helper.go +++ /dev/null @@ -1,63 +0,0 @@ -package ast - -import "fmt" - -func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) { - if len(list) == 0 { - return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName) - } - - elemType := TypeUnknown - for _, v := range list { - if v.Type == TypeUnknown { - continue - } - - if elemType == TypeUnknown { - elemType = v.Type - continue - } - - if v.Type != elemType { - return TypeInvalid, fmt.Errorf( - "list %q does not have homogenous types. 
found %s and then %s", - variableName, - elemType, v.Type, - ) - } - - elemType = v.Type - } - - return elemType, nil -} - -func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) { - if len(vmap) == 0 { - return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName) - } - - elemType := TypeUnknown - for _, v := range vmap { - if v.Type == TypeUnknown { - continue - } - - if elemType == TypeUnknown { - elemType = v.Type - continue - } - - if v.Type != elemType { - return TypeInvalid, fmt.Errorf( - "map %q does not have homogenous types. found %s and then %s", - variableName, - elemType, v.Type, - ) - } - - elemType = v.Type - } - - return elemType, nil -} diff --git a/vendor/github.com/hashicorp/hil/builtins.go b/vendor/github.com/hashicorp/hil/builtins.go deleted file mode 100644 index 909c788a2c78..000000000000 --- a/vendor/github.com/hashicorp/hil/builtins.go +++ /dev/null @@ -1,331 +0,0 @@ -package hil - -import ( - "errors" - "strconv" - - "github.com/hashicorp/hil/ast" -) - -// NOTE: All builtins are tested in engine_test.go - -func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope { - if scope == nil { - scope = new(ast.BasicScope) - } - if scope.FuncMap == nil { - scope.FuncMap = make(map[string]ast.Function) - } - - // Implicit conversions - scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString() - scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt() - scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString() - scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat() - scope.FuncMap["__builtin_IntToString"] = builtinIntToString() - scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt() - scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat() - scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool() - - // Math operations - scope.FuncMap["__builtin_IntMath"] = builtinIntMath() - 
scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath() - scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare() - scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare() - scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare() - scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare() - scope.FuncMap["__builtin_Logical"] = builtinLogical() - return scope -} - -func builtinFloatMath() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeFloat, - ReturnType: ast.TypeFloat, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(float64) - for _, raw := range args[2:] { - arg := raw.(float64) - switch op { - case ast.ArithmeticOpAdd: - result += arg - case ast.ArithmeticOpSub: - result -= arg - case ast.ArithmeticOpMul: - result *= arg - case ast.ArithmeticOpDiv: - result /= arg - } - } - - return result, nil - }, - } -} - -func builtinIntMath() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeInt, - ReturnType: ast.TypeInt, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(int) - for _, raw := range args[2:] { - arg := raw.(int) - switch op { - case ast.ArithmeticOpAdd: - result += arg - case ast.ArithmeticOpSub: - result -= arg - case ast.ArithmeticOpMul: - result *= arg - case ast.ArithmeticOpDiv: - if arg == 0 { - return nil, errors.New("divide by zero") - } - - result /= arg - case ast.ArithmeticOpMod: - if arg == 0 { - return nil, errors.New("divide by zero") - } - - result = result % arg - } - } - - return result, nil - }, - } -} - -func builtinBoolCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := 
args[0].(ast.ArithmeticOp) - lhs := args[1].(bool) - rhs := args[2].(bool) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinFloatCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(float64) - rhs := args[2].(float64) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - case ast.ArithmeticOpLessThan: - return lhs < rhs, nil - case ast.ArithmeticOpLessThanOrEqual: - return lhs <= rhs, nil - case ast.ArithmeticOpGreaterThan: - return lhs > rhs, nil - case ast.ArithmeticOpGreaterThanOrEqual: - return lhs >= rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinIntCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt}, - Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(int) - rhs := args[2].(int) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - case ast.ArithmeticOpLessThan: - return lhs < rhs, nil - case ast.ArithmeticOpLessThanOrEqual: - return lhs <= rhs, nil - case ast.ArithmeticOpGreaterThan: - return lhs > rhs, nil - case ast.ArithmeticOpGreaterThanOrEqual: - return lhs >= rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinStringCompare() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString}, 
- Variadic: false, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - lhs := args[1].(string) - rhs := args[2].(string) - - switch op { - case ast.ArithmeticOpEqual: - return lhs == rhs, nil - case ast.ArithmeticOpNotEqual: - return lhs != rhs, nil - default: - return nil, errors.New("invalid comparison operation") - } - }, - } -} - -func builtinLogical() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - Variadic: true, - VariadicType: ast.TypeBool, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - op := args[0].(ast.ArithmeticOp) - result := args[1].(bool) - for _, raw := range args[2:] { - arg := raw.(bool) - switch op { - case ast.ArithmeticOpLogicalOr: - result = result || arg - case ast.ArithmeticOpLogicalAnd: - result = result && arg - default: - return nil, errors.New("invalid logical operator") - } - } - - return result, nil - }, - } -} - -func builtinFloatToInt() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeFloat}, - ReturnType: ast.TypeInt, - Callback: func(args []interface{}) (interface{}, error) { - return int(args[0].(float64)), nil - }, - } -} - -func builtinFloatToString() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeFloat}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return strconv.FormatFloat( - args[0].(float64), 'g', -1, 64), nil - }, - } -} - -func builtinIntToFloat() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - ReturnType: ast.TypeFloat, - Callback: func(args []interface{}) (interface{}, error) { - return float64(args[0].(int)), nil - }, - } -} - -func builtinIntToString() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return 
strconv.FormatInt(int64(args[0].(int)), 10), nil - }, - } -} - -func builtinStringToInt() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeInt}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - v, err := strconv.ParseInt(args[0].(string), 0, 0) - if err != nil { - return nil, err - } - - return int(v), nil - }, - } -} - -func builtinStringToFloat() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeFloat, - Callback: func(args []interface{}) (interface{}, error) { - v, err := strconv.ParseFloat(args[0].(string), 64) - if err != nil { - return nil, err - } - - return v, nil - }, - } -} - -func builtinBoolToString() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeBool}, - ReturnType: ast.TypeString, - Callback: func(args []interface{}) (interface{}, error) { - return strconv.FormatBool(args[0].(bool)), nil - }, - } -} - -func builtinStringToBool() ast.Function { - return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeBool, - Callback: func(args []interface{}) (interface{}, error) { - v, err := strconv.ParseBool(args[0].(string)) - if err != nil { - return nil, err - } - - return v, nil - }, - } -} diff --git a/vendor/github.com/hashicorp/hil/check_identifier.go b/vendor/github.com/hashicorp/hil/check_identifier.go deleted file mode 100644 index 474f50588e17..000000000000 --- a/vendor/github.com/hashicorp/hil/check_identifier.go +++ /dev/null @@ -1,88 +0,0 @@ -package hil - -import ( - "fmt" - "sync" - - "github.com/hashicorp/hil/ast" -) - -// IdentifierCheck is a SemanticCheck that checks that all identifiers -// resolve properly and that the right number of arguments are passed -// to functions. 
-type IdentifierCheck struct { - Scope ast.Scope - - err error - lock sync.Mutex -} - -func (c *IdentifierCheck) Visit(root ast.Node) error { - c.lock.Lock() - defer c.lock.Unlock() - defer c.reset() - root.Accept(c.visit) - return c.err -} - -func (c *IdentifierCheck) visit(raw ast.Node) ast.Node { - if c.err != nil { - return raw - } - - switch n := raw.(type) { - case *ast.Call: - c.visitCall(n) - case *ast.VariableAccess: - c.visitVariableAccess(n) - case *ast.Output: - // Ignore - case *ast.LiteralNode: - // Ignore - default: - // Ignore - } - - // We never do replacement with this visitor - return raw -} - -func (c *IdentifierCheck) visitCall(n *ast.Call) { - // Look up the function in the map - function, ok := c.Scope.LookupFunc(n.Func) - if !ok { - c.createErr(n, fmt.Sprintf("unknown function called: %s", n.Func)) - return - } - - // Break up the args into what is variadic and what is required - args := n.Args - if function.Variadic && len(args) > len(function.ArgTypes) { - args = n.Args[:len(function.ArgTypes)] - } - - // Verify the number of arguments - if len(args) != len(function.ArgTypes) { - c.createErr(n, fmt.Sprintf( - "%s: expected %d arguments, got %d", - n.Func, len(function.ArgTypes), len(n.Args))) - return - } -} - -func (c *IdentifierCheck) visitVariableAccess(n *ast.VariableAccess) { - // Look up the variable in the map - if _, ok := c.Scope.LookupVar(n.Name); !ok { - c.createErr(n, fmt.Sprintf( - "unknown variable accessed: %s", n.Name)) - return - } -} - -func (c *IdentifierCheck) createErr(n ast.Node, str string) { - c.err = fmt.Errorf("%s: %s", n.Pos(), str) -} - -func (c *IdentifierCheck) reset() { - c.err = nil -} diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go deleted file mode 100644 index f16da391858a..000000000000 --- a/vendor/github.com/hashicorp/hil/check_types.go +++ /dev/null @@ -1,668 +0,0 @@ -package hil - -import ( - "fmt" - "sync" - - "github.com/hashicorp/hil/ast" 
-) - -// TypeCheck implements ast.Visitor for type checking an AST tree. -// It requires some configuration to look up the type of nodes. -// -// It also optionally will not type error and will insert an implicit -// type conversions for specific types if specified by the Implicit -// field. Note that this is kind of organizationally weird to put into -// this structure but we'd rather do that than duplicate the type checking -// logic multiple times. -type TypeCheck struct { - Scope ast.Scope - - // Implicit is a map of implicit type conversions that we can do, - // and that shouldn't error. The key of the first map is the from type, - // the key of the second map is the to type, and the final string - // value is the function to call (which must be registered in the Scope). - Implicit map[ast.Type]map[ast.Type]string - - // Stack of types. This shouldn't be used directly except by implementations - // of TypeCheckNode. - Stack []ast.Type - - err error - lock sync.Mutex -} - -// TypeCheckNode is the interface that must be implemented by any -// ast.Node that wants to support type-checking. If the type checker -// encounters a node that doesn't implement this, it will error. -type TypeCheckNode interface { - TypeCheck(*TypeCheck) (ast.Node, error) -} - -func (v *TypeCheck) Visit(root ast.Node) error { - v.lock.Lock() - defer v.lock.Unlock() - defer v.reset() - root.Accept(v.visit) - - // If the resulting type is unknown, then just let the whole thing go. 
- if v.err == errExitUnknown { - v.err = nil - } - - return v.err -} - -func (v *TypeCheck) visit(raw ast.Node) ast.Node { - if v.err != nil { - return raw - } - - var result ast.Node - var err error - switch n := raw.(type) { - case *ast.Arithmetic: - tc := &typeCheckArithmetic{n} - result, err = tc.TypeCheck(v) - case *ast.Call: - tc := &typeCheckCall{n} - result, err = tc.TypeCheck(v) - case *ast.Conditional: - tc := &typeCheckConditional{n} - result, err = tc.TypeCheck(v) - case *ast.Index: - tc := &typeCheckIndex{n} - result, err = tc.TypeCheck(v) - case *ast.Output: - tc := &typeCheckOutput{n} - result, err = tc.TypeCheck(v) - case *ast.LiteralNode: - tc := &typeCheckLiteral{n} - result, err = tc.TypeCheck(v) - case *ast.VariableAccess: - tc := &typeCheckVariableAccess{n} - result, err = tc.TypeCheck(v) - default: - tc, ok := raw.(TypeCheckNode) - if !ok { - err = fmt.Errorf("unknown node for type check: %#v", raw) - break - } - - result, err = tc.TypeCheck(v) - } - - if err != nil { - pos := raw.Pos() - v.err = fmt.Errorf("At column %d, line %d: %s", - pos.Column, pos.Line, err) - } - - return result -} - -type typeCheckArithmetic struct { - n *ast.Arithmetic -} - -func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) { - // The arguments are on the stack in reverse order, so pop them off. 
- exprs := make([]ast.Type, len(tc.n.Exprs)) - for i, _ := range tc.n.Exprs { - exprs[len(tc.n.Exprs)-1-i] = v.StackPop() - } - - // If any operand is unknown then our result is automatically unknown - for _, ty := range exprs { - if ty == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - } - - switch tc.n.Op { - case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr: - return tc.checkLogical(v, exprs) - case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual, - ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan, - ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual: - return tc.checkComparison(v, exprs) - default: - return tc.checkNumeric(v, exprs) - } - -} - -func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - // Determine the resulting type we want. We do this by going over - // every expression until we find one with a type we recognize. - // We do this because the first expr might be a string ("var.foo") - // and we need to know what to implicit to. - mathFunc := "__builtin_IntMath" - mathType := ast.TypeInt - for _, v := range exprs { - // We assume int math but if we find ANY float, the entire - // expression turns into floating point math. - if v == ast.TypeFloat { - mathFunc = "__builtin_FloatMath" - mathType = v - break - } - } - - // Verify the args - for i, arg := range exprs { - if arg != mathType { - cn := v.ImplicitConversion(exprs[i], mathType, tc.n.Exprs[i]) - if cn != nil { - tc.n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "operand %d should be %s, got %s", - i+1, mathType, arg) - } - } - - // Modulo doesn't work for floats - if mathType == ast.TypeFloat && tc.n.Op == ast.ArithmeticOpMod { - return nil, fmt.Errorf("modulo cannot be used with floats") - } - - // Return type - v.StackPush(mathType) - - // Replace our node with a call to the proper function. This isn't - // type checked but we already verified types. 
- args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: mathFunc, - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - if len(exprs) != 2 { - // This should never happen, because the parser never produces - // nodes that violate this. - return nil, fmt.Errorf( - "comparison operators must have exactly two operands", - ) - } - - // The first operand always dictates the type for a comparison. - compareFunc := "" - compareType := exprs[0] - switch compareType { - case ast.TypeBool: - compareFunc = "__builtin_BoolCompare" - case ast.TypeFloat: - compareFunc = "__builtin_FloatCompare" - case ast.TypeInt: - compareFunc = "__builtin_IntCompare" - case ast.TypeString: - compareFunc = "__builtin_StringCompare" - default: - return nil, fmt.Errorf( - "comparison operators apply only to bool, float, int, and string", - ) - } - - // For non-equality comparisons, we will do implicit conversions to - // integer types if possible. In this case, we need to go through and - // determine the type of comparison we're doing to enable the implicit - // conversion. 
- if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual { - compareFunc = "__builtin_IntCompare" - compareType = ast.TypeInt - for _, expr := range exprs { - if expr == ast.TypeFloat { - compareFunc = "__builtin_FloatCompare" - compareType = ast.TypeFloat - break - } - } - } - - // Verify (and possibly, convert) the args - for i, arg := range exprs { - if arg != compareType { - cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i]) - if cn != nil { - tc.n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "operand %d should be %s, got %s", - i+1, compareType, arg, - ) - } - } - - // Only ints and floats can have the <, >, <= and >= operators applied - switch tc.n.Op { - case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual: - // anything goes - default: - switch compareType { - case ast.TypeFloat, ast.TypeInt: - // fine - default: - return nil, fmt.Errorf( - "<, >, <= and >= may apply only to int and float values", - ) - } - } - - // Comparison operators always return bool - v.StackPush(ast.TypeBool) - - // Replace our node with a call to the proper function. This isn't - // type checked but we already verified types. 
- args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: compareFunc, - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { - for i, t := range exprs { - if t != ast.TypeBool { - cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i]) - if cn == nil { - return nil, fmt.Errorf( - "logical operators require boolean operands, not %s", - t, - ) - } - tc.n.Exprs[i] = cn - } - } - - // Return type is always boolean - v.StackPush(ast.TypeBool) - - // Arithmetic nodes are replaced with a call to a built-in function - args := make([]ast.Node, len(tc.n.Exprs)+1) - args[0] = &ast.LiteralNode{ - Value: tc.n.Op, - Typex: ast.TypeInt, - Posx: tc.n.Pos(), - } - copy(args[1:], tc.n.Exprs) - return &ast.Call{ - Func: "__builtin_Logical", - Args: args, - Posx: tc.n.Pos(), - }, nil -} - -type typeCheckCall struct { - n *ast.Call -} - -func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) { - // Look up the function in the map - function, ok := v.Scope.LookupFunc(tc.n.Func) - if !ok { - return nil, fmt.Errorf("unknown function called: %s", tc.n.Func) - } - - // The arguments are on the stack in reverse order, so pop them off. 
- args := make([]ast.Type, len(tc.n.Args)) - for i, _ := range tc.n.Args { - args[len(tc.n.Args)-1-i] = v.StackPop() - } - - // Verify the args - for i, expected := range function.ArgTypes { - if expected == ast.TypeAny { - continue - } - - if args[i] == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if args[i] != expected { - cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i]) - if cn != nil { - tc.n.Args[i] = cn - continue - } - - return nil, fmt.Errorf( - "%s: argument %d should be %s, got %s", - tc.n.Func, i+1, expected.Printable(), args[i].Printable()) - } - } - - // If we're variadic, then verify the types there - if function.Variadic && function.VariadicType != ast.TypeAny { - args = args[len(function.ArgTypes):] - for i, t := range args { - if t == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if t != function.VariadicType { - realI := i + len(function.ArgTypes) - cn := v.ImplicitConversion( - t, function.VariadicType, tc.n.Args[realI]) - if cn != nil { - tc.n.Args[realI] = cn - continue - } - - return nil, fmt.Errorf( - "%s: argument %d should be %s, got %s", - tc.n.Func, realI, - function.VariadicType.Printable(), t.Printable()) - } - } - } - - // Return type - v.StackPush(function.ReturnType) - - return tc.n, nil -} - -type typeCheckConditional struct { - n *ast.Conditional -} - -func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) { - // On the stack we have the types of the condition, true and false - // expressions, but they are in reverse order. 
- falseType := v.StackPop() - trueType := v.StackPop() - condType := v.StackPop() - - if condType == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - if condType != ast.TypeBool { - cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr) - if cn == nil { - return nil, fmt.Errorf( - "condition must be type bool, not %s", condType.Printable(), - ) - } - tc.n.CondExpr = cn - } - - // The types of the true and false expression must match - if trueType != falseType && trueType != ast.TypeUnknown && falseType != ast.TypeUnknown { - - // Since passing around stringified versions of other types is - // common, we pragmatically allow the false expression to dictate - // the result type when the true expression is a string. - if trueType == ast.TypeString { - cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr) - if cn == nil { - return nil, fmt.Errorf( - "true and false expression types must match; have %s and %s", - trueType.Printable(), falseType.Printable(), - ) - } - tc.n.TrueExpr = cn - trueType = falseType - } else { - cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr) - if cn == nil { - return nil, fmt.Errorf( - "true and false expression types must match; have %s and %s", - trueType.Printable(), falseType.Printable(), - ) - } - tc.n.FalseExpr = cn - falseType = trueType - } - } - - // Currently list and map types cannot be used, because we cannot - // generally assert that their element types are consistent. - // Such support might be added later, either by improving the type - // system or restricting usage to only variable and literal expressions, - // but for now this is simply prohibited because it doesn't seem to - // be a common enough case to be worth the complexity. 
- switch trueType { - case ast.TypeList: - return nil, fmt.Errorf( - "conditional operator cannot be used with list values", - ) - case ast.TypeMap: - return nil, fmt.Errorf( - "conditional operator cannot be used with map values", - ) - } - - // Result type (guaranteed to also match falseType due to the above) - if trueType == ast.TypeUnknown { - // falseType may also be unknown, but that's okay because two - // unknowns means our result is unknown anyway. - v.StackPush(falseType) - } else { - v.StackPush(trueType) - } - - return tc.n, nil -} - -type typeCheckOutput struct { - n *ast.Output -} - -func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) { - n := tc.n - types := make([]ast.Type, len(n.Exprs)) - for i, _ := range n.Exprs { - types[len(n.Exprs)-1-i] = v.StackPop() - } - - for _, ty := range types { - if ty == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - } - - // If there is only one argument and it is a list, we evaluate to a list - if len(types) == 1 { - switch t := types[0]; t { - case ast.TypeList: - fallthrough - case ast.TypeMap: - v.StackPush(t) - return n, nil - } - } - - // Otherwise, all concat args must be strings, so validate that - resultType := ast.TypeString - for i, t := range types { - - if t == ast.TypeUnknown { - resultType = ast.TypeUnknown - continue - } - - if t != ast.TypeString { - cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i]) - if cn != nil { - n.Exprs[i] = cn - continue - } - - return nil, fmt.Errorf( - "output of an HIL expression must be a string, or a single list (argument %d is %s)", i+1, t) - } - } - - // This always results in type string, unless there are unknowns - v.StackPush(resultType) - - return n, nil -} - -type typeCheckLiteral struct { - n *ast.LiteralNode -} - -func (tc *typeCheckLiteral) TypeCheck(v *TypeCheck) (ast.Node, error) { - v.StackPush(tc.n.Typex) - return tc.n, nil -} - -type typeCheckVariableAccess struct { - n *ast.VariableAccess -} - -func (tc 
*typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) { - // Look up the variable in the map - variable, ok := v.Scope.LookupVar(tc.n.Name) - if !ok { - return nil, fmt.Errorf( - "unknown variable accessed: %s", tc.n.Name) - } - - // Add the type to the stack - v.StackPush(variable.Type) - - return tc.n, nil -} - -type typeCheckIndex struct { - n *ast.Index -} - -func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) { - keyType := v.StackPop() - targetType := v.StackPop() - - if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - - // Ensure we have a VariableAccess as the target - varAccessNode, ok := tc.n.Target.(*ast.VariableAccess) - if !ok { - return nil, fmt.Errorf( - "target of an index must be a VariableAccess node, was %T", tc.n.Target) - } - - // Get the variable - variable, ok := v.Scope.LookupVar(varAccessNode.Name) - if !ok { - return nil, fmt.Errorf( - "unknown variable accessed: %s", varAccessNode.Name) - } - - switch targetType { - case ast.TypeList: - if keyType != ast.TypeInt { - tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key) - if tc.n.Key == nil { - return nil, fmt.Errorf( - "key of an index must be an int, was %s", keyType) - } - } - - valType, err := ast.VariableListElementTypesAreHomogenous( - varAccessNode.Name, variable.Value.([]ast.Variable)) - if err != nil { - return tc.n, err - } - - v.StackPush(valType) - return tc.n, nil - case ast.TypeMap: - if keyType != ast.TypeString { - tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key) - if tc.n.Key == nil { - return nil, fmt.Errorf( - "key of an index must be a string, was %s", keyType) - } - } - - valType, err := ast.VariableMapValueTypesAreHomogenous( - varAccessNode.Name, variable.Value.(map[string]ast.Variable)) - if err != nil { - return tc.n, err - } - - v.StackPush(valType) - return tc.n, nil - default: - return nil, fmt.Errorf("invalid index operation into 
non-indexable type: %s", variable.Type) - } -} - -func (v *TypeCheck) ImplicitConversion( - actual ast.Type, expected ast.Type, n ast.Node) ast.Node { - if v.Implicit == nil { - return nil - } - - fromMap, ok := v.Implicit[actual] - if !ok { - return nil - } - - toFunc, ok := fromMap[expected] - if !ok { - return nil - } - - return &ast.Call{ - Func: toFunc, - Args: []ast.Node{n}, - Posx: n.Pos(), - } -} - -func (v *TypeCheck) reset() { - v.Stack = nil - v.err = nil -} - -func (v *TypeCheck) StackPush(t ast.Type) { - v.Stack = append(v.Stack, t) -} - -func (v *TypeCheck) StackPop() ast.Type { - var x ast.Type - x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1] - return x -} - -func (v *TypeCheck) StackPeek() ast.Type { - if len(v.Stack) == 0 { - return ast.TypeInvalid - } - - return v.Stack[len(v.Stack)-1] -} diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go deleted file mode 100644 index f2024d01c28a..000000000000 --- a/vendor/github.com/hashicorp/hil/convert.go +++ /dev/null @@ -1,159 +0,0 @@ -package hil - -import ( - "fmt" - "reflect" - - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/mapstructure" -) - -// UnknownValue is a sentinel value that can be used to denote -// that a value of a variable (or map element, list element, etc.) -// is unknown. This will always have the type ast.TypeUnknown. -const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66" - -var hilMapstructureDecodeHookSlice []interface{} -var hilMapstructureDecodeHookStringSlice []string -var hilMapstructureDecodeHookMap map[string]interface{} - -// hilMapstructureWeakDecode behaves in the same way as mapstructure.WeakDecode -// but has a DecodeHook which defeats the backward compatibility mode of mapstructure -// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This -// allows us to use WeakDecode (desirable), but not fail on empty lists. 
-func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error { - config := &mapstructure.DecoderConfig{ - DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) { - sliceType := reflect.TypeOf(hilMapstructureDecodeHookSlice) - stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice) - mapType := reflect.TypeOf(hilMapstructureDecodeHookMap) - - if (source == sliceType || source == stringSliceType) && target == mapType { - return nil, fmt.Errorf("Cannot convert %s into a %s", source, target) - } - - return val, nil - }, - WeaklyTypedInput: true, - Result: rawVal, - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(m) -} - -func InterfaceToVariable(input interface{}) (ast.Variable, error) { - if inputVariable, ok := input.(ast.Variable); ok { - return inputVariable, nil - } - - var stringVal string - if err := hilMapstructureWeakDecode(input, &stringVal); err == nil { - // Special case the unknown value to turn into "unknown" - if stringVal == UnknownValue { - return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil - } - - // Otherwise return the string value - return ast.Variable{ - Type: ast.TypeString, - Value: stringVal, - }, nil - } - - var mapVal map[string]interface{} - if err := hilMapstructureWeakDecode(input, &mapVal); err == nil { - elements := make(map[string]ast.Variable) - for i, element := range mapVal { - varElement, err := InterfaceToVariable(element) - if err != nil { - return ast.Variable{}, err - } - elements[i] = varElement - } - - return ast.Variable{ - Type: ast.TypeMap, - Value: elements, - }, nil - } - - var sliceVal []interface{} - if err := hilMapstructureWeakDecode(input, &sliceVal); err == nil { - elements := make([]ast.Variable, len(sliceVal)) - for i, element := range sliceVal { - varElement, err := InterfaceToVariable(element) - if err != nil { - return ast.Variable{}, err - } - elements[i] = 
varElement - } - - return ast.Variable{ - Type: ast.TypeList, - Value: elements, - }, nil - } - - return ast.Variable{}, fmt.Errorf("value for conversion must be a string, interface{} or map[string]interface: got %T", input) -} - -func VariableToInterface(input ast.Variable) (interface{}, error) { - if input.Type == ast.TypeString { - if inputStr, ok := input.Value.(string); ok { - return inputStr, nil - } else { - return nil, fmt.Errorf("ast.Variable with type string has value which is not a string") - } - } - - if input.Type == ast.TypeList { - inputList, ok := input.Value.([]ast.Variable) - if !ok { - return nil, fmt.Errorf("ast.Variable with type list has value which is not a []ast.Variable") - } - - result := make([]interface{}, 0) - if len(inputList) == 0 { - return result, nil - } - - for _, element := range inputList { - if convertedElement, err := VariableToInterface(element); err == nil { - result = append(result, convertedElement) - } else { - return nil, err - } - } - - return result, nil - } - - if input.Type == ast.TypeMap { - inputMap, ok := input.Value.(map[string]ast.Variable) - if !ok { - return nil, fmt.Errorf("ast.Variable with type map has value which is not a map[string]ast.Variable") - } - - result := make(map[string]interface{}, 0) - if len(inputMap) == 0 { - return result, nil - } - - for key, value := range inputMap { - if convertedValue, err := VariableToInterface(value); err == nil { - result[key] = convertedValue - } else { - return nil, err - } - } - - return result, nil - } - - return nil, fmt.Errorf("unknown input type: %s", input.Type) -} diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go deleted file mode 100644 index 27820769e81a..000000000000 --- a/vendor/github.com/hashicorp/hil/eval.go +++ /dev/null @@ -1,472 +0,0 @@ -package hil - -import ( - "bytes" - "errors" - "fmt" - "sync" - - "github.com/hashicorp/hil/ast" -) - -// EvalConfig is the configuration for evaluating. 
-type EvalConfig struct { - // GlobalScope is the global scope of execution for evaluation. - GlobalScope *ast.BasicScope - - // SemanticChecks is a list of additional semantic checks that will be run - // on the tree prior to evaluating it. The type checker, identifier checker, - // etc. will be run before these automatically. - SemanticChecks []SemanticChecker -} - -// SemanticChecker is the type that must be implemented to do a -// semantic check on an AST tree. This will be called with the root node. -type SemanticChecker func(ast.Node) error - -// EvaluationResult is a struct returned from the hil.Eval function, -// representing the result of an interpolation. Results are returned in their -// "natural" Go structure rather than in terms of the HIL AST. For the types -// currently implemented, this means that the Value field can be interpreted as -// the following Go types: -// TypeInvalid: undefined -// TypeString: string -// TypeList: []interface{} -// TypeMap: map[string]interface{} -// TypBool: bool -type EvaluationResult struct { - Type EvalType - Value interface{} -} - -// InvalidResult is a structure representing the result of a HIL interpolation -// which has invalid syntax, missing variables, or some other type of error. -// The error is described out of band in the accompanying error return value. -var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil} - -// errExitUnknown is an internal error that when returned means the result -// is an unknown value. We use this for early exit. -var errExitUnknown = errors.New("unknown value") - -func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) { - output, outputType, err := internalEval(root, config) - if err != nil { - return InvalidResult, err - } - - // If the result contains any nested unknowns then the result as a whole - // is unknown, so that callers only have to deal with "entirely known" - // or "entirely unknown" as outcomes. 
- if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) { - outputType = ast.TypeUnknown - output = UnknownValue - } - - switch outputType { - case ast.TypeList: - val, err := VariableToInterface(ast.Variable{ - Type: ast.TypeList, - Value: output, - }) - return EvaluationResult{ - Type: TypeList, - Value: val, - }, err - case ast.TypeMap: - val, err := VariableToInterface(ast.Variable{ - Type: ast.TypeMap, - Value: output, - }) - return EvaluationResult{ - Type: TypeMap, - Value: val, - }, err - case ast.TypeString: - return EvaluationResult{ - Type: TypeString, - Value: output, - }, nil - case ast.TypeBool: - return EvaluationResult{ - Type: TypeBool, - Value: output, - }, nil - case ast.TypeUnknown: - return EvaluationResult{ - Type: TypeUnknown, - Value: UnknownValue, - }, nil - default: - return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType) - } -} - -// Eval evaluates the given AST tree and returns its output value, the type -// of the output, and any error that occurred. 
-func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) { - // Copy the scope so we can add our builtins - if config == nil { - config = new(EvalConfig) - } - scope := registerBuiltins(config.GlobalScope) - implicitMap := map[ast.Type]map[ast.Type]string{ - ast.TypeFloat: { - ast.TypeInt: "__builtin_FloatToInt", - ast.TypeString: "__builtin_FloatToString", - }, - ast.TypeInt: { - ast.TypeFloat: "__builtin_IntToFloat", - ast.TypeString: "__builtin_IntToString", - }, - ast.TypeString: { - ast.TypeInt: "__builtin_StringToInt", - ast.TypeFloat: "__builtin_StringToFloat", - ast.TypeBool: "__builtin_StringToBool", - }, - ast.TypeBool: { - ast.TypeString: "__builtin_BoolToString", - }, - } - - // Build our own semantic checks that we always run - tv := &TypeCheck{Scope: scope, Implicit: implicitMap} - ic := &IdentifierCheck{Scope: scope} - - // Build up the semantic checks for execution - checks := make( - []SemanticChecker, - len(config.SemanticChecks), - len(config.SemanticChecks)+2) - copy(checks, config.SemanticChecks) - checks = append(checks, ic.Visit) - checks = append(checks, tv.Visit) - - // Run the semantic checks - for _, check := range checks { - if err := check(root); err != nil { - return nil, ast.TypeInvalid, err - } - } - - // Execute - v := &evalVisitor{Scope: scope} - return v.Visit(root) -} - -// EvalNode is the interface that must be implemented by any ast.Node -// to support evaluation. This will be called in visitor pattern order. -// The result of each call to Eval is automatically pushed onto the -// stack as a LiteralNode. Pop elements off the stack to get child -// values. 
-type EvalNode interface { - Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) -} - -type evalVisitor struct { - Scope ast.Scope - Stack ast.Stack - - err error - lock sync.Mutex -} - -func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) { - // Run the actual visitor pattern - root.Accept(v.visit) - - // Get our result and clear out everything else - var result *ast.LiteralNode - if v.Stack.Len() > 0 { - result = v.Stack.Pop().(*ast.LiteralNode) - } else { - result = new(ast.LiteralNode) - } - resultErr := v.err - if resultErr == errExitUnknown { - // This means the return value is unknown and we used the error - // as an early exit mechanism. Reset since the value on the stack - // should be the unknown value. - resultErr = nil - } - - // Clear everything else so we aren't just dangling - v.Stack.Reset() - v.err = nil - - t, err := result.Type(v.Scope) - if err != nil { - return nil, ast.TypeInvalid, err - } - - return result.Value, t, resultErr -} - -func (v *evalVisitor) visit(raw ast.Node) ast.Node { - if v.err != nil { - return raw - } - - en, err := evalNode(raw) - if err != nil { - v.err = err - return raw - } - - out, outType, err := en.Eval(v.Scope, &v.Stack) - if err != nil { - v.err = err - return raw - } - - v.Stack.Push(&ast.LiteralNode{ - Value: out, - Typex: outType, - }) - - if outType == ast.TypeUnknown { - // Halt immediately - v.err = errExitUnknown - return raw - } - - return raw -} - -// evalNode is a private function that returns an EvalNode for built-in -// types as well as any other EvalNode implementations. 
-func evalNode(raw ast.Node) (EvalNode, error) { - switch n := raw.(type) { - case *ast.Index: - return &evalIndex{n}, nil - case *ast.Call: - return &evalCall{n}, nil - case *ast.Conditional: - return &evalConditional{n}, nil - case *ast.Output: - return &evalOutput{n}, nil - case *ast.LiteralNode: - return &evalLiteralNode{n}, nil - case *ast.VariableAccess: - return &evalVariableAccess{n}, nil - default: - en, ok := n.(EvalNode) - if !ok { - return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw) - } - - return en, nil - } -} - -type evalCall struct{ *ast.Call } - -func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - // Look up the function in the map - function, ok := s.LookupFunc(v.Func) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "unknown function called: %s", v.Func) - } - - // The arguments are on the stack in reverse order, so pop them off. - args := make([]interface{}, len(v.Args)) - for i, _ := range v.Args { - node := stack.Pop().(*ast.LiteralNode) - if node.IsUnknown() { - // If any arguments are unknown then the result is automatically unknown - return UnknownValue, ast.TypeUnknown, nil - } - args[len(v.Args)-1-i] = node.Value - } - - // Call the function - result, err := function.Callback(args) - if err != nil { - return nil, ast.TypeInvalid, fmt.Errorf("%s: %s", v.Func, err) - } - - return result, function.ReturnType, nil -} - -type evalConditional struct{ *ast.Conditional } - -func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - // On the stack we have literal nodes representing the resulting values - // of the condition, true and false expressions, but they are in reverse - // order. 
- falseLit := stack.Pop().(*ast.LiteralNode) - trueLit := stack.Pop().(*ast.LiteralNode) - condLit := stack.Pop().(*ast.LiteralNode) - - if condLit.IsUnknown() { - // If our conditional is unknown then our result is also unknown - return UnknownValue, ast.TypeUnknown, nil - } - - if condLit.Value.(bool) { - return trueLit.Value, trueLit.Typex, nil - } else { - return falseLit.Value, trueLit.Typex, nil - } -} - -type evalIndex struct{ *ast.Index } - -func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - key := stack.Pop().(*ast.LiteralNode) - target := stack.Pop().(*ast.LiteralNode) - - variableName := v.Index.Target.(*ast.VariableAccess).Name - - if key.IsUnknown() { - // If our key is unknown then our result is also unknown - return UnknownValue, ast.TypeUnknown, nil - } - - // For target, we'll accept collections containing unknown values but - // we still need to catch when the collection itself is unknown, shallowly. - if target.Typex == ast.TypeUnknown { - return UnknownValue, ast.TypeUnknown, nil - } - - switch target.Typex { - case ast.TypeList: - return v.evalListIndex(variableName, target.Value, key.Value) - case ast.TypeMap: - return v.evalMapIndex(variableName, target.Value, key.Value) - default: - return nil, ast.TypeInvalid, fmt.Errorf( - "target %q for indexing must be ast.TypeList or ast.TypeMap, is %s", - variableName, target.Typex) - } -} - -func (v *evalIndex) evalListIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { - // We assume type checking was already done and we can assume that target - // is a list and key is an int - list, ok := target.([]ast.Variable) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast target to []Variable, is: %T", target) - } - - keyInt, ok := key.(int) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast key to int, is: %T", key) - } - - if len(list) == 0 { - return nil, ast.TypeInvalid, 
fmt.Errorf("list is empty") - } - - if keyInt < 0 || len(list) < keyInt+1 { - return nil, ast.TypeInvalid, fmt.Errorf( - "index %d out of range for list %s (max %d)", - keyInt, variableName, len(list)) - } - - returnVal := list[keyInt].Value - returnType := list[keyInt].Type - return returnVal, returnType, nil -} - -func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { - // We assume type checking was already done and we can assume that target - // is a map and key is a string - vmap, ok := target.(map[string]ast.Variable) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast target to map[string]Variable, is: %T", target) - } - - keyString, ok := key.(string) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "cannot cast key to string, is: %T", key) - } - - if len(vmap) == 0 { - return nil, ast.TypeInvalid, fmt.Errorf("map is empty") - } - - value, ok := vmap[keyString] - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "key %q does not exist in map %s", keyString, variableName) - } - - return value.Value, value.Type, nil -} - -type evalOutput struct{ *ast.Output } - -func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - // The expressions should all be on the stack in reverse - // order. So pop them off, reverse their order, and concatenate. - nodes := make([]*ast.LiteralNode, 0, len(v.Exprs)) - haveUnknown := false - for range v.Exprs { - n := stack.Pop().(*ast.LiteralNode) - nodes = append(nodes, n) - - // If we have any unknowns then the whole result is unknown - // (we must deal with this first, because the type checker can - // skip type conversions in the presence of unknowns, and thus - // any of our other nodes may be incorrectly typed.) 
- if n.IsUnknown() { - haveUnknown = true - } - } - - if haveUnknown { - return UnknownValue, ast.TypeUnknown, nil - } - - // Special case the single list and map - if len(nodes) == 1 { - switch t := nodes[0].Typex; t { - case ast.TypeList: - fallthrough - case ast.TypeMap: - fallthrough - case ast.TypeUnknown: - return nodes[0].Value, t, nil - } - } - - // Otherwise concatenate the strings - var buf bytes.Buffer - for i := len(nodes) - 1; i >= 0; i-- { - if nodes[i].Typex != ast.TypeString { - return nil, ast.TypeInvalid, fmt.Errorf( - "invalid output with %s value at index %d: %#v", - nodes[i].Typex, - i, - nodes[i].Value, - ) - } - buf.WriteString(nodes[i].Value.(string)) - } - - return buf.String(), ast.TypeString, nil -} - -type evalLiteralNode struct{ *ast.LiteralNode } - -func (v *evalLiteralNode) Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) { - return v.Value, v.Typex, nil -} - -type evalVariableAccess struct{ *ast.VariableAccess } - -func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, ast.Type, error) { - // Look up the variable in the map - variable, ok := scope.LookupVar(v.Name) - if !ok { - return nil, ast.TypeInvalid, fmt.Errorf( - "unknown variable accessed: %s", v.Name) - } - - return variable.Value, variable.Type, nil -} diff --git a/vendor/github.com/hashicorp/hil/eval_type.go b/vendor/github.com/hashicorp/hil/eval_type.go deleted file mode 100644 index 6946ecd23f09..000000000000 --- a/vendor/github.com/hashicorp/hil/eval_type.go +++ /dev/null @@ -1,16 +0,0 @@ -package hil - -//go:generate stringer -type=EvalType eval_type.go - -// EvalType represents the type of the output returned from a HIL -// evaluation. 
-type EvalType uint32 - -const ( - TypeInvalid EvalType = 0 - TypeString EvalType = 1 << iota - TypeBool - TypeList - TypeMap - TypeUnknown -) diff --git a/vendor/github.com/hashicorp/hil/evaltype_string.go b/vendor/github.com/hashicorp/hil/evaltype_string.go deleted file mode 100644 index b107ddd451dd..000000000000 --- a/vendor/github.com/hashicorp/hil/evaltype_string.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by "stringer -type=EvalType eval_type.go"; DO NOT EDIT - -package hil - -import "fmt" - -const ( - _EvalType_name_0 = "TypeInvalid" - _EvalType_name_1 = "TypeString" - _EvalType_name_2 = "TypeBool" - _EvalType_name_3 = "TypeList" - _EvalType_name_4 = "TypeMap" - _EvalType_name_5 = "TypeUnknown" -) - -var ( - _EvalType_index_0 = [...]uint8{0, 11} - _EvalType_index_1 = [...]uint8{0, 10} - _EvalType_index_2 = [...]uint8{0, 8} - _EvalType_index_3 = [...]uint8{0, 8} - _EvalType_index_4 = [...]uint8{0, 7} - _EvalType_index_5 = [...]uint8{0, 11} -) - -func (i EvalType) String() string { - switch { - case i == 0: - return _EvalType_name_0 - case i == 2: - return _EvalType_name_1 - case i == 4: - return _EvalType_name_2 - case i == 8: - return _EvalType_name_3 - case i == 16: - return _EvalType_name_4 - case i == 32: - return _EvalType_name_5 - default: - return fmt.Sprintf("EvalType(%d)", i) - } -} diff --git a/vendor/github.com/hashicorp/hil/go.mod b/vendor/github.com/hashicorp/hil/go.mod deleted file mode 100644 index 45719a69b7d4..000000000000 --- a/vendor/github.com/hashicorp/hil/go.mod +++ /dev/null @@ -1,6 +0,0 @@ -module github.com/hashicorp/hil - -require ( - github.com/mitchellh/mapstructure v1.1.2 - github.com/mitchellh/reflectwalk v1.0.0 -) diff --git a/vendor/github.com/hashicorp/hil/go.sum b/vendor/github.com/hashicorp/hil/go.sum deleted file mode 100644 index 83639b6919db..000000000000 --- a/vendor/github.com/hashicorp/hil/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/mitchellh/mapstructure v1.1.2 
h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= diff --git a/vendor/github.com/hashicorp/hil/parse.go b/vendor/github.com/hashicorp/hil/parse.go deleted file mode 100644 index ecbe1fdbfa42..000000000000 --- a/vendor/github.com/hashicorp/hil/parse.go +++ /dev/null @@ -1,29 +0,0 @@ -package hil - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/parser" - "github.com/hashicorp/hil/scanner" -) - -// Parse parses the given program and returns an executable AST tree. -// -// Syntax errors are returned with error having the dynamic type -// *parser.ParseError, which gives the caller access to the source position -// where the error was found, which allows (for example) combining it with -// a known source filename to add context to the error message. -func Parse(v string) (ast.Node, error) { - return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1}) -} - -// ParseWithPosition is like Parse except that it overrides the source -// row and column position of the first character in the string, which should -// be 1-based. -// -// This can be used when HIL is embedded in another language and the outer -// parser knows the row and column where the HIL expression started within -// the overall source file. 
-func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) { - ch := scanner.Scan(v, pos) - return parser.Parse(ch) -} diff --git a/vendor/github.com/hashicorp/hil/parser/binary_op.go b/vendor/github.com/hashicorp/hil/parser/binary_op.go deleted file mode 100644 index 2e013e01d6ca..000000000000 --- a/vendor/github.com/hashicorp/hil/parser/binary_op.go +++ /dev/null @@ -1,45 +0,0 @@ -package parser - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -var binaryOps []map[scanner.TokenType]ast.ArithmeticOp - -func init() { - // This operation table maps from the operator's scanner token type - // to the AST arithmetic operation. All expressions produced from - // binary operators are *ast.Arithmetic nodes. - // - // Binary operator groups are listed in order of precedence, with - // the *lowest* precedence first. Operators within the same group - // have left-to-right associativity. - binaryOps = []map[scanner.TokenType]ast.ArithmeticOp{ - { - scanner.OR: ast.ArithmeticOpLogicalOr, - }, - { - scanner.AND: ast.ArithmeticOpLogicalAnd, - }, - { - scanner.EQUAL: ast.ArithmeticOpEqual, - scanner.NOTEQUAL: ast.ArithmeticOpNotEqual, - }, - { - scanner.GT: ast.ArithmeticOpGreaterThan, - scanner.GTE: ast.ArithmeticOpGreaterThanOrEqual, - scanner.LT: ast.ArithmeticOpLessThan, - scanner.LTE: ast.ArithmeticOpLessThanOrEqual, - }, - { - scanner.PLUS: ast.ArithmeticOpAdd, - scanner.MINUS: ast.ArithmeticOpSub, - }, - { - scanner.STAR: ast.ArithmeticOpMul, - scanner.SLASH: ast.ArithmeticOpDiv, - scanner.PERCENT: ast.ArithmeticOpMod, - }, - } -} diff --git a/vendor/github.com/hashicorp/hil/parser/error.go b/vendor/github.com/hashicorp/hil/parser/error.go deleted file mode 100644 index bacd696457d2..000000000000 --- a/vendor/github.com/hashicorp/hil/parser/error.go +++ /dev/null @@ -1,38 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -type ParseError struct { - Message 
string - Pos ast.Pos -} - -func Errorf(pos ast.Pos, format string, args ...interface{}) error { - return &ParseError{ - Message: fmt.Sprintf(format, args...), - Pos: pos, - } -} - -// TokenErrorf is a convenient wrapper around Errorf that uses the -// position of the given token. -func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error { - return Errorf(token.Pos, format, args...) -} - -func ExpectationError(wanted string, got *scanner.Token) error { - return TokenErrorf(got, "expected %s but found %s", wanted, got) -} - -func (e *ParseError) Error() string { - return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message) -} - -func (e *ParseError) String() string { - return e.Error() -} diff --git a/vendor/github.com/hashicorp/hil/parser/fuzz.go b/vendor/github.com/hashicorp/hil/parser/fuzz.go deleted file mode 100644 index de954f383600..000000000000 --- a/vendor/github.com/hashicorp/hil/parser/fuzz.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build gofuzz - -package parser - -import ( - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -// This is a fuzz testing function designed to be used with go-fuzz: -// https://github.com/dvyukov/go-fuzz -// -// It's not included in a normal build due to the gofuzz build tag above. -// -// There are some input files that you can use as a seed corpus for go-fuzz -// in the directory ./fuzz-corpus . 
- -func Fuzz(data []byte) int { - str := string(data) - - ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1}) - _, err := Parse(ch) - if err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/hashicorp/hil/parser/parser.go b/vendor/github.com/hashicorp/hil/parser/parser.go deleted file mode 100644 index 376f1c49da3b..000000000000 --- a/vendor/github.com/hashicorp/hil/parser/parser.go +++ /dev/null @@ -1,522 +0,0 @@ -package parser - -import ( - "strconv" - "unicode/utf8" - - "github.com/hashicorp/hil/ast" - "github.com/hashicorp/hil/scanner" -) - -func Parse(ch <-chan *scanner.Token) (ast.Node, error) { - peeker := scanner.NewPeeker(ch) - parser := &parser{peeker} - output, err := parser.ParseTopLevel() - peeker.Close() - return output, err -} - -type parser struct { - peeker *scanner.Peeker -} - -func (p *parser) ParseTopLevel() (ast.Node, error) { - return p.parseInterpolationSeq(false) -} - -func (p *parser) ParseQuoted() (ast.Node, error) { - return p.parseInterpolationSeq(true) -} - -// parseInterpolationSeq parses either the top-level sequence of literals -// and interpolation expressions or a similar sequence within a quoted -// string inside an interpolation expression. The latter case is requested -// by setting 'quoted' to true. 
-func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) { - literalType := scanner.LITERAL - endType := scanner.EOF - if quoted { - // exceptions for quoted sequences - literalType = scanner.STRING - endType = scanner.CQUOTE - } - - startPos := p.peeker.Peek().Pos - - if quoted { - tok := p.peeker.Read() - if tok.Type != scanner.OQUOTE { - return nil, ExpectationError("open quote", tok) - } - } - - var exprs []ast.Node - for { - tok := p.peeker.Read() - - if tok.Type == endType { - break - } - - switch tok.Type { - case literalType: - val, err := p.parseStringToken(tok) - if err != nil { - return nil, err - } - exprs = append(exprs, &ast.LiteralNode{ - Value: val, - Typex: ast.TypeString, - Posx: tok.Pos, - }) - case scanner.BEGIN: - expr, err := p.ParseInterpolation() - if err != nil { - return nil, err - } - exprs = append(exprs, expr) - default: - return nil, ExpectationError(`"${"`, tok) - } - } - - if len(exprs) == 0 { - // If we have no parts at all then the input must've - // been an empty string. - exprs = append(exprs, &ast.LiteralNode{ - Value: "", - Typex: ast.TypeString, - Posx: startPos, - }) - } - - // As a special case, if our "Output" contains only one expression - // and it's a literal string then we'll hoist it up to be our - // direct return value, so callers can easily recognize a string - // that has no interpolations at all. - if len(exprs) == 1 { - if lit, ok := exprs[0].(*ast.LiteralNode); ok { - if lit.Typex == ast.TypeString { - return lit, nil - } - } - } - - return &ast.Output{ - Exprs: exprs, - Posx: startPos, - }, nil -} - -// parseStringToken takes a token of either LITERAL or STRING type and -// returns the interpreted string, after processing any relevant -// escape sequences. 
-func (p *parser) parseStringToken(tok *scanner.Token) (string, error) { - var backslashes bool - switch tok.Type { - case scanner.LITERAL: - backslashes = false - case scanner.STRING: - backslashes = true - default: - panic("unsupported string token type") - } - - raw := []byte(tok.Content) - buf := make([]byte, 0, len(raw)) - - for i := 0; i < len(raw); i++ { - b := raw[i] - more := len(raw) > (i + 1) - - if b == '$' { - if more && raw[i+1] == '$' { - // skip over the second dollar sign - i++ - } - } else if backslashes && b == '\\' { - if !more { - return "", Errorf( - ast.Pos{ - Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), - Line: tok.Pos.Line, - }, - `unfinished backslash escape sequence`, - ) - } - escapeType := raw[i+1] - switch escapeType { - case '\\': - // skip over the second slash - i++ - case 'n': - b = '\n' - i++ - case '"': - b = '"' - i++ - default: - return "", Errorf( - ast.Pos{ - Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), - Line: tok.Pos.Line, - }, - `invalid backslash escape sequence`, - ) - } - } - - buf = append(buf, b) - } - - return string(buf), nil -} - -func (p *parser) ParseInterpolation() (ast.Node, error) { - // By the time we're called, we're already "inside" the ${ sequence - // because the caller consumed the ${ token. - - expr, err := p.ParseExpression() - if err != nil { - return nil, err - } - - err = p.requireTokenType(scanner.END, `"}"`) - if err != nil { - return nil, err - } - - return expr, nil -} - -func (p *parser) ParseExpression() (ast.Node, error) { - return p.parseTernaryCond() -} - -func (p *parser) parseTernaryCond() (ast.Node, error) { - // The ternary condition operator (.. ? .. : ..) behaves somewhat - // like a binary operator except that the "operator" is itself - // an expression enclosed in two punctuation characters. - // The middle expression is parsed as if the ? and : symbols - // were parentheses. 
The "rhs" (the "false expression") is then - // treated right-associatively so it behaves similarly to the - // middle in terms of precedence. - - startPos := p.peeker.Peek().Pos - - var cond, trueExpr, falseExpr ast.Node - var err error - - cond, err = p.parseBinaryOps(binaryOps) - if err != nil { - return nil, err - } - - next := p.peeker.Peek() - if next.Type != scanner.QUESTION { - return cond, nil - } - - p.peeker.Read() // eat question mark - - trueExpr, err = p.ParseExpression() - if err != nil { - return nil, err - } - - colon := p.peeker.Read() - if colon.Type != scanner.COLON { - return nil, ExpectationError(":", colon) - } - - falseExpr, err = p.ParseExpression() - if err != nil { - return nil, err - } - - return &ast.Conditional{ - CondExpr: cond, - TrueExpr: trueExpr, - FalseExpr: falseExpr, - Posx: startPos, - }, nil -} - -// parseBinaryOps calls itself recursively to work through all of the -// operator precedence groups, and then eventually calls ParseExpressionTerm -// for each operand. -func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) { - if len(ops) == 0 { - // We've run out of operators, so now we'll just try to parse a term. - return p.ParseExpressionTerm() - } - - thisLevel := ops[0] - remaining := ops[1:] - - startPos := p.peeker.Peek().Pos - - var lhs, rhs ast.Node - operator := ast.ArithmeticOpInvalid - var err error - - // parse a term that might be the first operand of a binary - // expression or it might just be a standalone term, but - // we won't know until we've parsed it and can look ahead - // to see if there's an operator token. 
- lhs, err = p.parseBinaryOps(remaining) - if err != nil { - return nil, err - } - - // We'll keep eating up arithmetic operators until we run - // out, so that operators with the same precedence will combine in a - // left-associative manner: - // a+b+c => (a+b)+c, not a+(b+c) - // - // Should we later want to have right-associative operators, a way - // to achieve that would be to call back up to ParseExpression here - // instead of iteratively parsing only the remaining operators. - for { - next := p.peeker.Peek() - var newOperator ast.ArithmeticOp - var ok bool - if newOperator, ok = thisLevel[next.Type]; !ok { - break - } - - // Are we extending an expression started on - // the previous iteration? - if operator != ast.ArithmeticOpInvalid { - lhs = &ast.Arithmetic{ - Op: operator, - Exprs: []ast.Node{lhs, rhs}, - Posx: startPos, - } - } - - operator = newOperator - p.peeker.Read() // eat operator token - rhs, err = p.parseBinaryOps(remaining) - if err != nil { - return nil, err - } - } - - if operator != ast.ArithmeticOpInvalid { - return &ast.Arithmetic{ - Op: operator, - Exprs: []ast.Node{lhs, rhs}, - Posx: startPos, - }, nil - } else { - return lhs, nil - } -} - -func (p *parser) ParseExpressionTerm() (ast.Node, error) { - - next := p.peeker.Peek() - - switch next.Type { - - case scanner.OPAREN: - p.peeker.Read() - expr, err := p.ParseExpression() - if err != nil { - return nil, err - } - err = p.requireTokenType(scanner.CPAREN, `")"`) - return expr, err - - case scanner.OQUOTE: - return p.ParseQuoted() - - case scanner.INTEGER: - tok := p.peeker.Read() - val, err := strconv.Atoi(tok.Content) - if err != nil { - return nil, TokenErrorf(tok, "invalid integer: %s", err) - } - return &ast.LiteralNode{ - Value: val, - Typex: ast.TypeInt, - Posx: tok.Pos, - }, nil - - case scanner.FLOAT: - tok := p.peeker.Read() - val, err := strconv.ParseFloat(tok.Content, 64) - if err != nil { - return nil, TokenErrorf(tok, "invalid float: %s", err) - } - return 
&ast.LiteralNode{ - Value: val, - Typex: ast.TypeFloat, - Posx: tok.Pos, - }, nil - - case scanner.BOOL: - tok := p.peeker.Read() - // the scanner guarantees that tok.Content is either "true" or "false" - var val bool - if tok.Content[0] == 't' { - val = true - } else { - val = false - } - return &ast.LiteralNode{ - Value: val, - Typex: ast.TypeBool, - Posx: tok.Pos, - }, nil - - case scanner.MINUS: - opTok := p.peeker.Read() - // important to use ParseExpressionTerm rather than ParseExpression - // here, otherwise we can capture a following binary expression into - // our negation. - // e.g. -46+5 should parse as (0-46)+5, not 0-(46+5) - operand, err := p.ParseExpressionTerm() - if err != nil { - return nil, err - } - // The AST currently represents negative numbers as - // a binary subtraction of the number from zero. - return &ast.Arithmetic{ - Op: ast.ArithmeticOpSub, - Exprs: []ast.Node{ - &ast.LiteralNode{ - Value: 0, - Typex: ast.TypeInt, - Posx: opTok.Pos, - }, - operand, - }, - Posx: opTok.Pos, - }, nil - - case scanner.BANG: - opTok := p.peeker.Read() - // important to use ParseExpressionTerm rather than ParseExpression - // here, otherwise we can capture a following binary expression into - // our negation. - operand, err := p.ParseExpressionTerm() - if err != nil { - return nil, err - } - // The AST currently represents binary negation as an equality - // test with "false". - return &ast.Arithmetic{ - Op: ast.ArithmeticOpEqual, - Exprs: []ast.Node{ - &ast.LiteralNode{ - Value: false, - Typex: ast.TypeBool, - Posx: opTok.Pos, - }, - operand, - }, - Posx: opTok.Pos, - }, nil - - case scanner.IDENTIFIER: - return p.ParseScopeInteraction() - - default: - return nil, ExpectationError("expression", next) - } -} - -// ParseScopeInteraction parses the expression types that interact -// with the evaluation scope: variable access, function calls, and -// indexing. -// -// Indexing should actually be a distinct operator in its own right, -// so that e.g. 
it can be applied to the result of a function call, -// but for now we're preserving the behavior of the older yacc-based -// parser. -func (p *parser) ParseScopeInteraction() (ast.Node, error) { - first := p.peeker.Read() - startPos := first.Pos - if first.Type != scanner.IDENTIFIER { - return nil, ExpectationError("identifier", first) - } - - next := p.peeker.Peek() - if next.Type == scanner.OPAREN { - // function call - funcName := first.Content - p.peeker.Read() // eat paren - var args []ast.Node - - for { - if p.peeker.Peek().Type == scanner.CPAREN { - break - } - - arg, err := p.ParseExpression() - if err != nil { - return nil, err - } - - args = append(args, arg) - - if p.peeker.Peek().Type == scanner.COMMA { - p.peeker.Read() // eat comma - continue - } else { - break - } - } - - err := p.requireTokenType(scanner.CPAREN, `")"`) - if err != nil { - return nil, err - } - - return &ast.Call{ - Func: funcName, - Args: args, - Posx: startPos, - }, nil - } - - varNode := &ast.VariableAccess{ - Name: first.Content, - Posx: startPos, - } - - if p.peeker.Peek().Type == scanner.OBRACKET { - // index operator - startPos := p.peeker.Read().Pos // eat bracket - indexExpr, err := p.ParseExpression() - if err != nil { - return nil, err - } - err = p.requireTokenType(scanner.CBRACKET, `"]"`) - if err != nil { - return nil, err - } - return &ast.Index{ - Target: varNode, - Key: indexExpr, - Posx: startPos, - }, nil - } - - return varNode, nil -} - -// requireTokenType consumes the next token an returns an error if its -// type does not match the given type. nil is returned if the type matches. -// -// This is a helper around peeker.Read() for situations where the parser just -// wants to assert that a particular token type must be present. 
-func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error { - token := p.peeker.Read() - if token.Type != wantType { - return ExpectationError(wantName, token) - } - return nil -} diff --git a/vendor/github.com/hashicorp/hil/scanner/peeker.go b/vendor/github.com/hashicorp/hil/scanner/peeker.go deleted file mode 100644 index 4de372831fc8..000000000000 --- a/vendor/github.com/hashicorp/hil/scanner/peeker.go +++ /dev/null @@ -1,55 +0,0 @@ -package scanner - -// Peeker is a utility that wraps a token channel returned by Scan and -// provides an interface that allows a caller (e.g. the parser) to -// work with the token stream in a mode that allows one token of lookahead, -// and provides utilities for more convenient processing of the stream. -type Peeker struct { - ch <-chan *Token - peeked *Token -} - -func NewPeeker(ch <-chan *Token) *Peeker { - return &Peeker{ - ch: ch, - } -} - -// Peek returns the next token in the stream without consuming it. A -// subsequent call to Read will return the same token. -func (p *Peeker) Peek() *Token { - if p.peeked == nil { - p.peeked = <-p.ch - } - return p.peeked -} - -// Read consumes the next token in the stream and returns it. -func (p *Peeker) Read() *Token { - token := p.Peek() - - // As a special case, we will produce the EOF token forever once - // it is reached. - if token.Type != EOF { - p.peeked = nil - } - - return token -} - -// Close ensures that the token stream has been exhausted, to prevent -// the goroutine in the underlying scanner from leaking. -// -// It's not necessary to call this if the caller reads the token stream -// to EOF, since that implicitly closes the scanner. -func (p *Peeker) Close() { - for _ = range p.ch { - // discard - } - // Install a synthetic EOF token in 'peeked' in case someone - // erroneously calls Peek() or Read() after we've closed. 
- p.peeked = &Token{ - Type: EOF, - Content: "", - } -} diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go deleted file mode 100644 index 86085de018fa..000000000000 --- a/vendor/github.com/hashicorp/hil/scanner/scanner.go +++ /dev/null @@ -1,556 +0,0 @@ -package scanner - -import ( - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hil/ast" -) - -// Scan returns a channel that recieves Tokens from the given input string. -// -// The scanner's job is just to partition the string into meaningful parts. -// It doesn't do any transformation of the raw input string, so the caller -// must deal with any further interpretation required, such as parsing INTEGER -// tokens into real ints, or dealing with escape sequences in LITERAL or -// STRING tokens. -// -// Strings in the returned tokens are slices from the original string. -// -// startPos should be set to ast.InitPos unless the caller knows that -// this interpolation string is part of a larger file and knows the position -// of the first character in that larger file. -func Scan(s string, startPos ast.Pos) <-chan *Token { - ch := make(chan *Token) - go scan(s, ch, startPos) - return ch -} - -func scan(s string, ch chan<- *Token, pos ast.Pos) { - // 'remain' starts off as the whole string but we gradually - // slice of the front of it as we work our way through. - remain := s - - // nesting keeps track of how many ${ .. } sequences we are - // inside, so we can recognize the minor differences in syntax - // between outer string literals (LITERAL tokens) and quoted - // string literals (STRING tokens). - nesting := 0 - - // We're going to flip back and forth between parsing literals/strings - // and parsing interpolation sequences ${ .. } until we reach EOF or - // some INVALID token. -All: - for { - startPos := pos - // Literal string processing first, since the beginning of - // a string is always outside of an interpolation sequence. 
- literalVal, terminator := scanLiteral(remain, pos, nesting > 0) - - if len(literalVal) > 0 { - litType := LITERAL - if nesting > 0 { - litType = STRING - } - ch <- &Token{ - Type: litType, - Content: literalVal, - Pos: startPos, - } - remain = remain[len(literalVal):] - } - - ch <- terminator - remain = remain[len(terminator.Content):] - pos = terminator.Pos - // Safe to use len() here because none of the terminator tokens - // can contain UTF-8 sequences. - pos.Column = pos.Column + len(terminator.Content) - - switch terminator.Type { - case INVALID: - // Synthetic EOF after invalid token, since further scanning - // is likely to just produce more garbage. - ch <- &Token{ - Type: EOF, - Content: "", - Pos: pos, - } - break All - case EOF: - // All done! - break All - case BEGIN: - nesting++ - case CQUOTE: - // nothing special to do - default: - // Should never happen - panic("invalid string/literal terminator") - } - - // Now we do the processing of the insides of ${ .. } sequences. - // This loop terminates when we encounter either a closing } or - // an opening ", which will cause us to return to literal processing. - Interpolation: - for { - - token, size, newPos := scanInterpolationToken(remain, pos) - ch <- token - remain = remain[size:] - pos = newPos - - switch token.Type { - case INVALID: - // Synthetic EOF after invalid token, since further scanning - // is likely to just produce more garbage. - ch <- &Token{ - Type: EOF, - Content: "", - Pos: pos, - } - break All - case EOF: - // All done - // (though a syntax error that we'll catch in the parser) - break All - case END: - nesting-- - if nesting < 0 { - // Can happen if there are unbalanced ${ and } sequences - // in the input, which we'll catch in the parser. 
- nesting = 0 - } - break Interpolation - case OQUOTE: - // Beginning of nested quoted string - break Interpolation - } - } - } - - close(ch) -} - -// Returns the token found at the start of the given string, followed by -// the number of bytes that were consumed from the string and the adjusted -// source position. -// -// Note that the number of bytes consumed can be more than the length of -// the returned token contents if the string begins with whitespace, since -// it will be silently consumed before reading the token. -func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) { - pos := startPos - size := 0 - - // Consume whitespace, if any - for len(s) > 0 && byteIsSpace(s[0]) { - if s[0] == '\n' { - pos.Column = 1 - pos.Line++ - } else { - pos.Column++ - } - size++ - s = s[1:] - } - - // Unexpected EOF during sequence - if len(s) == 0 { - return &Token{ - Type: EOF, - Content: "", - Pos: pos, - }, size, pos - } - - next := s[0] - var token *Token - - switch next { - case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%', '?', ':': - // Easy punctuation symbols that don't have any special meaning - // during scanning, and that stand for themselves in the - // TokenType enumeration. 
- token = &Token{ - Type: TokenType(next), - Content: s[:1], - Pos: pos, - } - case '}': - token = &Token{ - Type: END, - Content: s[:1], - Pos: pos, - } - case '"': - token = &Token{ - Type: OQUOTE, - Content: s[:1], - Pos: pos, - } - case '!': - if len(s) >= 2 && s[:2] == "!=" { - token = &Token{ - Type: NOTEQUAL, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: BANG, - Content: s[:1], - Pos: pos, - } - } - case '<': - if len(s) >= 2 && s[:2] == "<=" { - token = &Token{ - Type: LTE, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: LT, - Content: s[:1], - Pos: pos, - } - } - case '>': - if len(s) >= 2 && s[:2] == ">=" { - token = &Token{ - Type: GTE, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: GT, - Content: s[:1], - Pos: pos, - } - } - case '=': - if len(s) >= 2 && s[:2] == "==" { - token = &Token{ - Type: EQUAL, - Content: s[:2], - Pos: pos, - } - } else { - // A single equals is not a valid operator - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - case '&': - if len(s) >= 2 && s[:2] == "&&" { - token = &Token{ - Type: AND, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - case '|': - if len(s) >= 2 && s[:2] == "||" { - token = &Token{ - Type: OR, - Content: s[:2], - Pos: pos, - } - } else { - token = &Token{ - Type: INVALID, - Content: s[:1], - Pos: pos, - } - } - default: - if next >= '0' && next <= '9' { - num, numType := scanNumber(s) - token = &Token{ - Type: numType, - Content: num, - Pos: pos, - } - } else if stringStartsWithIdentifier(s) { - ident, runeLen := scanIdentifier(s) - tokenType := IDENTIFIER - if ident == "true" || ident == "false" { - tokenType = BOOL - } - token = &Token{ - Type: tokenType, - Content: ident, - Pos: pos, - } - // Skip usual token handling because it doesn't - // know how to deal with UTF-8 sequences. 
- pos.Column = pos.Column + runeLen - return token, size + len(ident), pos - } else { - _, byteLen := utf8.DecodeRuneInString(s) - token = &Token{ - Type: INVALID, - Content: s[:byteLen], - Pos: pos, - } - // Skip usual token handling because it doesn't - // know how to deal with UTF-8 sequences. - pos.Column = pos.Column + 1 - return token, size + byteLen, pos - } - } - - // Here we assume that the token content contains no UTF-8 sequences, - // because we dealt with UTF-8 characters as a special case where - // necessary above. - size = size + len(token.Content) - pos.Column = pos.Column + len(token.Content) - - return token, size, pos -} - -// Returns the (possibly-empty) prefix of the given string that represents -// a literal, followed by the token that marks the end of the literal. -func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) { - litLen := 0 - pos := startPos - var terminator *Token - for { - - if litLen >= len(s) { - if nested { - // We've ended in the middle of a quoted string, - // which means this token is actually invalid. - return "", &Token{ - Type: INVALID, - Content: s, - Pos: startPos, - } - } - terminator = &Token{ - Type: EOF, - Content: "", - Pos: pos, - } - break - } - - next := s[litLen] - - if next == '$' && len(s) > litLen+1 { - follow := s[litLen+1] - - if follow == '{' { - terminator = &Token{ - Type: BEGIN, - Content: s[litLen : litLen+2], - Pos: pos, - } - pos.Column = pos.Column + 2 - break - } else if follow == '$' { - // Double-$ escapes the special processing of $, - // so we will consume both characters here. - pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } - } - - // special handling that applies only to quoted strings - if nested { - if next == '"' { - terminator = &Token{ - Type: CQUOTE, - Content: s[litLen : litLen+1], - Pos: pos, - } - pos.Column = pos.Column + 1 - break - } - - // Escaped quote marks do not terminate the string. 
- // - // All we do here in the scanner is avoid terminating a string - // due to an escaped quote. The parser is responsible for the - // full handling of escape sequences, since it's able to produce - // better error messages than we can produce in here. - if next == '\\' && len(s) > litLen+1 { - follow := s[litLen+1] - - if follow == '"' { - // \" escapes the special processing of ", - // so we will consume both characters here. - pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } else if follow == '\\' { - // \\ escapes \ - // so we will consume both characters here. - pos.Column = pos.Column + 2 - litLen = litLen + 2 - continue - } - } - } - - if next == '\n' { - pos.Column = 1 - pos.Line++ - litLen++ - } else { - pos.Column++ - - // "Column" measures runes, so we need to actually consume - // a valid UTF-8 character here. - _, size := utf8.DecodeRuneInString(s[litLen:]) - litLen = litLen + size - } - - } - - return s[:litLen], terminator -} - -// scanNumber returns the extent of the prefix of the string that represents -// a valid number, along with what type of number it represents: INT or FLOAT. -// -// scanNumber does only basic character analysis: numbers consist of digits -// and periods, with at least one period signalling a FLOAT. It's the parser's -// responsibility to validate the form and range of the number, such as ensuring -// that a FLOAT actually contains only one period, etc. -func scanNumber(s string) (string, TokenType) { - period := -1 - byteLen := 0 - numType := INTEGER - for { - if byteLen >= len(s) { - break - } - - next := s[byteLen] - if next != '.' && (next < '0' || next > '9') { - // If our last value was a period, then we're not a float, - // we're just an integer that ends in a period. - if period == byteLen-1 { - byteLen-- - numType = INTEGER - } - - break - } - - if next == '.' 
{ - // If we've already seen a period, break out - if period >= 0 { - break - } - - period = byteLen - numType = FLOAT - } - - byteLen++ - } - - return s[:byteLen], numType -} - -// scanIdentifier returns the extent of the prefix of the string that -// represents a valid identifier, along with the length of that prefix -// in runes. -// -// Identifiers may contain utf8-encoded non-Latin letters, which will -// cause the returned "rune length" to be shorter than the byte length -// of the returned string. -func scanIdentifier(s string) (string, int) { - byteLen := 0 - runeLen := 0 - for { - if byteLen >= len(s) { - break - } - - nextRune, size := utf8.DecodeRuneInString(s[byteLen:]) - if !(nextRune == '_' || - nextRune == '-' || - nextRune == '.' || - nextRune == '*' || - unicode.IsNumber(nextRune) || - unicode.IsLetter(nextRune) || - unicode.IsMark(nextRune)) { - break - } - - // If we reach a star, it must be between periods to be part - // of the same identifier. - if nextRune == '*' && s[byteLen-1] != '.' { - break - } - - // If our previous character was a star, then the current must - // be period. Otherwise, undo that and exit. - if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' { - byteLen-- - if s[byteLen-1] == '.' { - byteLen-- - } - - break - } - - byteLen = byteLen + size - runeLen = runeLen + 1 - } - - return s[:byteLen], runeLen -} - -// byteIsSpace implements a restrictive interpretation of spaces that includes -// only what's valid inside interpolation sequences: spaces, tabs, newlines. -func byteIsSpace(b byte) bool { - switch b { - case ' ', '\t', '\r', '\n': - return true - default: - return false - } -} - -// stringStartsWithIdentifier returns true if the given string begins with -// a character that is a legal start of an identifier: an underscore or -// any character that Unicode considers to be a letter. 
-func stringStartsWithIdentifier(s string) bool { - if len(s) == 0 { - return false - } - - first := s[0] - - // Easy ASCII cases first - if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' { - return true - } - - // If our first byte begins a UTF-8 sequence then the sequence might - // be a unicode letter. - if utf8.RuneStart(first) { - firstRune, _ := utf8.DecodeRuneInString(s) - if unicode.IsLetter(firstRune) { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/hil/scanner/token.go b/vendor/github.com/hashicorp/hil/scanner/token.go deleted file mode 100644 index b6c82ae9b08a..000000000000 --- a/vendor/github.com/hashicorp/hil/scanner/token.go +++ /dev/null @@ -1,105 +0,0 @@ -package scanner - -import ( - "fmt" - - "github.com/hashicorp/hil/ast" -) - -type Token struct { - Type TokenType - Content string - Pos ast.Pos -} - -//go:generate stringer -type=TokenType -type TokenType rune - -const ( - // Raw string data outside of ${ .. } sequences - LITERAL TokenType = 'o' - - // STRING is like a LITERAL but it's inside a quoted string - // within a ${ ... } sequence, and so it can contain backslash - // escaping. - STRING TokenType = 'S' - - // Other Literals - INTEGER TokenType = 'I' - FLOAT TokenType = 'F' - BOOL TokenType = 'B' - - BEGIN TokenType = '$' // actually "${" - END TokenType = '}' - OQUOTE TokenType = '“' // Opening quote of a nested quoted sequence - CQUOTE TokenType = '”' // Closing quote of a nested quoted sequence - OPAREN TokenType = '(' - CPAREN TokenType = ')' - OBRACKET TokenType = '[' - CBRACKET TokenType = ']' - COMMA TokenType = ',' - - IDENTIFIER TokenType = 'i' - - PERIOD TokenType = '.' - PLUS TokenType = '+' - MINUS TokenType = '-' - STAR TokenType = '*' - SLASH TokenType = '/' - PERCENT TokenType = '%' - - AND TokenType = '∧' - OR TokenType = '∨' - BANG TokenType = '!' 
- - EQUAL TokenType = '=' - NOTEQUAL TokenType = '≠' - GT TokenType = '>' - LT TokenType = '<' - GTE TokenType = '≥' - LTE TokenType = '≤' - - QUESTION TokenType = '?' - COLON TokenType = ':' - - EOF TokenType = '␄' - - // Produced for sequences that cannot be understood as valid tokens - // e.g. due to use of unrecognized punctuation. - INVALID TokenType = '�' -) - -func (t *Token) String() string { - switch t.Type { - case EOF: - return "end of string" - case INVALID: - return fmt.Sprintf("invalid sequence %q", t.Content) - case INTEGER: - return fmt.Sprintf("integer %s", t.Content) - case FLOAT: - return fmt.Sprintf("float %s", t.Content) - case STRING: - return fmt.Sprintf("string %q", t.Content) - case LITERAL: - return fmt.Sprintf("literal %q", t.Content) - case OQUOTE: - return fmt.Sprintf("opening quote") - case CQUOTE: - return fmt.Sprintf("closing quote") - case AND: - return "&&" - case OR: - return "||" - case NOTEQUAL: - return "!=" - case GTE: - return ">=" - case LTE: - return "<=" - default: - // The remaining token types have content that - // speaks for itself. 
- return fmt.Sprintf("%q", t.Content) - } -} diff --git a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go deleted file mode 100644 index a602f5fdd8e7..000000000000 --- a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by "stringer -type=TokenType"; DO NOT EDIT - -package scanner - -import "fmt" - -const _TokenType_name = "BANGBEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHCOLONLTEQUALGTQUESTIONBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEANDORNOTEQUALLTEGTEEOFINVALID" - -var _TokenType_map = map[TokenType]string{ - 33: _TokenType_name[0:4], - 36: _TokenType_name[4:9], - 37: _TokenType_name[9:16], - 40: _TokenType_name[16:22], - 41: _TokenType_name[22:28], - 42: _TokenType_name[28:32], - 43: _TokenType_name[32:36], - 44: _TokenType_name[36:41], - 45: _TokenType_name[41:46], - 46: _TokenType_name[46:52], - 47: _TokenType_name[52:57], - 58: _TokenType_name[57:62], - 60: _TokenType_name[62:64], - 61: _TokenType_name[64:69], - 62: _TokenType_name[69:71], - 63: _TokenType_name[71:79], - 66: _TokenType_name[79:83], - 70: _TokenType_name[83:88], - 73: _TokenType_name[88:95], - 83: _TokenType_name[95:101], - 91: _TokenType_name[101:109], - 93: _TokenType_name[109:117], - 105: _TokenType_name[117:127], - 111: _TokenType_name[127:134], - 125: _TokenType_name[134:137], - 8220: _TokenType_name[137:143], - 8221: _TokenType_name[143:149], - 8743: _TokenType_name[149:152], - 8744: _TokenType_name[152:154], - 8800: _TokenType_name[154:162], - 8804: _TokenType_name[162:165], - 8805: _TokenType_name[165:168], - 9220: _TokenType_name[168:171], - 65533: _TokenType_name[171:178], -} - -func (i TokenType) String() string { - if str, ok := _TokenType_map[i]; ok { - return str - } - return fmt.Sprintf("TokenType(%d)", i) -} diff --git a/vendor/github.com/hashicorp/hil/transform_fixed.go 
b/vendor/github.com/hashicorp/hil/transform_fixed.go deleted file mode 100644 index e69df294325b..000000000000 --- a/vendor/github.com/hashicorp/hil/transform_fixed.go +++ /dev/null @@ -1,29 +0,0 @@ -package hil - -import ( - "github.com/hashicorp/hil/ast" -) - -// FixedValueTransform transforms an AST to return a fixed value for -// all interpolations. i.e. you can make "hi ${anything}" always -// turn into "hi foo". -// -// The primary use case for this is for config validations where you can -// verify that interpolations result in a certain type of string. -func FixedValueTransform(root ast.Node, Value *ast.LiteralNode) ast.Node { - // We visit the nodes in top-down order - result := root - switch n := result.(type) { - case *ast.Output: - for i, v := range n.Exprs { - n.Exprs[i] = FixedValueTransform(v, Value) - } - case *ast.LiteralNode: - // We keep it as-is - default: - // Anything else we replace - result = Value - } - - return result -} diff --git a/vendor/github.com/hashicorp/hil/walk.go b/vendor/github.com/hashicorp/hil/walk.go deleted file mode 100644 index 0ace83065f9e..000000000000 --- a/vendor/github.com/hashicorp/hil/walk.go +++ /dev/null @@ -1,266 +0,0 @@ -package hil - -import ( - "fmt" - "reflect" - "strings" - - "github.com/hashicorp/hil/ast" - "github.com/mitchellh/reflectwalk" -) - -// WalkFn is the type of function to pass to Walk. Modify fields within -// WalkData to control whether replacement happens. -type WalkFn func(*WalkData) error - -// WalkData is the structure passed to the callback of the Walk function. -// -// This structure contains data passed in as well as fields that are expected -// to be written by the caller as a result. Please see the documentation for -// each field for more information. -type WalkData struct { - // Root is the parsed root of this HIL program - Root ast.Node - - // Location is the location within the structure where this - // value was found. 
This can be used to modify behavior within - // slices and so on. - Location reflectwalk.Location - - // The below two values must be set by the callback to have any effect. - // - // Replace, if true, will replace the value in the structure with - // ReplaceValue. It is up to the caller to make sure this is a string. - Replace bool - ReplaceValue string -} - -// Walk will walk an arbitrary Go structure and parse any string as an -// HIL program and call the callback cb to determine what to replace it -// with. -// -// This function is very useful for arbitrary HIL program interpolation -// across a complex configuration structure. Due to the heavy use of -// reflection in this function, it is recommend to write many unit tests -// with your typical configuration structures to hilp mitigate the risk -// of panics. -func Walk(v interface{}, cb WalkFn) error { - walker := &interpolationWalker{F: cb} - return reflectwalk.Walk(v, walker) -} - -// interpolationWalker implements interfaces for the reflectwalk package -// (github.com/mitchellh/reflectwalk) that can be used to automatically -// execute a callback for an interpolation. 
-type interpolationWalker struct { - F WalkFn - - key []string - lastValue reflect.Value - loc reflectwalk.Location - cs []reflect.Value - csKey []reflect.Value - csData interface{} - sliceIndex int - unknownKeys []string -} - -func (w *interpolationWalker) Enter(loc reflectwalk.Location) error { - w.loc = loc - return nil -} - -func (w *interpolationWalker) Exit(loc reflectwalk.Location) error { - w.loc = reflectwalk.None - - switch loc { - case reflectwalk.Map: - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.MapValue: - w.key = w.key[:len(w.key)-1] - w.csKey = w.csKey[:len(w.csKey)-1] - case reflectwalk.Slice: - // Split any values that need to be split - w.splitSlice() - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.SliceElem: - w.csKey = w.csKey[:len(w.csKey)-1] - } - - return nil -} - -func (w *interpolationWalker) Map(m reflect.Value) error { - w.cs = append(w.cs, m) - return nil -} - -func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error { - w.csData = k - w.csKey = append(w.csKey, k) - w.key = append(w.key, k.String()) - w.lastValue = v - return nil -} - -func (w *interpolationWalker) Slice(s reflect.Value) error { - w.cs = append(w.cs, s) - return nil -} - -func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error { - w.csKey = append(w.csKey, reflect.ValueOf(i)) - w.sliceIndex = i - return nil -} - -func (w *interpolationWalker) Primitive(v reflect.Value) error { - setV := v - - // We only care about strings - if v.Kind() == reflect.Interface { - setV = v - v = v.Elem() - } - if v.Kind() != reflect.String { - return nil - } - - astRoot, err := Parse(v.String()) - if err != nil { - return err - } - - // If the AST we got is just a literal string value with the same - // value then we ignore it. We have to check if its the same value - // because it is possible to input a string, get out a string, and - // have it be different. 
For example: "foo-$${bar}" turns into - // "foo-${bar}" - if n, ok := astRoot.(*ast.LiteralNode); ok { - if s, ok := n.Value.(string); ok && s == v.String() { - return nil - } - } - - if w.F == nil { - return nil - } - - data := WalkData{Root: astRoot, Location: w.loc} - if err := w.F(&data); err != nil { - return fmt.Errorf( - "%s in:\n\n%s", - err, v.String()) - } - - if data.Replace { - /* - if remove { - w.removeCurrent() - return nil - } - */ - - resultVal := reflect.ValueOf(data.ReplaceValue) - switch w.loc { - case reflectwalk.MapKey: - m := w.cs[len(w.cs)-1] - - // Delete the old value - var zero reflect.Value - m.SetMapIndex(w.csData.(reflect.Value), zero) - - // Set the new key with the existing value - m.SetMapIndex(resultVal, w.lastValue) - - // Set the key to be the new key - w.csData = resultVal - case reflectwalk.MapValue: - // If we're in a map, then the only way to set a map value is - // to set it directly. - m := w.cs[len(w.cs)-1] - mk := w.csData.(reflect.Value) - m.SetMapIndex(mk, resultVal) - default: - // Otherwise, we should be addressable - setV.Set(resultVal) - } - } - - return nil -} - -func (w *interpolationWalker) removeCurrent() { - // Append the key to the unknown keys - w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) - - for i := 1; i <= len(w.cs); i++ { - c := w.cs[len(w.cs)-i] - switch c.Kind() { - case reflect.Map: - // Zero value so that we delete the map key - var val reflect.Value - - // Get the key and delete it - k := w.csData.(reflect.Value) - c.SetMapIndex(k, val) - return - } - } - - panic("No container found for removeCurrent") -} - -func (w *interpolationWalker) replaceCurrent(v reflect.Value) { - c := w.cs[len(w.cs)-2] - switch c.Kind() { - case reflect.Map: - // Get the key and delete it - k := w.csKey[len(w.csKey)-1] - c.SetMapIndex(k, v) - } -} - -func (w *interpolationWalker) splitSlice() { - // Get the []interface{} slice so we can do some operations on - // it without dealing with reflection. 
We'll document each step - // here to be clear. - var s []interface{} - raw := w.cs[len(w.cs)-1] - switch v := raw.Interface().(type) { - case []interface{}: - s = v - case []map[string]interface{}: - return - default: - panic("Unknown kind: " + raw.Kind().String()) - } - - // Check if we have any elements that we need to split. If not, then - // just return since we're done. - split := false - if !split { - return - } - - // Make a new result slice that is twice the capacity to fit our growth. - result := make([]interface{}, 0, len(s)*2) - - // Go over each element of the original slice and start building up - // the resulting slice by splitting where we have to. - for _, v := range s { - sv, ok := v.(string) - if !ok { - // Not a string, so just set it - result = append(result, v) - continue - } - - // Not a string list, so just set it - result = append(result, sv) - } - - // Our slice is now done, we have to replace the slice now - // with this new one that we have. - w.replaceCurrent(reflect.ValueOf(result)) -} diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/terraform/LICENSE deleted file mode 100644 index c33dcc7c928c..000000000000 --- a/vendor/github.com/hashicorp/terraform/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. 
“Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. 
For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. 
- - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. 
You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. 
- diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go deleted file mode 100644 index 1449065e9d84..000000000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/expand.go +++ /dev/null @@ -1,152 +0,0 @@ -package flatmap - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hil" -) - -// Expand takes a map and a key (prefix) and expands that value into -// a more complex structure. This is the reverse of the Flatten operation. -func Expand(m map[string]string, key string) interface{} { - // If the key is exactly a key in the map, just return it - if v, ok := m[key]; ok { - if v == "true" { - return true - } else if v == "false" { - return false - } - - return v - } - - // Check if the key is an array, and if so, expand the array - if v, ok := m[key+".#"]; ok { - // If the count of the key is unknown, then just put the unknown - // value in the value itself. This will be detected by Terraform - // core later. - if v == hil.UnknownValue { - return v - } - - return expandArray(m, key) - } - - // Check if this is a prefix in the map - prefix := key + "." - for k := range m { - if strings.HasPrefix(k, prefix) { - return expandMap(m, prefix) - } - } - - return nil -} - -func expandArray(m map[string]string, prefix string) []interface{} { - num, err := strconv.ParseInt(m[prefix+".#"], 0, 0) - if err != nil { - panic(err) - } - - // If the number of elements in this array is 0, then return an - // empty slice as there is nothing to expand. Trying to expand it - // anyway could lead to crashes as any child maps, arrays or sets - // that no longer exist are still shown as empty with a count of 0. - if num == 0 { - return []interface{}{} - } - - // NOTE: "num" is not necessarily accurate, e.g. if a user tampers - // with state, so the following code should not crash when given a - // number of items more or less than what's given in num. 
The - // num key is mainly just a hint that this is a list or set. - - // The Schema "Set" type stores its values in an array format, but - // using numeric hash values instead of ordinal keys. Take the set - // of keys regardless of value, and expand them in numeric order. - // See GH-11042 for more details. - keySet := map[int]bool{} - computed := map[string]bool{} - for k := range m { - if !strings.HasPrefix(k, prefix+".") { - continue - } - - key := k[len(prefix)+1:] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - - // skip the count value - if key == "#" { - continue - } - - // strip the computed flag if there is one - if strings.HasPrefix(key, "~") { - key = key[1:] - computed[key] = true - } - - k, err := strconv.Atoi(key) - if err != nil { - panic(err) - } - keySet[int(k)] = true - } - - keysList := make([]int, 0, num) - for key := range keySet { - keysList = append(keysList, key) - } - sort.Ints(keysList) - - result := make([]interface{}, len(keysList)) - for i, key := range keysList { - keyString := strconv.Itoa(key) - if computed[keyString] { - keyString = "~" + keyString - } - result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString)) - } - - return result -} - -func expandMap(m map[string]string, prefix string) map[string]interface{} { - // Submaps may not have a '%' key, so we can't count on this value being - // here. If we don't have a count, just proceed as if we have have a map. 
- if count, ok := m[prefix+"%"]; ok && count == "0" { - return map[string]interface{}{} - } - - result := make(map[string]interface{}) - for k := range m { - if !strings.HasPrefix(k, prefix) { - continue - } - - key := k[len(prefix):] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - if _, ok := result[key]; ok { - continue - } - - // skip the map count value - if key == "%" { - continue - } - - result[key] = Expand(m, k[:len(prefix)+len(key)]) - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go deleted file mode 100644 index 9ff6e4265266..000000000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go +++ /dev/null @@ -1,71 +0,0 @@ -package flatmap - -import ( - "fmt" - "reflect" -) - -// Flatten takes a structure and turns into a flat map[string]string. -// -// Within the "thing" parameter, only primitive values are allowed. Structs are -// not supported. Therefore, it can only be slices, maps, primitives, and -// any combination of those together. -// -// See the tests for examples of what inputs are turned into. 
-func Flatten(thing map[string]interface{}) Map { - result := make(map[string]string) - - for k, raw := range thing { - flatten(result, k, reflect.ValueOf(raw)) - } - - return Map(result) -} - -func flatten(result map[string]string, prefix string, v reflect.Value) { - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - result[prefix] = "true" - } else { - result[prefix] = "false" - } - case reflect.Int: - result[prefix] = fmt.Sprintf("%d", v.Int()) - case reflect.Map: - flattenMap(result, prefix, v) - case reflect.Slice: - flattenSlice(result, prefix, v) - case reflect.String: - result[prefix] = v.String() - default: - panic(fmt.Sprintf("Unknown: %s", v)) - } -} - -func flattenMap(result map[string]string, prefix string, v reflect.Value) { - for _, k := range v.MapKeys() { - if k.Kind() == reflect.Interface { - k = k.Elem() - } - - if k.Kind() != reflect.String { - panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) - } - - flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) - } -} - -func flattenSlice(result map[string]string, prefix string, v reflect.Value) { - prefix = prefix + "." - - result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) - for i := 0; i < v.Len(); i++ { - flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) - } -} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go deleted file mode 100644 index 46b72c4014a0..000000000000 --- a/vendor/github.com/hashicorp/terraform/flatmap/map.go +++ /dev/null @@ -1,82 +0,0 @@ -package flatmap - -import ( - "strings" -) - -// Map is a wrapper around map[string]string that provides some helpers -// above it that assume the map is in the format that flatmap expects -// (the result of Flatten). -// -// All modifying functions such as Delete are done in-place unless -// otherwise noted. 
-type Map map[string]string - -// Contains returns true if the map contains the given key. -func (m Map) Contains(key string) bool { - for _, k := range m.Keys() { - if k == key { - return true - } - } - - return false -} - -// Delete deletes a key out of the map with the given prefix. -func (m Map) Delete(prefix string) { - for k, _ := range m { - match := k == prefix - if !match { - if !strings.HasPrefix(k, prefix) { - continue - } - - if k[len(prefix):len(prefix)+1] != "." { - continue - } - } - - delete(m, k) - } -} - -// Keys returns all of the top-level keys in this map -func (m Map) Keys() []string { - ks := make(map[string]struct{}) - for k, _ := range m { - idx := strings.Index(k, ".") - if idx == -1 { - idx = len(k) - } - - ks[k[:idx]] = struct{}{} - } - - result := make([]string, 0, len(ks)) - for k, _ := range ks { - result = append(result, k) - } - - return result -} - -// Merge merges the contents of the other Map into this one. -// -// This merge is smarter than a simple map iteration because it -// will fully replace arrays and other complex structures that -// are present in this map with the other map's. For example, if -// this map has a 3 element "foo" list, and m2 has a 2 element "foo" -// list, then the result will be that m has a 2 element "foo" -// list. -func (m Map) Merge(m2 Map) { - for _, prefix := range m2.Keys() { - m.Delete(prefix) - - for k, v := range m2 { - if strings.HasPrefix(k, prefix) { - m[k] = v - } - } - } -} diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md deleted file mode 100644 index d7b4b8d584b7..000000000000 --- a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ -# Contributing # - -Thanks for your contribution in advance. No matter what you will contribute to this project, pull request or bug report or feature discussion, it's always highly appreciated. 
- -## New API or feature ## - -I want to speak more about how to add new functions to this package. - -Package `xstring` is a collection of useful string functions which should be implemented in Go. It's a bit subject to say which function should be included and which should not. I set up following rules in order to make it clear and as objective as possible. - -* Rule 1: Only string algorithm, which takes string as input, can be included. -* Rule 2: If a function has been implemented in package `string`, it must not be included. -* Rule 3: If a function is not language neutral, it must not be included. -* Rule 4: If a function is a part of standard library in other languages, it can be included. -* Rule 5: If a function is quite useful in some famous framework or library, it can be included. - -New function must be discussed in project issues before submitting any code. If a pull request with new functions is sent without any ref issue, it will be rejected. - -## Pull request ## - -Pull request is always welcome. Just make sure you have run `go fmt` and all test cases passed before submit. - -If the pull request is to add a new API or feature, don't forget to update README.md and add new API in function list. 
diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE deleted file mode 100644 index 270177259365..000000000000 --- a/vendor/github.com/huandu/xstrings/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Huan Du - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md deleted file mode 100644 index 292bf2f39e13..000000000000 --- a/vendor/github.com/huandu/xstrings/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# xstrings # - -[![Build Status](https://travis-ci.org/huandu/xstrings.svg?branch=master)](https://travis-ci.org/huandu/xstrings) -[![GoDoc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://godoc.org/github.com/huandu/xstrings) -[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings) -[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master) - -Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions, which are widely used in other languages but absent in Go package [strings](http://golang.org/pkg/strings). - -All functions are well tested and carefully tuned for performance. - -## Propose a new function ## - -Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included. - -## Install ## - -Use `go get` to install this library. - - go get github.com/huandu/xstrings - -## API document ## - -See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document. - -## Function list ## - -Go functions have a unique naming style. One, who has experience in other language but new in Go, may have difficulties to find out right string function to use. - -Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers. 
- -### Package `xstrings` functions ### - -*Keep this table sorted by Function in ascending order.* - -| Function | Friends | # | -| -------- | ------- | --- | -| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | -| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | -| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | -| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | -| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | -| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | -| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | -| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | -| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | -| 
[Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | -| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | -| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | -| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | -| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | -| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | -| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | -| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | -| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | -| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | -| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | 
[#1](https://github.com/huandu/xstrings/issues/1) | -| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | -| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | -| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | -| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | - -### Package `strings` functions ### - -*Keep this table sorted by Function in ascending order.* - -| Function | Friends | -| -------- | ------- | -| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | -| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | -| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | -| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | -| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | -| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | -| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | -| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | -| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | -| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | -| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | -| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | -| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | -| 
[IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | -| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | -| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | -| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | -| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | -| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | -| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | -| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | -| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | -| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | -| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | -| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | -| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | -| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | -| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | -| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | -| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | -| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | -| 
[TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | -| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | -| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | -| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | -| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | -| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | - -## License ## - -This library is licensed under MIT license. See LICENSE for details. diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go deleted file mode 100644 index 2aff57aab4d6..000000000000 --- a/vendor/github.com/huandu/xstrings/common.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "bytes" -) - -const bufferMaxInitGrowSize = 2048 - -// Lazy initialize a buffer. -func allocBuffer(orig, cur string) *bytes.Buffer { - output := &bytes.Buffer{} - maxSize := len(orig) * 4 - - // Avoid to reserve too much memory at once. - if maxSize > bufferMaxInitGrowSize { - maxSize = bufferMaxInitGrowSize - } - - output.Grow(maxSize) - output.WriteString(orig[:len(orig)-len(cur)]) - return output -} diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go deleted file mode 100644 index 8253fa9c63b7..000000000000 --- a/vendor/github.com/huandu/xstrings/convert.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. 
- -package xstrings - -import ( - "bytes" - "math/rand" - "unicode" - "unicode/utf8" -) - -// ToCamelCase can convert all lower case characters behind underscores -// to upper case character. -// Underscore character will be removed in result except following cases. -// * More than 1 underscore. -// "a__b" => "A_B" -// * At the beginning of string. -// "_a" => "_A" -// * At the end of string. -// "ab_" => "Ab_" -func ToCamelCase(str string) string { - if len(str) == 0 { - return "" - } - - buf := &bytes.Buffer{} - var r0, r1 rune - var size int - - // leading '_' will appear in output. - for len(str) > 0 { - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if r0 != '_' { - break - } - - buf.WriteRune(r0) - } - - if len(str) == 0 { - return buf.String() - } - - r0 = unicode.ToUpper(r0) - - for len(str) > 0 { - r1 = r0 - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if r1 == '_' && r0 == '_' { - buf.WriteRune(r1) - continue - } - - if r1 == '_' { - r0 = unicode.ToUpper(r0) - } else { - r0 = unicode.ToLower(r0) - } - - if r1 != '_' { - buf.WriteRune(r1) - } - } - - buf.WriteRune(r0) - return buf.String() -} - -// ToSnakeCase can convert all upper case characters in a string to -// snake case format. -// -// Some samples. -// "FirstName" => "first_name" -// "HTTPServer" => "http_server" -// "NoHTTPS" => "no_https" -// "GO_PATH" => "go_path" -// "GO PATH" => "go_path" // space is converted to underscore. -// "GO-PATH" => "go_path" // hyphen is converted to underscore. -// "HTTP2XX" => "http_2xx" // insert an underscore before a number and after an alphabet. -// "http2xx" => "http_2xx" -// "HTTP20xOK" => "http_20x_ok" -func ToSnakeCase(str string) string { - return camelCaseToLowerCase(str, '_') -} - -// ToKebabCase can convert all upper case characters in a string to -// kebab case format. -// -// Some samples. 
-// "FirstName" => "first-name" -// "HTTPServer" => "http-server" -// "NoHTTPS" => "no-https" -// "GO_PATH" => "go-path" -// "GO PATH" => "go-path" // space is converted to '-'. -// "GO-PATH" => "go-path" // hyphen is converted to '-'. -// "HTTP2XX" => "http-2xx" // insert a '-' before a number and after an alphabet. -// "http2xx" => "http-2xx" -// "HTTP20xOK" => "http-20x-ok" -func ToKebabCase(str string) string { - return camelCaseToLowerCase(str, '-') -} - -func camelCaseToLowerCase(str string, connector rune) string { - if len(str) == 0 { - return "" - } - - buf := &bytes.Buffer{} - var prev, r0, r1 rune - var size int - - r0 = connector - - for len(str) > 0 { - prev = r0 - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - switch { - case r0 == utf8.RuneError: - buf.WriteRune(r0) - - case unicode.IsUpper(r0): - if prev != connector && !unicode.IsNumber(prev) { - buf.WriteRune(connector) - } - - buf.WriteRune(unicode.ToLower(r0)) - - if len(str) == 0 { - break - } - - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if !unicode.IsUpper(r0) { - buf.WriteRune(r0) - break - } - - // find next non-upper-case character and insert connector properly. - // it's designed to convert `HTTPServer` to `http_server`. - // if there are more than 2 adjacent upper case characters in a word, - // treat them as an abbreviation plus a normal word. - for len(str) > 0 { - r1 = r0 - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if r0 == utf8.RuneError { - buf.WriteRune(unicode.ToLower(r1)) - buf.WriteRune(r0) - break - } - - if !unicode.IsUpper(r0) { - if r0 == '_' || r0 == ' ' || r0 == '-' { - r0 = connector - - buf.WriteRune(unicode.ToLower(r1)) - } else if unicode.IsNumber(r0) { - // treat a number as an upper case rune - // so that both `http2xx` and `HTTP2XX` can be converted to `http_2xx`. 
- buf.WriteRune(unicode.ToLower(r1)) - buf.WriteRune(connector) - buf.WriteRune(r0) - } else { - buf.WriteRune(connector) - buf.WriteRune(unicode.ToLower(r1)) - buf.WriteRune(r0) - } - - break - } - - buf.WriteRune(unicode.ToLower(r1)) - } - - if len(str) == 0 || r0 == connector { - buf.WriteRune(unicode.ToLower(r0)) - } - - case unicode.IsNumber(r0): - if prev != connector && !unicode.IsNumber(prev) { - buf.WriteRune(connector) - } - - buf.WriteRune(r0) - - default: - if r0 == ' ' || r0 == '-' || r0 == '_' { - r0 = connector - } - - buf.WriteRune(r0) - } - } - - return buf.String() -} - -// SwapCase will swap characters case from upper to lower or lower to upper. -func SwapCase(str string) string { - var r rune - var size int - - buf := &bytes.Buffer{} - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case unicode.IsUpper(r): - buf.WriteRune(unicode.ToLower(r)) - - case unicode.IsLower(r): - buf.WriteRune(unicode.ToUpper(r)) - - default: - buf.WriteRune(r) - } - - str = str[size:] - } - - return buf.String() -} - -// FirstRuneToUpper converts first rune to upper case if necessary. -func FirstRuneToUpper(str string) string { - if str == "" { - return str - } - - r, size := utf8.DecodeRuneInString(str) - - if !unicode.IsLower(r) { - return str - } - - buf := &bytes.Buffer{} - buf.WriteRune(unicode.ToUpper(r)) - buf.WriteString(str[size:]) - return buf.String() -} - -// FirstRuneToLower converts first rune to lower case if necessary. -func FirstRuneToLower(str string) string { - if str == "" { - return str - } - - r, size := utf8.DecodeRuneInString(str) - - if !unicode.IsUpper(r) { - return str - } - - buf := &bytes.Buffer{} - buf.WriteRune(unicode.ToLower(r)) - buf.WriteString(str[size:]) - return buf.String() -} - -// Shuffle randomizes runes in a string and returns the result. -// It uses default random source in `math/rand`. 
-func Shuffle(str string) string { - if str == "" { - return str - } - - runes := []rune(str) - index := 0 - - for i := len(runes) - 1; i > 0; i-- { - index = rand.Intn(i + 1) - - if i != index { - runes[i], runes[index] = runes[index], runes[i] - } - } - - return string(runes) -} - -// ShuffleSource randomizes runes in a string with given random source. -func ShuffleSource(str string, src rand.Source) string { - if str == "" { - return str - } - - runes := []rune(str) - index := 0 - r := rand.New(src) - - for i := len(runes) - 1; i > 0; i-- { - index = r.Intn(i + 1) - - if i != index { - runes[i], runes[index] = runes[index], runes[i] - } - } - - return string(runes) -} - -// Successor returns the successor to string. -// -// If there is one alphanumeric rune is found in string, increase the rune by 1. -// If increment generates a "carry", the rune to the left of it is incremented. -// This process repeats until there is no carry, adding an additional rune if necessary. -// -// If there is no alphanumeric rune, the rightmost rune will be increased by 1 -// regardless whether the result is a valid rune or not. -// -// Only following characters are alphanumeric. 
-// * a - z -// * A - Z -// * 0 - 9 -// -// Samples (borrowed from ruby's String#succ document): -// "abcd" => "abce" -// "THX1138" => "THX1139" -// "<>" => "<>" -// "1999zzz" => "2000aaa" -// "ZZZ9999" => "AAAA0000" -// "***" => "**+" -func Successor(str string) string { - if str == "" { - return str - } - - var r rune - var i int - carry := ' ' - runes := []rune(str) - l := len(runes) - lastAlphanumeric := l - - for i = l - 1; i >= 0; i-- { - r = runes[i] - - if ('a' <= r && r <= 'y') || - ('A' <= r && r <= 'Y') || - ('0' <= r && r <= '8') { - runes[i]++ - carry = ' ' - lastAlphanumeric = i - break - } - - switch r { - case 'z': - runes[i] = 'a' - carry = 'a' - lastAlphanumeric = i - - case 'Z': - runes[i] = 'A' - carry = 'A' - lastAlphanumeric = i - - case '9': - runes[i] = '0' - carry = '0' - lastAlphanumeric = i - } - } - - // Needs to add one character for carry. - if i < 0 && carry != ' ' { - buf := &bytes.Buffer{} - buf.Grow(l + 4) // Reserve enough space for write. - - if lastAlphanumeric != 0 { - buf.WriteString(str[:lastAlphanumeric]) - } - - buf.WriteRune(carry) - - for _, r = range runes[lastAlphanumeric:] { - buf.WriteRune(r) - } - - return buf.String() - } - - // No alphanumeric character. Simply increase last rune's value. - if lastAlphanumeric == l { - runes[l-1]++ - } - - return string(runes) -} diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go deleted file mode 100644 index f96e38703a3a..000000000000 --- a/vendor/github.com/huandu/xstrings/count.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "unicode" - "unicode/utf8" -) - -// Len returns str's utf8 rune length. -func Len(str string) int { - return utf8.RuneCountInString(str) -} - -// WordCount returns number of words in a string. 
-// -// Word is defined as a locale dependent string containing alphabetic characters, -// which may also contain but not start with `'` and `-` characters. -func WordCount(str string) int { - var r rune - var size, n int - - inWord := false - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case isAlphabet(r): - if !inWord { - inWord = true - n++ - } - - case inWord && (r == '\'' || r == '-'): - // Still in word. - - default: - inWord = false - } - - str = str[size:] - } - - return n -} - -const minCJKCharacter = '\u3400' - -// Checks r is a letter but not CJK character. -func isAlphabet(r rune) bool { - if !unicode.IsLetter(r) { - return false - } - - switch { - // Quick check for non-CJK character. - case r < minCJKCharacter: - return true - - // Common CJK characters. - case r >= '\u4E00' && r <= '\u9FCC': - return false - - // Rare CJK characters. - case r >= '\u3400' && r <= '\u4D85': - return false - - // Rare and historic CJK characters. - case r >= '\U00020000' && r <= '\U0002B81D': - return false - } - - return true -} - -// Width returns string width in monotype font. -// Multi-byte characters are usually twice the width of single byte characters. -// -// Algorithm comes from `mb_strwidth` in PHP. -// http://php.net/manual/en/function.mb-strwidth.php -func Width(str string) int { - var r rune - var size, n int - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - n += RuneWidth(r) - str = str[size:] - } - - return n -} - -// RuneWidth returns character width in monotype font. -// Multi-byte characters are usually twice the width of single byte characters. -// -// Algorithm comes from `mb_strwidth` in PHP. 
-// http://php.net/manual/en/function.mb-strwidth.php -func RuneWidth(r rune) int { - switch { - case r == utf8.RuneError || r < '\x20': - return 0 - - case '\x20' <= r && r < '\u2000': - return 1 - - case '\u2000' <= r && r < '\uFF61': - return 2 - - case '\uFF61' <= r && r < '\uFFA0': - return 1 - - case '\uFFA0' <= r: - return 2 - } - - return 0 -} diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go deleted file mode 100644 index 1a6ef069f613..000000000000 --- a/vendor/github.com/huandu/xstrings/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -// Package xstrings is to provide string algorithms which are useful but not included in `strings` package. -// See project home page for details. https://github.com/huandu/xstrings -// -// Package xstrings assumes all strings are encoded in utf8. -package xstrings diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go deleted file mode 100644 index 2d02df1c042f..000000000000 --- a/vendor/github.com/huandu/xstrings/format.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "bytes" - "unicode/utf8" -) - -// ExpandTabs can expand tabs ('\t') rune in str to one or more spaces dpending on -// current column and tabSize. -// The column number is reset to zero after each newline ('\n') occurring in the str. -// -// ExpandTabs uses RuneWidth to decide rune's width. -// For example, CJK characters will be treated as two characters. -// -// If tabSize <= 0, ExpandTabs panics with error. 
-// -// Samples: -// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k" -// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l" -// ExpandTabs("z中\t文\tw", 4) => "z中 文 w" -func ExpandTabs(str string, tabSize int) string { - if tabSize <= 0 { - panic("tab size must be positive") - } - - var r rune - var i, size, column, expand int - var output *bytes.Buffer - - orig := str - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - if r == '\t' { - expand = tabSize - column%tabSize - - if output == nil { - output = allocBuffer(orig, str) - } - - for i = 0; i < expand; i++ { - output.WriteByte(byte(' ')) - } - - column += expand - } else { - if r == '\n' { - column = 0 - } else { - column += RuneWidth(r) - } - - if output != nil { - output.WriteRune(r) - } - } - - str = str[size:] - } - - if output == nil { - return orig - } - - return output.String() -} - -// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. -// -// Samples: -// LeftJustify("hello", 4, " ") => "hello" -// LeftJustify("hello", 10, " ") => "hello " -// LeftJustify("hello", 10, "123") => "hello12312" -func LeftJustify(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &bytes.Buffer{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - output.WriteString(str) - writePadString(output, pad, padLen, remains) - return output.String() -} - -// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. 
-// -// Samples: -// RightJustify("hello", 4, " ") => "hello" -// RightJustify("hello", 10, " ") => " hello" -// RightJustify("hello", 10, "123") => "12312hello" -func RightJustify(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &bytes.Buffer{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - writePadString(output, pad, padLen, remains) - output.WriteString(str) - return output.String() -} - -// Center returns a string with pad string at both side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. -// -// Samples: -// Center("hello", 4, " ") => "hello" -// Center("hello", 10, " ") => " hello " -// Center("hello", 10, "123") => "12hello123" -func Center(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &bytes.Buffer{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - writePadString(output, pad, padLen, remains/2) - output.WriteString(str) - writePadString(output, pad, padLen, (remains+1)/2) - return output.String() -} - -func writePadString(output *bytes.Buffer, pad string, padLen, remains int) { - var r rune - var size int - - repeats := remains / padLen - - for i := 0; i < repeats; i++ { - output.WriteString(pad) - } - - remains = remains % padLen - - if remains != 0 { - for i := 0; i < remains; i++ { - r, size = utf8.DecodeRuneInString(pad) - output.WriteRune(r) - pad = pad[size:] - } - } -} diff --git a/vendor/github.com/huandu/xstrings/go.mod b/vendor/github.com/huandu/xstrings/go.mod deleted file mode 100644 index 5866b3a8eb7a..000000000000 --- a/vendor/github.com/huandu/xstrings/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/huandu/xstrings \ No newline at end of file 
diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go deleted file mode 100644 index 0eefb43ed71d..000000000000 --- a/vendor/github.com/huandu/xstrings/manipulate.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "bytes" - "strings" - "unicode/utf8" -) - -// Reverse a utf8 encoded string. -func Reverse(str string) string { - var size int - - tail := len(str) - buf := make([]byte, tail) - s := buf - - for len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - tail -= size - s = append(s[:tail], []byte(str[:size])...) - str = str[size:] - } - - return string(buf) -} - -// Slice a string by rune. -// -// Start must satisfy 0 <= start <= rune length. -// -// End can be positive, zero or negative. -// If end >= 0, start and end must satisfy start <= end <= rune length. -// If end < 0, it means slice to the end of string. -// -// Otherwise, Slice will panic as out of range. -func Slice(str string, start, end int) string { - var size, startPos, endPos int - - origin := str - - if start < 0 || end > len(str) || (end >= 0 && start > end) { - panic("out of range") - } - - if end >= 0 { - end -= start - } - - for start > 0 && len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - start-- - startPos += size - str = str[size:] - } - - if end < 0 { - return origin[startPos:] - } - - endPos = startPos - - for end > 0 && len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - end-- - endPos += size - str = str[size:] - } - - if len(str) == 0 && (start > 0 || end > 0) { - panic("out of range") - } - - return origin[startPos:endPos] -} - -// Partition splits a string by sep into three parts. -// The return value is a slice of strings with head, match and tail. 
-// -// If str contains sep, for example "hello" and "l", Partition returns -// "he", "l", "lo" -// -// If str doesn't contain sep, for example "hello" and "x", Partition returns -// "hello", "", "" -func Partition(str, sep string) (head, match, tail string) { - index := strings.Index(str, sep) - - if index == -1 { - head = str - return - } - - head = str[:index] - match = str[index : index+len(sep)] - tail = str[index+len(sep):] - return -} - -// LastPartition splits a string by last instance of sep into three parts. -// The return value is a slice of strings with head, match and tail. -// -// If str contains sep, for example "hello" and "l", LastPartition returns -// "hel", "l", "o" -// -// If str doesn't contain sep, for example "hello" and "x", LastPartition returns -// "", "", "hello" -func LastPartition(str, sep string) (head, match, tail string) { - index := strings.LastIndex(str, sep) - - if index == -1 { - tail = str - return - } - - head = str[:index] - match = str[index : index+len(sep)] - tail = str[index+len(sep):] - return -} - -// Insert src into dst at given rune index. -// Index is counted by runes instead of bytes. -// -// If index is out of range of dst, panic with out of range. -func Insert(dst, src string, index int) string { - return Slice(dst, 0, index) + src + Slice(dst, index, -1) -} - -// Scrub scrubs invalid utf8 bytes with repl string. -// Adjacent invalid bytes are replaced only once. 
-func Scrub(str, repl string) string { - var buf *bytes.Buffer - var r rune - var size, pos int - var hasError bool - - origin := str - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - if r == utf8.RuneError { - if !hasError { - if buf == nil { - buf = &bytes.Buffer{} - } - - buf.WriteString(origin[:pos]) - hasError = true - } - } else if hasError { - hasError = false - buf.WriteString(repl) - - origin = origin[pos:] - pos = 0 - } - - pos += size - str = str[size:] - } - - if buf != nil { - buf.WriteString(origin) - return buf.String() - } - - // No invalid byte. - return origin -} - -// WordSplit splits a string into words. Returns a slice of words. -// If there is no word in a string, return nil. -// -// Word is defined as a locale dependent string containing alphabetic characters, -// which may also contain but not start with `'` and `-` characters. -func WordSplit(str string) []string { - var word string - var words []string - var r rune - var size, pos int - - inWord := false - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case isAlphabet(r): - if !inWord { - inWord = true - word = str - pos = 0 - } - - case inWord && (r == '\'' || r == '-'): - // Still in word. - - default: - if inWord { - inWord = false - words = append(words, word[:pos]) - } - } - - pos += size - str = str[size:] - } - - if inWord { - words = append(words, word[:pos]) - } - - return words -} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go deleted file mode 100644 index 66e23f86d030..000000000000 --- a/vendor/github.com/huandu/xstrings/translate.go +++ /dev/null @@ -1,547 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "bytes" - "unicode" - "unicode/utf8" -) - -type runeRangeMap struct { - FromLo rune // Lower bound of range map. 
- FromHi rune // An inclusive higher bound of range map. - ToLo rune - ToHi rune -} - -type runeDict struct { - Dict [unicode.MaxASCII + 1]rune -} - -type runeMap map[rune]rune - -// Translator can translate string with pre-compiled from and to patterns. -// If a from/to pattern pair needs to be used more than once, it's recommended -// to create a Translator and reuse it. -type Translator struct { - quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes. - runeMap runeMap // Rune map for translation. - ranges []*runeRangeMap // Ranges of runes. - mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. - reverted bool // If to pattern is empty, all matched characters will be deleted. - hasPattern bool -} - -// NewTranslator creates new Translator through a from/to pattern pair. -func NewTranslator(from, to string) *Translator { - tr := &Translator{} - - if from == "" { - return tr - } - - reverted := from[0] == '^' - deletion := len(to) == 0 - - if reverted { - from = from[1:] - } - - var fromStart, fromEnd, fromRangeStep rune - var toStart, toEnd, toRangeStep rune - var fromRangeSize, toRangeSize rune - var singleRunes []rune - - // Update the to rune range. - updateRange := func() { - // No more rune to read in the to rune pattern. - if toEnd == utf8.RuneError { - return - } - - if toRangeStep == 0 { - to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) - return - } - - // Current range is not empty. Consume 1 rune from start. - if toStart != toEnd { - toStart += toRangeStep - return - } - - // No more rune. Repeat the last rune. - if to == "" { - toEnd = utf8.RuneError - return - } - - // Both start and end are used. Read two more runes from the to pattern. 
- to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) - } - - if deletion { - toStart = utf8.RuneError - toEnd = utf8.RuneError - } else { - // If from pattern is reverted, only the last rune in the to pattern will be used. - if reverted { - var size int - - for len(to) > 0 { - toStart, size = utf8.DecodeRuneInString(to) - to = to[size:] - } - - toEnd = utf8.RuneError - } else { - to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) - } - } - - fromEnd = utf8.RuneError - - for len(from) > 0 { - from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) - - // fromStart is a single character. Just map it with a rune in the to pattern. - if fromRangeStep == 0 { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - continue - } - - for toEnd != utf8.RuneError && fromStart != fromEnd { - // If mapped rune is a single character instead of a range, simply shift first - // rune in the range. - if toRangeStep == 0 { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - fromStart += fromRangeStep - continue - } - - fromRangeSize = (fromEnd - fromStart) * fromRangeStep - toRangeSize = (toEnd - toStart) * toRangeStep - - // Not enough runes in the to pattern. Need to read more. - if fromRangeSize > toRangeSize { - fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) - fromStart += fromRangeStep - updateRange() - - // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered - // as a single rune. 
- if fromStart == fromEnd { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - } - - continue - } - - fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes) - updateRange() - break - } - - if fromStart == fromEnd { - fromEnd = utf8.RuneError - continue - } - - fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) - fromEnd = utf8.RuneError - } - - if fromEnd != utf8.RuneError { - singleRunes = tr.addRune(fromEnd, toStart, singleRunes) - } - - tr.reverted = reverted - tr.mappedRune = -1 - tr.hasPattern = true - - // Translate RuneError only if in deletion or reverted mode. - if deletion || reverted { - tr.mappedRune = toStart - } - - return tr -} - -func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune { - if from <= unicode.MaxASCII { - if tr.quickDict == nil { - tr.quickDict = &runeDict{} - } - - tr.quickDict.Dict[from] = to - } else { - if tr.runeMap == nil { - tr.runeMap = make(runeMap) - } - - tr.runeMap[from] = to - } - - singleRunes = append(singleRunes, from) - return singleRunes -} - -func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) { - var r rune - var rrm *runeRangeMap - - if fromLo < fromHi { - rrm = &runeRangeMap{ - FromLo: fromLo, - FromHi: fromHi, - ToLo: toLo, - ToHi: toHi, - } - } else { - rrm = &runeRangeMap{ - FromLo: fromHi, - FromHi: fromLo, - ToLo: toHi, - ToHi: toLo, - } - } - - // If there is any single rune conflicts with this rune range, clear single rune record. 
- for _, r = range singleRunes { - if rrm.FromLo <= r && r <= rrm.FromHi { - if r <= unicode.MaxASCII { - tr.quickDict.Dict[r] = 0 - } else { - delete(tr.runeMap, r) - } - } - } - - tr.ranges = append(tr.ranges, rrm) - return fromHi, toHi -} - -func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) { - var r rune - var size int - - remaining = str - escaping := false - isRange := false - - for len(remaining) > 0 { - r, size = utf8.DecodeRuneInString(remaining) - remaining = remaining[size:] - - // Parse special characters. - if !escaping { - if r == '\\' { - escaping = true - continue - } - - if r == '-' { - // Ignore slash at beginning of string. - if last == utf8.RuneError { - continue - } - - start = last - isRange = true - continue - } - } - - escaping = false - - if last != utf8.RuneError { - // This is a range which start and end are the same. - // Considier it as a normal character. - if isRange && last == r { - isRange = false - continue - } - - start = last - end = r - - if isRange { - if start < end { - rangeStep = 1 - } else { - rangeStep = -1 - } - } - - return - } - - last = r - } - - start = last - end = utf8.RuneError - return -} - -// Translate str with a from/to pattern pair. -// -// See comment in Translate function for usage and samples. -func (tr *Translator) Translate(str string) string { - if !tr.hasPattern || str == "" { - return str - } - - var r rune - var size int - var needTr bool - - orig := str - - var output *bytes.Buffer - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - r, needTr = tr.TranslateRune(r) - - if needTr && output == nil { - output = allocBuffer(orig, str) - } - - if r != utf8.RuneError && output != nil { - output.WriteRune(r) - } - - str = str[size:] - } - - // No character is translated. - if output == nil { - return orig - } - - return output.String() -} - -// TranslateRune return translated rune and true if r matches the from pattern. 
-// If r doesn't match the pattern, original r is returned and translated is false. -func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { - switch { - case tr.quickDict != nil: - if r <= unicode.MaxASCII { - result = tr.quickDict.Dict[r] - - if result != 0 { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - } - - break - } - } - - fallthrough - - case tr.runeMap != nil: - var ok bool - - if result, ok = tr.runeMap[r]; ok { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - } - - break - } - - fallthrough - - default: - var rrm *runeRangeMap - ranges := tr.ranges - - for i := len(ranges) - 1; i >= 0; i-- { - rrm = ranges[i] - - if rrm.FromLo <= r && r <= rrm.FromHi { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - break - } - - if rrm.ToLo < rrm.ToHi { - result = rrm.ToLo + r - rrm.FromLo - } else if rrm.ToLo > rrm.ToHi { - // ToHi can be smaller than ToLo if range is from higher to lower. - result = rrm.ToLo - r + rrm.FromLo - } else { - result = rrm.ToLo - } - - break - } - } - } - - if tr.reverted { - if !translated { - result = tr.mappedRune - } - - translated = !translated - } - - if !translated { - result = r - } - - return -} - -// HasPattern returns true if Translator has one pattern at least. -func (tr *Translator) HasPattern() bool { - return tr.hasPattern -} - -// Translate str with the characters defined in from replaced by characters defined in to. -// -// From and to are patterns representing a set of characters. Pattern is defined as following. -// -// * Special characters -// * '-' means a range of runes, e.g. -// * "a-z" means all characters from 'a' to 'z' inclusive; -// * "z-a" means all characters from 'z' to 'a' inclusive. -// * '^' as first character means a set of all runes excepted listed, e.g. -// * "^a-z" means all characters except 'a' to 'z' inclusive. -// * '\' escapes special characters. -// * Normal character represents itself, e.g. 
"abc" is a set including 'a', 'b' and 'c'. -// -// Translate will try to find a 1:1 mapping from from to to. -// If to is smaller than from, last rune in to will be used to map "out of range" characters in from. -// -// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. -// -// If the to pattern is an empty string, Translate works exactly the same as Delete. -// -// Samples: -// Translate("hello", "aeiou", "12345") => "h2ll4" -// Translate("hello", "a-z", "A-Z") => "HELLO" -// Translate("hello", "z-a", "a-z") => "svool" -// Translate("hello", "aeiou", "*") => "h*ll*" -// Translate("hello", "^l", "*") => "**ll*" -// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d" -func Translate(str, from, to string) string { - tr := NewTranslator(from, to) - return tr.Translate(str) -} - -// Delete runes in str matching the pattern. -// Pattern is defined in Translate function. -// -// Samples: -// Delete("hello", "aeiou") => "hll" -// Delete("hello", "a-k") => "llo" -// Delete("hello", "^a-k") => "he" -func Delete(str, pattern string) string { - tr := NewTranslator(pattern, "") - return tr.Translate(str) -} - -// Count how many runes in str match the pattern. -// Pattern is defined in Translate function. -// -// Samples: -// Count("hello", "aeiou") => 3 -// Count("hello", "a-k") => 3 -// Count("hello", "^a-k") => 2 -func Count(str, pattern string) int { - if pattern == "" || str == "" { - return 0 - } - - var r rune - var size int - var matched bool - - tr := NewTranslator(pattern, "") - cnt := 0 - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if _, matched = tr.TranslateRune(r); matched { - cnt++ - } - } - - return cnt -} - -// Squeeze deletes adjacent repeated runes in str. -// If pattern is not empty, only runes matching the pattern will be squeezed. 
-// -// Samples: -// Squeeze("hello", "") => "helo" -// Squeeze("hello", "m-z") => "hello" -// Squeeze("hello world", " ") => "hello world" -func Squeeze(str, pattern string) string { - var last, r rune - var size int - var skipSqueeze, matched bool - var tr *Translator - var output *bytes.Buffer - - orig := str - last = -1 - - if len(pattern) > 0 { - tr = NewTranslator(pattern, "") - } - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - // Need to squeeze the str. - if last == r && !skipSqueeze { - if tr != nil { - if _, matched = tr.TranslateRune(r); !matched { - skipSqueeze = true - } - } - - if output == nil { - output = allocBuffer(orig, str) - } - - if skipSqueeze { - output.WriteRune(r) - } - } else { - if output != nil { - output.WriteRune(r) - } - - last = r - skipSqueeze = false - } - - str = str[size:] - } - - if output == nil { - return orig - } - - return output.String() -} diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907a09..000000000000 --- a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298da2..000000000000 --- a/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index 02fc81e0626e..000000000000 --- a/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,238 +0,0 @@ -# Mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). 
- -[![GoDoc][3]][4] -[![GoCard][5]][6] -[![Build Status][1]][2] -[![Coverage Status][7]][8] -[![Sourcegraph][9]][10] -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield) - -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://goreportcard.com/badge/imdario/mergo -[6]: https://goreportcard.com/report/github.com/imdario/mergo -[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[8]: https://coveralls.io/github/imdario/mergo?branch=master -[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[10]: https://sourcegraph.com/github.com/imdario/mergo?badge - -### Latest release - -[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7). - -### Important note - -Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. - -If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. 
:heart_eyes: - -Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) -Donate using Liberapay - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- 
[RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - -## Installation - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. - -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... 
-} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). - -### Nice example - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v2 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransfomer struct { -} - -func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... 
-} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). - -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
- - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index 6e9aa7baf354..000000000000 --- a/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mergo merges same-type structs and maps by setting default values in zero-value fields. - -Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Usage - -From my own work-in-progress project: - - type networkConfig struct { - Protocol string - Address string - ServerType string `json: "server_type"` - Port uint16 - } - - type FssnConfig struct { - Network networkConfig - } - - var fssnDefault = FssnConfig { - networkConfig { - "tcp", - "127.0.0.1", - "http", - 31560, - }, - } - - // Inside a function [...] - - if err := mergo.Merge(&config, fssnDefault); err != nil { - log.Fatal(err) - } - - // More code [...] - -*/ -package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index 3f5afa83a13c..000000000000 --- a/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. 
- -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. 
- continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) 
-} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) -} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 71e083db243b..000000000000 --- a/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. 
- -package mergo - -import ( - "fmt" - "reflect" -) - -func hasExportedField(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasExportedField(dst.Field(i)) - } else { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -type Config struct { - Overwrite bool - AppendSlice bool - TypeCheck bool - Transformers Transformers - overwriteWithEmptyValue bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - config.overwriteWithEmptyValue = false - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} - } - - if config.Transformers != nil && !isEmptyValue(dst) { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasExportedField(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return 
fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } - dst.SetMapIndex(key, dstSlice) - } - } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue - } - - if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if src.IsNil() { - break - } - - if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) { - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - 
if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - default: - if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by -// non-empty src attribute values. -// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). 
-func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index a82fea2fdccc..000000000000 --- a/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. -var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - ptr uintptr - typ reflect.Type - next *visit -} - -// From src/pkg/encoding/json/encode.go. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - return isEmptyValue(v.Elem()) - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/vendor/github.com/TykTechnologies/gorpc/LICENSE b/vendor/github.com/lonelycode/gorpc/LICENSE similarity index 100% rename from vendor/github.com/TykTechnologies/gorpc/LICENSE rename to vendor/github.com/lonelycode/gorpc/LICENSE diff --git a/vendor/github.com/TykTechnologies/gorpc/Makefile b/vendor/github.com/lonelycode/gorpc/Makefile similarity index 100% rename from vendor/github.com/TykTechnologies/gorpc/Makefile rename to vendor/github.com/lonelycode/gorpc/Makefile diff --git a/vendor/github.com/TykTechnologies/gorpc/README.md b/vendor/github.com/lonelycode/gorpc/README.md similarity index 100% rename from vendor/github.com/TykTechnologies/gorpc/README.md rename to vendor/github.com/lonelycode/gorpc/README.md diff --git a/vendor/github.com/TykTechnologies/gorpc/TODO b/vendor/github.com/lonelycode/gorpc/TODO similarity index 100% rename from vendor/github.com/TykTechnologies/gorpc/TODO rename to vendor/github.com/lonelycode/gorpc/TODO diff --git a/vendor/github.com/TykTechnologies/gorpc/client.go b/vendor/github.com/lonelycode/gorpc/client.go similarity index 99% rename from vendor/github.com/TykTechnologies/gorpc/client.go rename to vendor/github.com/lonelycode/gorpc/client.go index cba9e4c7bd8a..9780414ba417 100644 --- a/vendor/github.com/TykTechnologies/gorpc/client.go +++ b/vendor/github.com/lonelycode/gorpc/client.go @@ -3,7 +3,6 @@ package gorpc import ( "fmt" "io" - "net" "sync" "time" ) @@ -502,7 +501,7 @@ func (e *ClientError) Error() string { func clientHandler(c *Client) { defer c.stopWg.Done() - var conn net.Conn + var conn io.ReadWriteCloser var err error for { @@ -530,9 +529,9 @@ func clientHandler(c *Client) { } } -func clientHandleConnection(c *Client, conn net.Conn) { +func clientHandleConnection(c *Client, conn io.ReadWriteCloser) { if c.OnConnect != nil { - newConn, _, err := c.OnConnect(conn) + newConn, err := c.OnConnect(c.Addr, conn) if err != nil { 
c.LogError("gorpc.Client: [%s]. OnConnect error: [%s]", c.Addr, err) conn.Close() diff --git a/vendor/github.com/TykTechnologies/gorpc/common.go b/vendor/github.com/lonelycode/gorpc/common.go similarity index 96% rename from vendor/github.com/TykTechnologies/gorpc/common.go rename to vendor/github.com/lonelycode/gorpc/common.go index bb0823b036da..3a69dc1723af 100644 --- a/vendor/github.com/TykTechnologies/gorpc/common.go +++ b/vendor/github.com/lonelycode/gorpc/common.go @@ -2,8 +2,8 @@ package gorpc import ( "fmt" + "io" "log" - "net" "sync" "time" ) @@ -41,7 +41,7 @@ const ( // // The callback may be used for authentication/authorization and/or custom // transport wrapping. -type OnConnectFunc func(rwc net.Conn) (net.Conn, string, error) +type OnConnectFunc func(remoteAddr string, rwc io.ReadWriteCloser) (io.ReadWriteCloser, error) // LoggerFunc is an error logging function to pass to gorpc.SetErrorLogger(). type LoggerFunc func(format string, args ...interface{}) diff --git a/vendor/github.com/TykTechnologies/gorpc/conn_stats.go b/vendor/github.com/lonelycode/gorpc/conn_stats.go similarity index 100% rename from vendor/github.com/TykTechnologies/gorpc/conn_stats.go rename to vendor/github.com/lonelycode/gorpc/conn_stats.go diff --git a/vendor/github.com/TykTechnologies/gorpc/conn_stats_386.go b/vendor/github.com/lonelycode/gorpc/conn_stats_386.go similarity index 100% rename from vendor/github.com/TykTechnologies/gorpc/conn_stats_386.go rename to vendor/github.com/lonelycode/gorpc/conn_stats_386.go diff --git a/vendor/github.com/TykTechnologies/gorpc/conn_stats_generic.go b/vendor/github.com/lonelycode/gorpc/conn_stats_generic.go similarity index 100% rename from vendor/github.com/TykTechnologies/gorpc/conn_stats_generic.go rename to vendor/github.com/lonelycode/gorpc/conn_stats_generic.go diff --git a/vendor/github.com/TykTechnologies/gorpc/dispatcher.go b/vendor/github.com/lonelycode/gorpc/dispatcher.go similarity index 100% rename from 
vendor/github.com/TykTechnologies/gorpc/dispatcher.go rename to vendor/github.com/lonelycode/gorpc/dispatcher.go diff --git a/vendor/github.com/TykTechnologies/gorpc/doc.go b/vendor/github.com/lonelycode/gorpc/doc.go similarity index 100% rename from vendor/github.com/TykTechnologies/gorpc/doc.go rename to vendor/github.com/lonelycode/gorpc/doc.go diff --git a/vendor/github.com/TykTechnologies/gorpc/encoding.go b/vendor/github.com/lonelycode/gorpc/encoding.go similarity index 100% rename from vendor/github.com/TykTechnologies/gorpc/encoding.go rename to vendor/github.com/lonelycode/gorpc/encoding.go diff --git a/vendor/github.com/TykTechnologies/gorpc/server.go b/vendor/github.com/lonelycode/gorpc/server.go similarity index 97% rename from vendor/github.com/TykTechnologies/gorpc/server.go rename to vendor/github.com/lonelycode/gorpc/server.go index f38994d1a71f..cf2baa36da33 100644 --- a/vendor/github.com/TykTechnologies/gorpc/server.go +++ b/vendor/github.com/lonelycode/gorpc/server.go @@ -3,7 +3,6 @@ package gorpc import ( "fmt" "io" - "net" "runtime" "sync" "time" @@ -181,13 +180,14 @@ func (s *Server) Serve() error { func serverHandler(s *Server, workersCh chan struct{}) { defer s.stopWg.Done() - var conn net.Conn + var conn io.ReadWriteCloser + var clientAddr string var err error for { acceptChan := make(chan struct{}) go func() { - if conn, err = s.Listener.Accept(); err != nil { + if conn, clientAddr, err = s.Listener.Accept(); err != nil { s.LogError("gorpc.Server: [%s]. 
Cannot accept new connection: [%s]", s.Addr, err) time.Sleep(time.Second) } @@ -208,16 +208,15 @@ func serverHandler(s *Server, workersCh chan struct{}) { } s.stopWg.Add(1) - go serverHandleConnection(s, conn, workersCh) + go serverHandleConnection(s, conn, clientAddr, workersCh) } } -func serverHandleConnection(s *Server, conn net.Conn, workersCh chan struct{}) { +func serverHandleConnection(s *Server, conn io.ReadWriteCloser, clientAddr string, workersCh chan struct{}) { defer s.stopWg.Done() - var clientAddr string if s.OnConnect != nil { - newConn, clientAddr, err := s.OnConnect(conn) + newConn, err := s.OnConnect(clientAddr, conn) if err != nil { s.LogError("gorpc.Server: [%s]->[%s]. OnConnect error: [%s]", clientAddr, s.Addr, err) conn.Close() @@ -226,10 +225,6 @@ func serverHandleConnection(s *Server, conn net.Conn, workersCh chan struct{}) { conn = newConn } - if clientAddr == "" { - clientAddr = conn.RemoteAddr().String() - } - var enabledCompression bool var err error zChan := make(chan bool, 1) diff --git a/vendor/github.com/TykTechnologies/gorpc/transport.go b/vendor/github.com/lonelycode/gorpc/transport.go similarity index 89% rename from vendor/github.com/TykTechnologies/gorpc/transport.go rename to vendor/github.com/lonelycode/gorpc/transport.go index 108cd7163771..16c0e9cb196a 100644 --- a/vendor/github.com/TykTechnologies/gorpc/transport.go +++ b/vendor/github.com/lonelycode/gorpc/transport.go @@ -2,6 +2,7 @@ package gorpc import ( "crypto/tls" + "io" "net" "time" ) @@ -20,7 +21,7 @@ var ( // Otherwise gorpc may hang. // The conn implementation must call Flush() on underlying buffered // streams before returning from Write(). -type DialFunc func(addr string) (conn net.Conn, err error) +type DialFunc func(addr string) (conn io.ReadWriteCloser, err error) // Listener is an interface for custom listeners intended for the Server. type Listener interface { @@ -37,7 +38,7 @@ type Listener interface { // Otherwise gorpc may hang. 
// The conn implementation must call Flush() on underlying buffered // streams before returning from Write(). - Accept() (conn net.Conn, err error) + Accept() (conn io.ReadWriteCloser, clientAddr string, err error) // Close closes the listener. // All pending calls to Accept() must immediately return errors after @@ -46,7 +47,7 @@ type Listener interface { Close() error } -func defaultDial(addr string) (conn net.Conn, err error) { +func defaultDial(addr string) (conn io.ReadWriteCloser, err error) { return dialer.Dial("tcp", addr) } @@ -59,16 +60,16 @@ func (ln *defaultListener) Init(addr string) (err error) { return } -func (ln *defaultListener) Accept() (conn net.Conn, err error) { +func (ln *defaultListener) Accept() (conn io.ReadWriteCloser, clientAddr string, err error) { c, err := ln.L.Accept() if err != nil { - return nil, err + return nil, "", err } if err = setupKeepalive(c); err != nil { c.Close() - return nil, err + return nil, "", err } - return c, nil + return c, c.RemoteAddr().String(), nil } func (ln *defaultListener) Close() error { @@ -96,19 +97,19 @@ func (ln *netListener) Init(addr string) (err error) { return } -func (ln *netListener) Accept() (conn net.Conn, err error) { +func (ln *netListener) Accept() (conn io.ReadWriteCloser, clientAddr string, err error) { c, err := ln.L.Accept() if err != nil { - return nil, err + return nil, "", err } - return c, nil + return c, c.RemoteAddr().String(), nil } func (ln *netListener) Close() error { return ln.L.Close() } -func unixDial(addr string) (conn net.Conn, err error) { +func unixDial(addr string) (conn io.ReadWriteCloser, err error) { c, err := net.Dial("unix", addr) if err != nil { return nil, err @@ -197,7 +198,7 @@ func NewUnixServer(addr string, handler HandlerFunc) *Server { func NewTLSClient(addr string, cfg *tls.Config) *Client { return &Client{ Addr: addr, - Dial: func(addr string) (conn net.Conn, err error) { + Dial: func(addr string) (conn io.ReadWriteCloser, err error) { c, err := 
tls.DialWithDialer(dialer, "tcp", addr, cfg) if err != nil { return nil, err diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE deleted file mode 100644 index f9c841a51e0d..000000000000 --- a/vendor/github.com/mitchellh/reflectwalk/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md deleted file mode 100644 index ac82cd2e159f..000000000000 --- a/vendor/github.com/mitchellh/reflectwalk/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# reflectwalk - -reflectwalk is a Go library for "walking" a value in Go using reflection, -in the same way a directory tree can be "walked" on the filesystem. Walking -a complex structure can allow you to do manipulations on unknown structures -such as those decoded from JSON. 
diff --git a/vendor/github.com/mitchellh/reflectwalk/go.mod b/vendor/github.com/mitchellh/reflectwalk/go.mod deleted file mode 100644 index 52bb7c469e93..000000000000 --- a/vendor/github.com/mitchellh/reflectwalk/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/mitchellh/reflectwalk diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go deleted file mode 100644 index 6a7f176117f9..000000000000 --- a/vendor/github.com/mitchellh/reflectwalk/location.go +++ /dev/null @@ -1,19 +0,0 @@ -package reflectwalk - -//go:generate stringer -type=Location location.go - -type Location uint - -const ( - None Location = iota - Map - MapKey - MapValue - Slice - SliceElem - Array - ArrayElem - Struct - StructField - WalkLoc -) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go deleted file mode 100644 index 70760cf4c705..000000000000 --- a/vendor/github.com/mitchellh/reflectwalk/location_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. 
- -package reflectwalk - -import "fmt" - -const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" - -var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} - -func (i Location) String() string { - if i >= Location(len(_Location_index)-1) { - return fmt.Sprintf("Location(%d)", i) - } - return _Location_name[_Location_index[i]:_Location_index[i+1]] -} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go deleted file mode 100644 index d7ab7b6d782a..000000000000 --- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go +++ /dev/null @@ -1,401 +0,0 @@ -// reflectwalk is a package that allows you to "walk" complex structures -// similar to how you may "walk" a filesystem: visiting every element one -// by one and calling callback functions allowing you to handle and manipulate -// those elements. -package reflectwalk - -import ( - "errors" - "reflect" -) - -// PrimitiveWalker implementations are able to handle primitive values -// within complex structures. Primitive values are numbers, strings, -// booleans, funcs, chans. -// -// These primitive values are often members of more complex -// structures (slices, maps, etc.) that are walkable by other interfaces. -type PrimitiveWalker interface { - Primitive(reflect.Value) error -} - -// InterfaceWalker implementations are able to handle interface values as they -// are encountered during the walk. -type InterfaceWalker interface { - Interface(reflect.Value) error -} - -// MapWalker implementations are able to handle individual elements -// found within a map structure. -type MapWalker interface { - Map(m reflect.Value) error - MapElem(m, k, v reflect.Value) error -} - -// SliceWalker implementations are able to handle slice elements found -// within complex structures. 
-type SliceWalker interface { - Slice(reflect.Value) error - SliceElem(int, reflect.Value) error -} - -// ArrayWalker implementations are able to handle array elements found -// within complex structures. -type ArrayWalker interface { - Array(reflect.Value) error - ArrayElem(int, reflect.Value) error -} - -// StructWalker is an interface that has methods that are called for -// structs when a Walk is done. -type StructWalker interface { - Struct(reflect.Value) error - StructField(reflect.StructField, reflect.Value) error -} - -// EnterExitWalker implementations are notified before and after -// they walk deeper into complex structures (into struct fields, -// into slice elements, etc.) -type EnterExitWalker interface { - Enter(Location) error - Exit(Location) error -} - -// PointerWalker implementations are notified when the value they're -// walking is a pointer or not. Pointer is called for _every_ value whether -// it is a pointer or not. -type PointerWalker interface { - PointerEnter(bool) error - PointerExit(bool) error -} - -// SkipEntry can be returned from walk functions to skip walking -// the value of this field. This is only valid in the following functions: -// -// - Struct: skips all fields from being walked -// - StructField: skips walking the struct value -// -var SkipEntry = errors.New("skip this entry") - -// Walk takes an arbitrary value and an interface and traverses the -// value, calling callbacks on the interface if they are supported. -// The interface should implement one or more of the walker interfaces -// in this package, such as PrimitiveWalker, StructWalker, etc. 
-func Walk(data, walker interface{}) (err error) { - v := reflect.ValueOf(data) - ew, ok := walker.(EnterExitWalker) - if ok { - err = ew.Enter(WalkLoc) - } - - if err == nil { - err = walk(v, walker) - } - - if ok && err == nil { - err = ew.Exit(WalkLoc) - } - - return -} - -func walk(v reflect.Value, w interface{}) (err error) { - // Determine if we're receiving a pointer and if so notify the walker. - // The logic here is convoluted but very important (tests will fail if - // almost any part is changed). I will try to explain here. - // - // First, we check if the value is an interface, if so, we really need - // to check the interface's VALUE to see whether it is a pointer. - // - // Check whether the value is then a pointer. If so, then set pointer - // to true to notify the user. - // - // If we still have a pointer or an interface after the indirections, then - // we unwrap another level - // - // At this time, we also set "v" to be the dereferenced value. This is - // because once we've unwrapped the pointer we want to use that value. - pointer := false - pointerV := v - - for { - if pointerV.Kind() == reflect.Interface { - if iw, ok := w.(InterfaceWalker); ok { - if err = iw.Interface(pointerV); err != nil { - return - } - } - - pointerV = pointerV.Elem() - } - - if pointerV.Kind() == reflect.Ptr { - pointer = true - v = reflect.Indirect(pointerV) - } - if pw, ok := w.(PointerWalker); ok { - if err = pw.PointerEnter(pointer); err != nil { - return - } - - defer func(pointer bool) { - if err != nil { - return - } - - err = pw.PointerExit(pointer) - }(pointer) - } - - if pointer { - pointerV = v - } - pointer = false - - // If we still have a pointer or interface we have to indirect another level. - switch pointerV.Kind() { - case reflect.Ptr, reflect.Interface: - continue - } - break - } - - // We preserve the original value here because if it is an interface - // type, we want to pass that directly into the walkPrimitive, so that - // we can set it. 
- originalV := v - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - k := v.Kind() - if k >= reflect.Int && k <= reflect.Complex128 { - k = reflect.Int - } - - switch k { - // Primitives - case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: - err = walkPrimitive(originalV, w) - return - case reflect.Map: - err = walkMap(v, w) - return - case reflect.Slice: - err = walkSlice(v, w) - return - case reflect.Struct: - err = walkStruct(v, w) - return - case reflect.Array: - err = walkArray(v, w) - return - default: - panic("unsupported type: " + k.String()) - } -} - -func walkMap(v reflect.Value, w interface{}) error { - ew, ewok := w.(EnterExitWalker) - if ewok { - ew.Enter(Map) - } - - if mw, ok := w.(MapWalker); ok { - if err := mw.Map(v); err != nil { - return err - } - } - - for _, k := range v.MapKeys() { - kv := v.MapIndex(k) - - if mw, ok := w.(MapWalker); ok { - if err := mw.MapElem(v, k, kv); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(MapKey) - } - - if err := walk(k, w); err != nil { - return err - } - - if ok { - ew.Exit(MapKey) - ew.Enter(MapValue) - } - - if err := walk(kv, w); err != nil { - return err - } - - if ok { - ew.Exit(MapValue) - } - } - - if ewok { - ew.Exit(Map) - } - - return nil -} - -func walkPrimitive(v reflect.Value, w interface{}) error { - if pw, ok := w.(PrimitiveWalker); ok { - return pw.Primitive(v) - } - - return nil -} - -func walkSlice(v reflect.Value, w interface{}) (err error) { - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(Slice) - } - - if sw, ok := w.(SliceWalker); ok { - if err := sw.Slice(v); err != nil { - return err - } - } - - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - - if sw, ok := w.(SliceWalker); ok { - if err := sw.SliceElem(i, elem); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(SliceElem) - } - - if err := walk(elem, w); err != nil { - return err - } - - if ok { 
- ew.Exit(SliceElem) - } - } - - ew, ok = w.(EnterExitWalker) - if ok { - ew.Exit(Slice) - } - - return nil -} - -func walkArray(v reflect.Value, w interface{}) (err error) { - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(Array) - } - - if aw, ok := w.(ArrayWalker); ok { - if err := aw.Array(v); err != nil { - return err - } - } - - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - - if aw, ok := w.(ArrayWalker); ok { - if err := aw.ArrayElem(i, elem); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(ArrayElem) - } - - if err := walk(elem, w); err != nil { - return err - } - - if ok { - ew.Exit(ArrayElem) - } - } - - ew, ok = w.(EnterExitWalker) - if ok { - ew.Exit(Array) - } - - return nil -} - -func walkStruct(v reflect.Value, w interface{}) (err error) { - ew, ewok := w.(EnterExitWalker) - if ewok { - ew.Enter(Struct) - } - - skip := false - if sw, ok := w.(StructWalker); ok { - err = sw.Struct(v) - if err == SkipEntry { - skip = true - err = nil - } - if err != nil { - return - } - } - - if !skip { - vt := v.Type() - for i := 0; i < vt.NumField(); i++ { - sf := vt.Field(i) - f := v.FieldByIndex([]int{i}) - - if sw, ok := w.(StructWalker); ok { - err = sw.StructField(sf, f) - - // SkipEntry just pretends this field doesn't even exist - if err == SkipEntry { - continue - } - - if err != nil { - return - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(StructField) - } - - err = walk(f, w) - if err != nil { - return - } - - if ok { - ew.Exit(StructField) - } - } - } - - if ewok { - ew.Exit(Struct) - } - - return nil -} diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md deleted file mode 100644 index 7c14febe1099..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md +++ /dev/null @@ -1,46 +0,0 @@ -Changes by Version -================== - -1.1.0 (2019-03-23) -------------------- - -Notable changes: -- 
The library is now released under Apache 2.0 license -- Use Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159)) -- 'golang.org/x/net/context' is replaced with 'context' from the standard library - -List of all changes: - -- Export StartSpanFromContextWithTracer (#214) -- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) -- Use Set() instead of Add() in HTTPHeadersCarrier (#191) -- Update license to Apache 2.0 (#181) -- Replace 'golang.org/x/net/context' with 'context' (#176) -- Port of Python opentracing/harness/api_check.py to Go (#146) -- Fix race condition in MockSpan.Context() (#170) -- Add PeerHostIPv4.SetString() (#155) -- Add a Noop log field type to log to allow for optional fields (#150) - - -1.0.2 (2017-04-26) -------------------- - -- Add more semantic tags (#139) - - -1.0.1 (2017-02-06) -------------------- - -- Correct spelling in comments -- Address race in nextMockID() (#123) -- log: avoid panic marshaling nil error (#131) -- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) -- Drop Go 1.5 that fails in Travis (#129) -- Add convenience methods Key() and Value() to log.Field -- Add convenience methods to log.Field (2 years, 6 months ago) - -1.0.0 (2016-09-26) -------------------- - -- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec) - diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE deleted file mode 100644 index f0027349e830..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 The OpenTracing Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile deleted file mode 100644 index 62abb63f58de..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -.DEFAULT_GOAL := test-and-lint - -.PHONY: test-and-lint -test-and-lint: test lint - -.PHONY: test -test: - go test -v -cover -race ./... - -.PHONY: cover -cover: - go test -v -coverprofile=coverage.txt -covermode=atomic -race ./... - -.PHONY: lint -lint: - go fmt ./... - golint ./... - @# Run again with magic to exit non-zero if golint outputs anything. - @! (golint ./... | read dummy) - go vet ./... 
diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md deleted file mode 100644 index 6ef1d7c9d274..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/README.md +++ /dev/null @@ -1,171 +0,0 @@ -[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge) - -# OpenTracing API for Go - -This package is a Go platform API for OpenTracing. - -## Required Reading - -In order to understand the Go platform API, one must first be familiar with the -[OpenTracing project](https://opentracing.io) and -[terminology](https://opentracing.io/specification/) more specifically. - -## API overview for those adding instrumentation - -Everyday consumers of this `opentracing` package really only need to worry -about a couple of key abstractions: the `StartSpan` function, the `Span` -interface, and binding a `Tracer` at `main()`-time. Here are code snippets -demonstrating some important use cases. - -#### Singleton initialization - -The simplest starting point is `./default_tracer.go`. As early as possible, call - -```go - import "github.com/opentracing/opentracing-go" - import ".../some_tracing_impl" - - func main() { - opentracing.SetGlobalTracer( - // tracing impl specific: - some_tracing_impl.New(...), - ) - ... - } -``` - -#### Non-Singleton initialization - -If you prefer direct control to singletons, manage ownership of the -`opentracing.Tracer` implementation explicitly. 
- -#### Creating a Span given an existing Go `context.Context` - -If you use `context.Context` in your application, OpenTracing's Go library will -happily rely on it for `Span` propagation. To start a new (blocking child) -`Span`, you can use `StartSpanFromContext`. - -```go - func xyz(ctx context.Context, ...) { - ... - span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name") - defer span.Finish() - span.LogFields( - log.String("event", "soft error"), - log.String("type", "cache timeout"), - log.Int("waited.millis", 1500)) - ... - } -``` - -#### Starting an empty trace by creating a "root span" - -It's always possible to create a "root" `Span` with no parent or other causal -reference. - -```go - func xyz() { - ... - sp := opentracing.StartSpan("operation_name") - defer sp.Finish() - ... - } -``` - -#### Creating a (child) Span given an existing (parent) Span - -```go - func xyz(parentSpan opentracing.Span, ...) { - ... - sp := opentracing.StartSpan( - "operation_name", - opentracing.ChildOf(parentSpan.Context())) - defer sp.Finish() - ... - } -``` - -#### Serializing to the wire - -```go - func makeSomeRequest(ctx context.Context) ... { - if span := opentracing.SpanFromContext(ctx); span != nil { - httpClient := &http.Client{} - httpReq, _ := http.NewRequest("GET", "http://myservice/", nil) - - // Transmit the span's TraceContext as HTTP headers on our - // outbound request. - opentracing.GlobalTracer().Inject( - span.Context(), - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(httpReq.Header)) - - resp, err := httpClient.Do(httpReq) - ... - } - ... - } -``` - -#### Deserializing from the wire - -```go - http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - var serverSpan opentracing.Span - appSpecificOperationName := ... 
- wireContext, err := opentracing.GlobalTracer().Extract( - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(req.Header)) - if err != nil { - // Optionally record something about err here - } - - // Create the span referring to the RPC client if available. - // If wireContext == nil, a root span will be created. - serverSpan = opentracing.StartSpan( - appSpecificOperationName, - ext.RPCServerOption(wireContext)) - - defer serverSpan.Finish() - - ctx := opentracing.ContextWithSpan(context.Background(), serverSpan) - ... - } -``` - -#### Conditionally capture a field using `log.Noop` - -In some situations, you may want to dynamically decide whether or not -to log a field. For example, you may want to capture additional data, -such as a customer ID, in non-production environments: - -```go - func Customer(order *Order) log.Field { - if os.Getenv("ENVIRONMENT") == "dev" { - return log.String("customer", order.Customer.ID) - } - return log.Noop() - } -``` - -#### Goroutine-safety - -The entire public API is goroutine-safe and does not require external -synchronization. - -## API pointers for those implementing a tracing system - -Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`. - -## API compatibility - -For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. - -## Tracer test suite - -A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly. - -## Licensing - -[Apache 2.0 License](./LICENSE). 
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go deleted file mode 100644 index 52e889582a85..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext/tags.go +++ /dev/null @@ -1,210 +0,0 @@ -package ext - -import "github.com/opentracing/opentracing-go" - -// These constants define common tag names recommended for better portability across -// tracing systems and languages/platforms. -// -// The tag names are defined as typed strings, so that in addition to the usual use -// -// span.setTag(TagName, value) -// -// they also support value type validation via this additional syntax: -// -// TagName.Set(span, value) -// -var ( - ////////////////////////////////////////////////////////////////////// - // SpanKind (client/server or producer/consumer) - ////////////////////////////////////////////////////////////////////// - - // SpanKind hints at relationship between spans, e.g. client/server - SpanKind = spanKindTagName("span.kind") - - // SpanKindRPCClient marks a span representing the client-side of an RPC - // or other remote call - SpanKindRPCClientEnum = SpanKindEnum("client") - SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum} - - // SpanKindRPCServer marks a span representing the server-side of an RPC - // or other remote call - SpanKindRPCServerEnum = SpanKindEnum("server") - SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum} - - // SpanKindProducer marks a span representing the producer-side of a - // message bus - SpanKindProducerEnum = SpanKindEnum("producer") - SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum} - - // SpanKindConsumer marks a span representing the consumer-side of a - // message bus - SpanKindConsumerEnum = SpanKindEnum("consumer") - SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum} - - 
////////////////////////////////////////////////////////////////////// - // Component name - ////////////////////////////////////////////////////////////////////// - - // Component is a low-cardinality identifier of the module, library, - // or package that is generating a span. - Component = stringTagName("component") - - ////////////////////////////////////////////////////////////////////// - // Sampling hint - ////////////////////////////////////////////////////////////////////// - - // SamplingPriority determines the priority of sampling this Span. - SamplingPriority = uint16TagName("sampling.priority") - - ////////////////////////////////////////////////////////////////////// - // Peer tags. These tags can be emitted by either client-side of - // server-side to describe the other side/service in a peer-to-peer - // communications, like an RPC call. - ////////////////////////////////////////////////////////////////////// - - // PeerService records the service name of the peer. - PeerService = stringTagName("peer.service") - - // PeerAddress records the address name of the peer. This may be a "ip:port", - // a bare "hostname", a FQDN or even a database DSN substring - // like "mysql://username@127.0.0.1:3306/dbname" - PeerAddress = stringTagName("peer.address") - - // PeerHostname records the host name of the peer - PeerHostname = stringTagName("peer.hostname") - - // PeerHostIPv4 records IP v4 host address of the peer - PeerHostIPv4 = ipv4Tag("peer.ipv4") - - // PeerHostIPv6 records IP v6 host address of the peer - PeerHostIPv6 = stringTagName("peer.ipv6") - - // PeerPort records port number of the peer - PeerPort = uint16TagName("peer.port") - - ////////////////////////////////////////////////////////////////////// - // HTTP Tags - ////////////////////////////////////////////////////////////////////// - - // HTTPUrl should be the URL of the request being handled in this segment - // of the trace, in standard URI format. The protocol is optional. 
- HTTPUrl = stringTagName("http.url") - - // HTTPMethod is the HTTP method of the request, and is case-insensitive. - HTTPMethod = stringTagName("http.method") - - // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the - // HTTP response. - HTTPStatusCode = uint16TagName("http.status_code") - - ////////////////////////////////////////////////////////////////////// - // DB Tags - ////////////////////////////////////////////////////////////////////// - - // DBInstance is database instance name. - DBInstance = stringTagName("db.instance") - - // DBStatement is a database statement for the given database type. - // It can be a query or a prepared statement (i.e., before substitution). - DBStatement = stringTagName("db.statement") - - // DBType is a database type. For any SQL database, "sql". - // For others, the lower-case database category, e.g. "redis" - DBType = stringTagName("db.type") - - // DBUser is a username for accessing database. - DBUser = stringTagName("db.user") - - ////////////////////////////////////////////////////////////////////// - // Message Bus Tag - ////////////////////////////////////////////////////////////////////// - - // MessageBusDestination is an address at which messages can be exchanged - MessageBusDestination = stringTagName("message_bus.destination") - - ////////////////////////////////////////////////////////////////////// - // Error Tag - ////////////////////////////////////////////////////////////////////// - - // Error indicates that operation represented by the span resulted in an error. 
- Error = boolTagName("error") -) - -// --- - -// SpanKindEnum represents common span types -type SpanKindEnum string - -type spanKindTagName string - -// Set adds a string tag to the `span` -func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) { - span.SetTag(string(tag), value) -} - -type rpcServerOption struct { - clientContext opentracing.SpanContext -} - -func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) { - if r.clientContext != nil { - opentracing.ChildOf(r.clientContext).Apply(o) - } - SpanKindRPCServer.Apply(o) -} - -// RPCServerOption returns a StartSpanOption appropriate for an RPC server span -// with `client` representing the metadata for the remote peer Span if available. -// In case client == nil, due to the client not being instrumented, this RPC -// server span will be a root span. -func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption { - return rpcServerOption{client} -} - -// --- - -type stringTagName string - -// Set adds a string tag to the `span` -func (tag stringTagName) Set(span opentracing.Span, value string) { - span.SetTag(string(tag), value) -} - -// --- - -type uint32TagName string - -// Set adds a uint32 tag to the `span` -func (tag uint32TagName) Set(span opentracing.Span, value uint32) { - span.SetTag(string(tag), value) -} - -// --- - -type uint16TagName string - -// Set adds a uint16 tag to the `span` -func (tag uint16TagName) Set(span opentracing.Span, value uint16) { - span.SetTag(string(tag), value) -} - -// --- - -type boolTagName string - -// Add adds a bool tag to the `span` -func (tag boolTagName) Set(span opentracing.Span, value bool) { - span.SetTag(string(tag), value) -} - -type ipv4Tag string - -// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility -func (tag ipv4Tag) Set(span opentracing.Span, value uint32) { - span.SetTag(string(tag), value) -} - -// SetString records IP v4 host address 
of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1" -func (tag ipv4Tag) SetString(span opentracing.Span, value string) { - span.SetTag(string(tag), value) -} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go deleted file mode 100644 index 4f7066a925cd..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/globaltracer.go +++ /dev/null @@ -1,42 +0,0 @@ -package opentracing - -type registeredTracer struct { - tracer Tracer - isRegistered bool -} - -var ( - globalTracer = registeredTracer{NoopTracer{}, false} -) - -// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by -// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an -// opentracing.Tracer instance) should call SetGlobalTracer as early as -// possible in main(), prior to calling the `StartSpan` global func below. -// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` -// (etc) globals are noops. -func SetGlobalTracer(tracer Tracer) { - globalTracer = registeredTracer{tracer, true} -} - -// GlobalTracer returns the global singleton `Tracer` implementation. -// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop -// implementation that drops all data handed to it. -func GlobalTracer() Tracer { - return globalTracer.tracer -} - -// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. -func StartSpan(operationName string, opts ...StartSpanOption) Span { - return globalTracer.tracer.StartSpan(operationName, opts...) -} - -// InitGlobalTracer is deprecated. Please use SetGlobalTracer. 
-func InitGlobalTracer(tracer Tracer) { - SetGlobalTracer(tracer) -} - -// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered -func IsGlobalTracerRegistered() bool { - return globalTracer.isRegistered -} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go deleted file mode 100644 index 08c00c04e82a..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/gocontext.go +++ /dev/null @@ -1,60 +0,0 @@ -package opentracing - -import "context" - -type contextKey struct{} - -var activeSpanKey = contextKey{} - -// ContextWithSpan returns a new `context.Context` that holds a reference to -// `span`'s SpanContext. -func ContextWithSpan(ctx context.Context, span Span) context.Context { - return context.WithValue(ctx, activeSpanKey, span) -} - -// SpanFromContext returns the `Span` previously associated with `ctx`, or -// `nil` if no such `Span` could be found. -// -// NOTE: context.Context != SpanContext: the former is Go's intra-process -// context propagation mechanism, and the latter houses OpenTracing's per-Span -// identity and baggage information. -func SpanFromContext(ctx context.Context) Span { - val := ctx.Value(activeSpanKey) - if sp, ok := val.(Span); ok { - return sp - } - return nil -} - -// StartSpanFromContext starts and returns a Span with `operationName`, using -// any Span found within `ctx` as a ChildOfRef. If no such parent could be -// found, StartSpanFromContext creates a root (parentless) Span. -// -// The second return value is a context.Context object built around the -// returned Span. -// -// Example usage: -// -// SomeFunction(ctx context.Context, ...) { -// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") -// defer sp.Finish() -// ... 
-// } -func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { - return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) -} - -// StartSpanFromContextWithTracer starts and returns a span with `operationName` -// using a span found within the context as a ChildOfRef. If that doesn't exist -// it creates a root span. It also returns a context.Context object built -// around the returned span. -// -// It's behavior is identical to StartSpanFromContext except that it takes an explicit -// tracer as opposed to using the global tracer. -func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { - if parentSpan := SpanFromContext(ctx); parentSpan != nil { - opts = append(opts, ChildOf(parentSpan.Context())) - } - span := tracer.StartSpan(operationName, opts...) - return span, ContextWithSpan(ctx, span) -} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go deleted file mode 100644 index 50feea341a73..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/log/field.go +++ /dev/null @@ -1,269 +0,0 @@ -package log - -import ( - "fmt" - "math" -) - -type fieldType int - -const ( - stringType fieldType = iota - boolType - intType - int32Type - uint32Type - int64Type - uint64Type - float32Type - float64Type - errorType - objectType - lazyLoggerType - noopType -) - -// Field instances are constructed via LogBool, LogString, and so on. -// Tracing implementations may then handle them via the Field.Marshal -// method. 
-// -// "heavily influenced by" (i.e., partially stolen from) -// https://github.com/uber-go/zap -type Field struct { - key string - fieldType fieldType - numericVal int64 - stringVal string - interfaceVal interface{} -} - -// String adds a string-valued key:value pair to a Span.LogFields() record -func String(key, val string) Field { - return Field{ - key: key, - fieldType: stringType, - stringVal: val, - } -} - -// Bool adds a bool-valued key:value pair to a Span.LogFields() record -func Bool(key string, val bool) Field { - var numericVal int64 - if val { - numericVal = 1 - } - return Field{ - key: key, - fieldType: boolType, - numericVal: numericVal, - } -} - -// Int adds an int-valued key:value pair to a Span.LogFields() record -func Int(key string, val int) Field { - return Field{ - key: key, - fieldType: intType, - numericVal: int64(val), - } -} - -// Int32 adds an int32-valued key:value pair to a Span.LogFields() record -func Int32(key string, val int32) Field { - return Field{ - key: key, - fieldType: int32Type, - numericVal: int64(val), - } -} - -// Int64 adds an int64-valued key:value pair to a Span.LogFields() record -func Int64(key string, val int64) Field { - return Field{ - key: key, - fieldType: int64Type, - numericVal: val, - } -} - -// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record -func Uint32(key string, val uint32) Field { - return Field{ - key: key, - fieldType: uint32Type, - numericVal: int64(val), - } -} - -// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record -func Uint64(key string, val uint64) Field { - return Field{ - key: key, - fieldType: uint64Type, - numericVal: int64(val), - } -} - -// Float32 adds a float32-valued key:value pair to a Span.LogFields() record -func Float32(key string, val float32) Field { - return Field{ - key: key, - fieldType: float32Type, - numericVal: int64(math.Float32bits(val)), - } -} - -// Float64 adds a float64-valued key:value pair to a Span.LogFields() record 
-func Float64(key string, val float64) Field { - return Field{ - key: key, - fieldType: float64Type, - numericVal: int64(math.Float64bits(val)), - } -} - -// Error adds an error with the key "error" to a Span.LogFields() record -func Error(err error) Field { - return Field{ - key: "error", - fieldType: errorType, - interfaceVal: err, - } -} - -// Object adds an object-valued key:value pair to a Span.LogFields() record -func Object(key string, obj interface{}) Field { - return Field{ - key: key, - fieldType: objectType, - interfaceVal: obj, - } -} - -// LazyLogger allows for user-defined, late-bound logging of arbitrary data -type LazyLogger func(fv Encoder) - -// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing -// implementation will call the LazyLogger function at an indefinite time in -// the future (after Lazy() returns). -func Lazy(ll LazyLogger) Field { - return Field{ - fieldType: lazyLoggerType, - interfaceVal: ll, - } -} - -// Noop creates a no-op log field that should be ignored by the tracer. -// It can be used to capture optional fields, for example those that should -// only be logged in non-production environment: -// -// func customerField(order *Order) log.Field { -// if os.Getenv("ENVIRONMENT") == "dev" { -// return log.String("customer", order.Customer.ID) -// } -// return log.Noop() -// } -// -// span.LogFields(log.String("event", "purchase"), customerField(order)) -// -func Noop() Field { - return Field{ - fieldType: noopType, - } -} - -// Encoder allows access to the contents of a Field (via a call to -// Field.Marshal). -// -// Tracer implementations typically provide an implementation of Encoder; -// OpenTracing callers typically do not need to concern themselves with it. 
-type Encoder interface { - EmitString(key, value string) - EmitBool(key string, value bool) - EmitInt(key string, value int) - EmitInt32(key string, value int32) - EmitInt64(key string, value int64) - EmitUint32(key string, value uint32) - EmitUint64(key string, value uint64) - EmitFloat32(key string, value float32) - EmitFloat64(key string, value float64) - EmitObject(key string, value interface{}) - EmitLazyLogger(value LazyLogger) -} - -// Marshal passes a Field instance through to the appropriate -// field-type-specific method of an Encoder. -func (lf Field) Marshal(visitor Encoder) { - switch lf.fieldType { - case stringType: - visitor.EmitString(lf.key, lf.stringVal) - case boolType: - visitor.EmitBool(lf.key, lf.numericVal != 0) - case intType: - visitor.EmitInt(lf.key, int(lf.numericVal)) - case int32Type: - visitor.EmitInt32(lf.key, int32(lf.numericVal)) - case int64Type: - visitor.EmitInt64(lf.key, int64(lf.numericVal)) - case uint32Type: - visitor.EmitUint32(lf.key, uint32(lf.numericVal)) - case uint64Type: - visitor.EmitUint64(lf.key, uint64(lf.numericVal)) - case float32Type: - visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) - case float64Type: - visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) - case errorType: - if err, ok := lf.interfaceVal.(error); ok { - visitor.EmitString(lf.key, err.Error()) - } else { - visitor.EmitString(lf.key, "") - } - case objectType: - visitor.EmitObject(lf.key, lf.interfaceVal) - case lazyLoggerType: - visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) - case noopType: - // intentionally left blank - } -} - -// Key returns the field's key. -func (lf Field) Key() string { - return lf.key -} - -// Value returns the field's value as interface{}. 
-func (lf Field) Value() interface{} { - switch lf.fieldType { - case stringType: - return lf.stringVal - case boolType: - return lf.numericVal != 0 - case intType: - return int(lf.numericVal) - case int32Type: - return int32(lf.numericVal) - case int64Type: - return int64(lf.numericVal) - case uint32Type: - return uint32(lf.numericVal) - case uint64Type: - return uint64(lf.numericVal) - case float32Type: - return math.Float32frombits(uint32(lf.numericVal)) - case float64Type: - return math.Float64frombits(uint64(lf.numericVal)) - case errorType, objectType, lazyLoggerType: - return lf.interfaceVal - case noopType: - return nil - default: - return nil - } -} - -// String returns a string representation of the key and value. -func (lf Field) String() string { - return fmt.Sprint(lf.key, ":", lf.Value()) -} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go deleted file mode 100644 index 3832feb5ceb2..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/log/util.go +++ /dev/null @@ -1,54 +0,0 @@ -package log - -import "fmt" - -// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice -// a la Span.LogFields(). 
-func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { - if len(keyValues)%2 != 0 { - return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) - } - fields := make([]Field, len(keyValues)/2) - for i := 0; i*2 < len(keyValues); i++ { - key, ok := keyValues[i*2].(string) - if !ok { - return nil, fmt.Errorf( - "non-string key (pair #%d): %T", - i, keyValues[i*2]) - } - switch typedVal := keyValues[i*2+1].(type) { - case bool: - fields[i] = Bool(key, typedVal) - case string: - fields[i] = String(key, typedVal) - case int: - fields[i] = Int(key, typedVal) - case int8: - fields[i] = Int32(key, int32(typedVal)) - case int16: - fields[i] = Int32(key, int32(typedVal)) - case int32: - fields[i] = Int32(key, typedVal) - case int64: - fields[i] = Int64(key, typedVal) - case uint: - fields[i] = Uint64(key, uint64(typedVal)) - case uint64: - fields[i] = Uint64(key, typedVal) - case uint8: - fields[i] = Uint32(key, uint32(typedVal)) - case uint16: - fields[i] = Uint32(key, uint32(typedVal)) - case uint32: - fields[i] = Uint32(key, typedVal) - case float32: - fields[i] = Float32(key, typedVal) - case float64: - fields[i] = Float64(key, typedVal) - default: - // When in doubt, coerce to a string - fields[i] = String(key, fmt.Sprint(typedVal)) - } - } - return fields, nil -} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go deleted file mode 100644 index 0d32f692c410..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/noop.go +++ /dev/null @@ -1,64 +0,0 @@ -package opentracing - -import "github.com/opentracing/opentracing-go/log" - -// A NoopTracer is a trivial, minimum overhead implementation of Tracer -// for which all operations are no-ops. -// -// The primary use of this implementation is in libraries, such as RPC -// frameworks, that make tracing an optional feature controlled by the -// end user. 
A no-op implementation allows said libraries to use it -// as the default Tracer and to write instrumentation that does -// not need to keep checking if the tracer instance is nil. -// -// For the same reason, the NoopTracer is the default "global" tracer -// (see GlobalTracer and SetGlobalTracer functions). -// -// WARNING: NoopTracer does not support baggage propagation. -type NoopTracer struct{} - -type noopSpan struct{} -type noopSpanContext struct{} - -var ( - defaultNoopSpanContext = noopSpanContext{} - defaultNoopSpan = noopSpan{} - defaultNoopTracer = NoopTracer{} -) - -const ( - emptyString = "" -) - -// noopSpanContext: -func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} - -// noopSpan: -func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } -func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan } -func (n noopSpan) BaggageItem(key string) string { return emptyString } -func (n noopSpan) SetTag(key string, value interface{}) Span { return n } -func (n noopSpan) LogFields(fields ...log.Field) {} -func (n noopSpan) LogKV(keyVals ...interface{}) {} -func (n noopSpan) Finish() {} -func (n noopSpan) FinishWithOptions(opts FinishOptions) {} -func (n noopSpan) SetOperationName(operationName string) Span { return n } -func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } -func (n noopSpan) LogEvent(event string) {} -func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} -func (n noopSpan) Log(data LogData) {} - -// StartSpan belongs to the Tracer interface. -func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { - return defaultNoopSpan -} - -// Inject belongs to the Tracer interface. -func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { - return nil -} - -// Extract belongs to the Tracer interface. 
-func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { - return nil, ErrSpanContextNotFound -} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go deleted file mode 100644 index b0c275eb05e4..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/propagation.go +++ /dev/null @@ -1,176 +0,0 @@ -package opentracing - -import ( - "errors" - "net/http" -) - -/////////////////////////////////////////////////////////////////////////////// -// CORE PROPAGATION INTERFACES: -/////////////////////////////////////////////////////////////////////////////// - -var ( - // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or - // Tracer.Extract() is not recognized by the Tracer implementation. - ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") - - // ErrSpanContextNotFound occurs when the `carrier` passed to - // Tracer.Extract() is valid and uncorrupted but has insufficient - // information to extract a SpanContext. - ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") - - // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to - // operate on a SpanContext which it is not prepared to handle (for - // example, since it was created by a different tracer implementation). - ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") - - // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() - // implementations expect a different type of `carrier` than they are - // given. - ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") - - // ErrSpanContextCorrupted occurs when the `carrier` passed to - // Tracer.Extract() is of the expected type but is corrupted. 
- ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") -) - -/////////////////////////////////////////////////////////////////////////////// -// BUILTIN PROPAGATION FORMATS: -/////////////////////////////////////////////////////////////////////////////// - -// BuiltinFormat is used to demarcate the values within package `opentracing` -// that are intended for use with the Tracer.Inject() and Tracer.Extract() -// methods. -type BuiltinFormat byte - -const ( - // Binary represents SpanContexts as opaque binary data. - // - // For Tracer.Inject(): the carrier must be an `io.Writer`. - // - // For Tracer.Extract(): the carrier must be an `io.Reader`. - Binary BuiltinFormat = iota - - // TextMap represents SpanContexts as key:value string pairs. - // - // Unlike HTTPHeaders, the TextMap format does not restrict the key or - // value character sets in any way. - // - // For Tracer.Inject(): the carrier must be a `TextMapWriter`. - // - // For Tracer.Extract(): the carrier must be a `TextMapReader`. - TextMap - - // HTTPHeaders represents SpanContexts as HTTP header string pairs. - // - // Unlike TextMap, the HTTPHeaders format requires that the keys and values - // be valid as HTTP headers as-is (i.e., character casing may be unstable - // and special characters are disallowed in keys, values should be - // URL-escaped, etc). - // - // For Tracer.Inject(): the carrier must be a `TextMapWriter`. - // - // For Tracer.Extract(): the carrier must be a `TextMapReader`. - // - // See HTTPHeadersCarrier for an implementation of both TextMapWriter - // and TextMapReader that defers to an http.Header instance for storage. 
- // For example, Inject(): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // err := span.Tracer().Inject( - // span.Context(), opentracing.HTTPHeaders, carrier) - // - // Or Extract(): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // clientContext, err := tracer.Extract( - // opentracing.HTTPHeaders, carrier) - // - HTTPHeaders -) - -// TextMapWriter is the Inject() carrier for the TextMap builtin format. With -// it, the caller can encode a SpanContext for propagation as entries in a map -// of unicode strings. -type TextMapWriter interface { - // Set a key:value pair to the carrier. Multiple calls to Set() for the - // same key leads to undefined behavior. - // - // NOTE: The backing store for the TextMapWriter may contain data unrelated - // to SpanContext. As such, Inject() and Extract() implementations that - // call the TextMapWriter and TextMapReader interfaces must agree on a - // prefix or other convention to distinguish their own key:value pairs. - Set(key, val string) -} - -// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, -// the caller can decode a propagated SpanContext as entries in a map of -// unicode strings. -type TextMapReader interface { - // ForeachKey returns TextMap contents via repeated calls to the `handler` - // function. If any call to `handler` returns a non-nil error, ForeachKey - // terminates and returns that error. - // - // NOTE: The backing store for the TextMapReader may contain data unrelated - // to SpanContext. As such, Inject() and Extract() implementations that - // call the TextMapWriter and TextMapReader interfaces must agree on a - // prefix or other convention to distinguish their own key:value pairs. - // - // The "foreach" callback pattern reduces unnecessary copying in some cases - // and also allows implementations to hold locks while the map is read. 
- ForeachKey(handler func(key, val string) error) error -} - -// TextMapCarrier allows the use of regular map[string]string -// as both TextMapWriter and TextMapReader. -type TextMapCarrier map[string]string - -// ForeachKey conforms to the TextMapReader interface. -func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { - for k, v := range c { - if err := handler(k, v); err != nil { - return err - } - } - return nil -} - -// Set implements Set() of opentracing.TextMapWriter -func (c TextMapCarrier) Set(key, val string) { - c[key] = val -} - -// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader. -// -// Example usage for server side: -// -// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) -// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) -// -// Example usage for client side: -// -// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) -// err := tracer.Inject( -// span.Context(), -// opentracing.HTTPHeaders, -// carrier) -// -type HTTPHeadersCarrier http.Header - -// Set conforms to the TextMapWriter interface. -func (c HTTPHeadersCarrier) Set(key, val string) { - h := http.Header(c) - h.Set(key, val) -} - -// ForeachKey conforms to the TextMapReader interface. -func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { - for k, vals := range c { - for _, v := range vals { - if err := handler(k, v); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go deleted file mode 100644 index 0d3fb5341838..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/span.go +++ /dev/null @@ -1,189 +0,0 @@ -package opentracing - -import ( - "time" - - "github.com/opentracing/opentracing-go/log" -) - -// SpanContext represents Span state that must propagate to descendant Spans and across process -// boundaries (e.g., a tuple). 
-type SpanContext interface { - // ForeachBaggageItem grants access to all baggage items stored in the - // SpanContext. - // The handler function will be called for each baggage key/value pair. - // The ordering of items is not guaranteed. - // - // The bool return value indicates if the handler wants to continue iterating - // through the rest of the baggage items; for example if the handler is trying to - // find some baggage item by pattern matching the name, it can return false - // as soon as the item is found to stop further iterations. - ForeachBaggageItem(handler func(k, v string) bool) -} - -// Span represents an active, un-finished span in the OpenTracing system. -// -// Spans are created by the Tracer interface. -type Span interface { - // Sets the end timestamp and finalizes Span state. - // - // With the exception of calls to Context() (which are always allowed), - // Finish() must be the last call made to any span instance, and to do - // otherwise leads to undefined behavior. - Finish() - // FinishWithOptions is like Finish() but with explicit control over - // timestamps and log data. - FinishWithOptions(opts FinishOptions) - - // Context() yields the SpanContext for this Span. Note that the return - // value of Context() is still valid after a call to Span.Finish(), as is - // a call to Span.Context() after a call to Span.Finish(). - Context() SpanContext - - // Sets or changes the operation name. - // - // Returns a reference to this Span for chaining. - SetOperationName(operationName string) Span - - // Adds a tag to the span. - // - // If there is a pre-existing tag set for `key`, it is overwritten. - // - // Tag values can be numeric types, strings, or bools. The behavior of - // other tag value types is undefined at the OpenTracing level. If a - // tracing system does not know how to handle a particular value type, it - // may ignore the tag, but shall not panic. - // - // Returns a reference to this Span for chaining. 
- SetTag(key string, value interface{}) Span - - // LogFields is an efficient and type-checked way to record key:value - // logging data about a Span, though the programming interface is a little - // more verbose than LogKV(). Here's an example: - // - // span.LogFields( - // log.String("event", "soft error"), - // log.String("type", "cache timeout"), - // log.Int("waited.millis", 1500)) - // - // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. - LogFields(fields ...log.Field) - - // LogKV is a concise, readable way to record key:value logging data about - // a Span, though unfortunately this also makes it less efficient and less - // type-safe than LogFields(). Here's an example: - // - // span.LogKV( - // "event", "soft error", - // "type", "cache timeout", - // "waited.millis", 1500) - // - // For LogKV (as opposed to LogFields()), the parameters must appear as - // key-value pairs, like - // - // span.LogKV(key1, val1, key2, val2, key3, val3, ...) - // - // The keys must all be strings. The values may be strings, numeric types, - // bools, Go error instances, or arbitrary structs. - // - // (Note to implementors: consider the log.InterleavedKVToFields() helper) - LogKV(alternatingKeyValues ...interface{}) - - // SetBaggageItem sets a key:value pair on this Span and its SpanContext - // that also propagates to descendants of this Span. - // - // SetBaggageItem() enables powerful functionality given a full-stack - // opentracing integration (e.g., arbitrary application data from a mobile - // app can make it, transparently, all the way into the depths of a storage - // system), and with it some powerful costs: use this feature with care. - // - // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to - // *future* causal descendants of the associated Span. - // - // IMPORTANT NOTE #2: Use this thoughtfully and with care. 
Every key and - // value is copied into every local *and remote* child of the associated - // Span, and that can add up to a lot of network and cpu overhead. - // - // Returns a reference to this Span for chaining. - SetBaggageItem(restrictedKey, value string) Span - - // Gets the value for a baggage item given its key. Returns the empty string - // if the value isn't found in this Span. - BaggageItem(restrictedKey string) string - - // Provides access to the Tracer that created this Span. - Tracer() Tracer - - // Deprecated: use LogFields or LogKV - LogEvent(event string) - // Deprecated: use LogFields or LogKV - LogEventWithPayload(event string, payload interface{}) - // Deprecated: use LogFields or LogKV - Log(data LogData) -} - -// LogRecord is data associated with a single Span log. Every LogRecord -// instance must specify at least one Field. -type LogRecord struct { - Timestamp time.Time - Fields []log.Field -} - -// FinishOptions allows Span.FinishWithOptions callers to override the finish -// timestamp and provide log data via a bulk interface. -type FinishOptions struct { - // FinishTime overrides the Span's finish time, or implicitly becomes - // time.Now() if FinishTime.IsZero(). - // - // FinishTime must resolve to a timestamp that's >= the Span's StartTime - // (per StartSpanOptions). - FinishTime time.Time - - // LogRecords allows the caller to specify the contents of many LogFields() - // calls with a single slice. May be nil. - // - // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must - // be set explicitly). Also, they must be >= the Span's start timestamp and - // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the - // behavior of FinishWithOptions() is undefined. - // - // If specified, the caller hands off ownership of LogRecords at - // FinishWithOptions() invocation time. - // - // If specified, the (deprecated) BulkLogData must be nil or empty. 
- LogRecords []LogRecord - - // BulkLogData is DEPRECATED. - BulkLogData []LogData -} - -// LogData is DEPRECATED -type LogData struct { - Timestamp time.Time - Event string - Payload interface{} -} - -// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord -func (ld *LogData) ToLogRecord() LogRecord { - var literalTimestamp time.Time - if ld.Timestamp.IsZero() { - literalTimestamp = time.Now() - } else { - literalTimestamp = ld.Timestamp - } - rval := LogRecord{ - Timestamp: literalTimestamp, - } - if ld.Payload == nil { - rval.Fields = []log.Field{ - log.String("event", ld.Event), - } - } else { - rval.Fields = []log.Field{ - log.String("event", ld.Event), - log.Object("payload", ld.Payload), - } - } - return rval -} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go deleted file mode 100644 index 715f0cedfb60..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/tracer.go +++ /dev/null @@ -1,304 +0,0 @@ -package opentracing - -import "time" - -// Tracer is a simple, thin interface for Span creation and SpanContext -// propagation. -type Tracer interface { - - // Create, start, and return a new Span with the given `operationName` and - // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows - // from the "functional options" pattern, per - // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) - // - // A Span with no SpanReference options (e.g., opentracing.ChildOf() or - // opentracing.FollowsFrom()) becomes the root of its own trace. - // - // Examples: - // - // var tracer opentracing.Tracer = ... 
- // - // // The root-span case: - // sp := tracer.StartSpan("GetFeed") - // - // // The vanilla child span case: - // sp := tracer.StartSpan( - // "GetFeed", - // opentracing.ChildOf(parentSpan.Context())) - // - // // All the bells and whistles: - // sp := tracer.StartSpan( - // "GetFeed", - // opentracing.ChildOf(parentSpan.Context()), - // opentracing.Tag{"user_agent", loggedReq.UserAgent}, - // opentracing.StartTime(loggedReq.Timestamp), - // ) - // - StartSpan(operationName string, opts ...StartSpanOption) Span - - // Inject() takes the `sm` SpanContext instance and injects it for - // propagation within `carrier`. The actual type of `carrier` depends on - // the value of `format`. - // - // OpenTracing defines a common set of `format` values (see BuiltinFormat), - // and each has an expected carrier type. - // - // Other packages may declare their own `format` values, much like the keys - // used by `context.Context` (see https://godoc.org/context#WithValue). - // - // Example usage (sans error handling): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // err := tracer.Inject( - // span.Context(), - // opentracing.HTTPHeaders, - // carrier) - // - // NOTE: All opentracing.Tracer implementations MUST support all - // BuiltinFormats. - // - // Implementations may return opentracing.ErrUnsupportedFormat if `format` - // is not supported by (or not known by) the implementation. - // - // Implementations may return opentracing.ErrInvalidCarrier or any other - // implementation-specific error if the format is supported but injection - // fails anyway. - // - // See Tracer.Extract(). - Inject(sm SpanContext, format interface{}, carrier interface{}) error - - // Extract() returns a SpanContext instance given `format` and `carrier`. - // - // OpenTracing defines a common set of `format` values (see BuiltinFormat), - // and each has an expected carrier type. 
- // - // Other packages may declare their own `format` values, much like the keys - // used by `context.Context` (see - // https://godoc.org/golang.org/x/net/context#WithValue). - // - // Example usage (with StartSpan): - // - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) - // - // // ... assuming the ultimate goal here is to resume the trace with a - // // server-side Span: - // var serverSpan opentracing.Span - // if err == nil { - // span = tracer.StartSpan( - // rpcMethodName, ext.RPCServerOption(clientContext)) - // } else { - // span = tracer.StartSpan(rpcMethodName) - // } - // - // - // NOTE: All opentracing.Tracer implementations MUST support all - // BuiltinFormats. - // - // Return values: - // - A successful Extract returns a SpanContext instance and a nil error - // - If there was simply no SpanContext to extract in `carrier`, Extract() - // returns (nil, opentracing.ErrSpanContextNotFound) - // - If `format` is unsupported or unrecognized, Extract() returns (nil, - // opentracing.ErrUnsupportedFormat) - // - If there are more fundamental problems with the `carrier` object, - // Extract() may return opentracing.ErrInvalidCarrier, - // opentracing.ErrSpanContextCorrupted, or implementation-specific - // errors. - // - // See Tracer.Inject(). - Extract(format interface{}, carrier interface{}) (SpanContext, error) -} - -// StartSpanOptions allows Tracer.StartSpan() callers and implementors a -// mechanism to override the start timestamp, specify Span References, and make -// a single Tag or multiple Tags available at Span start time. -// -// StartSpan() callers should look at the StartSpanOption interface and -// implementations available in this package. 
-// -// Tracer implementations can convert a slice of `StartSpanOption` instances -// into a `StartSpanOptions` struct like so: -// -// func StartSpan(opName string, opts ...opentracing.StartSpanOption) { -// sso := opentracing.StartSpanOptions{} -// for _, o := range opts { -// o.Apply(&sso) -// } -// ... -// } -// -type StartSpanOptions struct { - // Zero or more causal references to other Spans (via their SpanContext). - // If empty, start a "root" Span (i.e., start a new trace). - References []SpanReference - - // StartTime overrides the Span's start time, or implicitly becomes - // time.Now() if StartTime.IsZero(). - StartTime time.Time - - // Tags may have zero or more entries; the restrictions on map values are - // identical to those for Span.SetTag(). May be nil. - // - // If specified, the caller hands off ownership of Tags at - // StartSpan() invocation time. - Tags map[string]interface{} -} - -// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan. -// -// StartSpanOption borrows from the "functional options" pattern, per -// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis -type StartSpanOption interface { - Apply(*StartSpanOptions) -} - -// SpanReferenceType is an enum type describing different categories of -// relationships between two Spans. If Span-2 refers to Span-1, the -// SpanReferenceType describes Span-1 from Span-2's perspective. For example, -// ChildOfRef means that Span-1 created Span-2. -// -// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for -// completion; e.g., Span-2 may be part of a background job enqueued by Span-1, -// or Span-2 may be sitting in a distributed queue behind Span-1. -type SpanReferenceType int - -const ( - // ChildOfRef refers to a parent Span that caused *and* somehow depends - // upon the new child Span. Often (but not always), the parent Span cannot - // finish until the child Span does. 
- // - // An timing diagram for a ChildOfRef that's blocked on the new Span: - // - // [-Parent Span---------] - // [-Child Span----] - // - // See http://opentracing.io/spec/ - // - // See opentracing.ChildOf() - ChildOfRef SpanReferenceType = iota - - // FollowsFromRef refers to a parent Span that does not depend in any way - // on the result of the new child Span. For instance, one might use - // FollowsFromRefs to describe pipeline stages separated by queues, - // or a fire-and-forget cache insert at the tail end of a web request. - // - // A FollowsFromRef Span is part of the same logical trace as the new Span: - // i.e., the new Span is somehow caused by the work of its FollowsFromRef. - // - // All of the following could be valid timing diagrams for children that - // "FollowFrom" a parent. - // - // [-Parent Span-] [-Child Span-] - // - // - // [-Parent Span--] - // [-Child Span-] - // - // - // [-Parent Span-] - // [-Child Span-] - // - // See http://opentracing.io/spec/ - // - // See opentracing.FollowsFrom() - FollowsFromRef -) - -// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a -// referenced SpanContext. See the SpanReferenceType documentation for -// supported relationships. If SpanReference is created with -// ReferencedContext==nil, it has no effect. Thus it allows for a more concise -// syntax for starting spans: -// -// sc, _ := tracer.Extract(someFormat, someCarrier) -// span := tracer.StartSpan("operation", opentracing.ChildOf(sc)) -// -// The `ChildOf(sc)` option above will not panic if sc == nil, it will just -// not add the parent span reference to the options. -type SpanReference struct { - Type SpanReferenceType - ReferencedContext SpanContext -} - -// Apply satisfies the StartSpanOption interface. -func (r SpanReference) Apply(o *StartSpanOptions) { - if r.ReferencedContext != nil { - o.References = append(o.References, r) - } -} - -// ChildOf returns a StartSpanOption pointing to a dependent parent span. 
-// If sc == nil, the option has no effect. -// -// See ChildOfRef, SpanReference -func ChildOf(sc SpanContext) SpanReference { - return SpanReference{ - Type: ChildOfRef, - ReferencedContext: sc, - } -} - -// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused -// the child Span but does not directly depend on its result in any way. -// If sc == nil, the option has no effect. -// -// See FollowsFromRef, SpanReference -func FollowsFrom(sc SpanContext) SpanReference { - return SpanReference{ - Type: FollowsFromRef, - ReferencedContext: sc, - } -} - -// StartTime is a StartSpanOption that sets an explicit start timestamp for the -// new Span. -type StartTime time.Time - -// Apply satisfies the StartSpanOption interface. -func (t StartTime) Apply(o *StartSpanOptions) { - o.StartTime = time.Time(t) -} - -// Tags are a generic map from an arbitrary string key to an opaque value type. -// The underlying tracing system is responsible for interpreting and -// serializing the values. -type Tags map[string]interface{} - -// Apply satisfies the StartSpanOption interface. -func (t Tags) Apply(o *StartSpanOptions) { - if o.Tags == nil { - o.Tags = make(map[string]interface{}) - } - for k, v := range t { - o.Tags[k] = v - } -} - -// Tag may be passed as a StartSpanOption to add a tag to new spans, -// or its Set method may be used to apply the tag to an existing Span, -// for example: -// -// tracer.StartSpan("opName", Tag{"Key", value}) -// -// or -// -// Tag{"key", value}.Set(span) -type Tag struct { - Key string - Value interface{} -} - -// Apply satisfies the StartSpanOption interface. -func (t Tag) Apply(o *StartSpanOptions) { - if o.Tags == nil { - o.Tags = make(map[string]interface{}) - } - o.Tags[t.Key] = t.Value -} - -// Set applies the tag to an existing Span. 
-func (t Tag) Set(s Span) { - s.SetTag(t.Key, t.Value) -} diff --git a/vendor/github.com/openzipkin/zipkin-go/LICENSE b/vendor/github.com/openzipkin/zipkin-go/LICENSE deleted file mode 100644 index 2ff7224635f0..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, -and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by -the copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all -other entities that control, are controlled by, or are under common -control with that entity. For the purposes of this definition, -"control" means (i) the power, direct or indirect, to cause the -direction or management of such entity, whether by contract or -otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity -exercising permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, -including but not limited to software source code, documentation -source, and configuration files. - -"Object" form shall mean any form resulting from mechanical -transformation or translation of a Source form, including but -not limited to compiled object code, generated documentation, -and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or -Object form, made available under the License, as indicated by a -copyright notice that is included in or attached to the work -(an example is provided in the Appendix below). 
- -"Derivative Works" shall mean any work, whether in Source or Object -form, that is based on (or derived from) the Work and for which the -editorial revisions, annotations, elaborations, or other modifications -represent, as a whole, an original work of authorship. For the purposes -of this License, Derivative Works shall not include works that remain -separable from, or merely link (or bind by name) to the interfaces of, -the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including -the original version of the Work and any modifications or additions -to that Work or Derivative Works thereof, that is intentionally -submitted to Licensor for inclusion in the Work by the copyright owner -or by an individual or Legal Entity authorized to submit on behalf of -the copyright owner. For the purposes of this definition, "submitted" -means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, -and issue tracking systems that are managed by, or on behalf of, the -Licensor for the purpose of discussing and improving the Work, but -excluding communication that is conspicuously marked or otherwise -designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity -on behalf of whom a Contribution has been received by Licensor and -subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of -this License, each Contributor hereby grants to You a perpetual, -worldwide, non-exclusive, no-charge, royalty-free, irrevocable -copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the -Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of -this License, each Contributor hereby grants to You a perpetual, -worldwide, non-exclusive, no-charge, royalty-free, irrevocable -(except as stated in this section) patent license to make, have made, -use, offer to sell, sell, import, and otherwise transfer the Work, -where such license applies only to those patent claims licensable -by such Contributor that are necessarily infringed by their -Contribution(s) alone or by combination of their Contribution(s) -with the Work to which such Contribution(s) was submitted. If You -institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work -or a Contribution incorporated within the Work constitutes direct -or contributory patent infringement, then any patent licenses -granted to You under this License for that Work shall terminate -as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the -Work or Derivative Works thereof in any medium, with or without -modifications, and in Source or Object form, provided that You -meet the following conditions: - -(a) You must give any other recipients of the Work or -Derivative Works a copy of this License; and - -(b) You must cause any modified files to carry prominent notices -stating that You changed the files; and - -(c) You must retain, in the Source form of any Derivative Works -that You distribute, all copyright, patent, trademark, and -attribution notices from the Source form of the Work, -excluding those notices that do not pertain to any part of -the Derivative Works; and - -(d) If the Work includes a "NOTICE" text file as part of its -distribution, then any Derivative Works that You distribute must -include a readable copy of the attribution notices contained -within such NOTICE file, excluding those notices that do not -pertain to any part of the Derivative Works, in at least one -of the following places: within a 
NOTICE text file distributed -as part of the Derivative Works; within the Source form or -documentation, if provided along with the Derivative Works; or, -within a display generated by the Derivative Works, if and -wherever such third-party notices normally appear. The contents -of the NOTICE file are for informational purposes only and -do not modify the License. You may add Your own attribution -notices within Derivative Works that You distribute, alongside -or as an addendum to the NOTICE text from the Work, provided -that such additional attribution notices cannot be construed -as modifying the License. - -You may add Your own copyright statement to Your modifications and -may provide additional or different license terms and conditions -for use, reproduction, or distribution of Your modifications, or -for any such Derivative Works as a whole, provided Your use, -reproduction, and distribution of the Work otherwise complies with -the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, -any Contribution intentionally submitted for inclusion in the Work -by You to the Licensor shall be under the terms and conditions of -this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify -the terms of any separate license agreement you may have executed -with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade -names, trademarks, service marks, or product names of the Licensor, -except as required for reasonable and customary use in describing the -origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or -agreed to in writing, Licensor provides the Work (and each -Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -implied, including, without limitation, any warranties or conditions -of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -PARTICULAR PURPOSE. You are solely responsible for determining the -appropriateness of using or redistributing the Work and assume any -risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, -whether in tort (including negligence), contract, or otherwise, -unless required by applicable law (such as deliberate and grossly -negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, -incidental, or consequential damages of any character arising as a -result of this License or out of the use or inability to use the -Work (including but not limited to damages for loss of goodwill, -work stoppage, computer failure or malfunction, or any and all -other commercial damages or losses), even if such Contributor -has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing -the Work or Derivative Works thereof, You may choose to offer, -and charge a fee for, acceptance of support, warranty, indemnity, -or other liability obligations and/or rights consistent with this -License. However, in accepting such obligations, You may act only -on Your own behalf and on Your sole responsibility, not on behalf -of any other Contributor, and only if You agree to indemnify, -defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason -of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - -To apply the Apache License to your work, attach the following -boilerplate notice, with the fields enclosed by brackets "{}" -replaced with your own identifying information. (Don't include -the brackets!) The text should be enclosed in the appropriate -comment syntax for the file format. We also recommend that a -file or class name and description of purpose be included on the -same "printed page" as the copyright notice for easier -identification within third-party archives. - -Copyright 2017 The OpenZipkin Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/openzipkin/zipkin-go/Makefile b/vendor/github.com/openzipkin/zipkin-go/Makefile deleted file mode 100644 index b4271c614041..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/Makefile +++ /dev/null @@ -1,29 +0,0 @@ - -.DEFAULT_GOAL := test - -.PHONY: test -test: - go test -v -race -cover ./... - -.PHONY: bench -bench: - go test -v -run - -bench . -benchmem ./... - -.PHONY: protoc -protoc: - protoc --go_out=. proto/v2/zipkin.proto - protoc --go_out=plugins=grpc:. proto/testing/service.proto - -.PHONY: lint -lint: - # Ignore grep's exit code since no match returns 1. - echo 'linting...' ; golint ./... - -.PHONY: vet -vet: - go vet ./... 
- -.PHONY: all -all: vet lint test bench - -.PHONY: example diff --git a/vendor/github.com/openzipkin/zipkin-go/README.md b/vendor/github.com/openzipkin/zipkin-go/README.md deleted file mode 100644 index a36416f8abf6..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/README.md +++ /dev/null @@ -1,102 +0,0 @@ -# Zipkin Library for Go - -[![Travis CI](https://travis-ci.org/openzipkin/zipkin-go.svg?branch=master)](https://travis-ci.org/openzipkin/zipkin-go) -[![CircleCI](https://circleci.com/gh/openzipkin/zipkin-go.svg?style=shield)](https://circleci.com/gh/openzipkin/zipkin-go) -[![Appveyor CI](https://ci.appveyor.com/api/projects/status/1d0e5k96g10ajl63/branch/master?svg=true)](https://ci.appveyor.com/project/basvanbeek/zipkin-go) -[![Coverage Status](https://img.shields.io/coveralls/github/openzipkin/zipkin-go.svg)](https://coveralls.io/github/openzipkin/zipkin-go?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/openzipkin/zipkin-go)](https://goreportcard.com/report/github.com/openzipkin/zipkin-go) -[![GoDoc](https://godoc.org/github.com/openzipkin/zipkin-go?status.svg)](https://godoc.org/github.com/openzipkin/zipkin-go) -[![Gitter chat](https://badges.gitter.im/openzipkin/zipkin.svg)](https://gitter.im/openzipkin/zipkin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Sourcegraph](https://sourcegraph.com/github.com/openzipkin/zipkin-go/-/badge.svg)](https://sourcegraph.com/github.com/openzipkin/zipkin-go?badge) - -Zipkin Go is the official Go Tracer implementation for Zipkin, supported by the -OpenZipkin community. - -## package organization -`zipkin-go` is built with interoperability in mind within the OpenZipkin -community and even 3rd parties, the library consists of several packages. - -The main tracing implementation can be found in the root folder of this -repository. 
Reusable parts not considered core implementation or deemed -beneficiary for usage by others are placed in their own packages within this -repository. - -### model -This library implements the Zipkin V2 Span Model which is available in the model -package. It contains a Go data model compatible with the Zipkin V2 API and can -automatically sanitize, parse and (de)serialize to and from the required JSON -representation as used by the official Zipkin V2 Collectors. - -### propagation -The propagation package and B3 subpackage hold the logic for propagating -SpanContext (span identifiers and sampling flags) between services participating -in traces. Currently Zipkin B3 Propagation is supported for HTTP and GRPC. - -### middleware -The middleware subpackages contain officially supported middleware handlers and -tracing wrappers. - -#### http -An easy to use http.Handler middleware for tracing server side requests is -provided. This allows one to use this middleware in applications using -standard library servers as well as most available higher level frameworks. Some -frameworks will have their own instrumentation and middleware that maps better -for their ecosystem. - -For HTTP client operations `NewTransport` can return a `http.RoundTripper` -implementation that can either wrap the standard http.Client's Transport or a -custom provided one and add per request tracing. Since HTTP Requests can have -one or multiple redirects it is advisable to always enclose HTTP Client calls -with a `Span` either around the `*http.Client` call level or parent function -level. - -For convenience `NewClient` is provided which returns a HTTP Client which embeds -`*http.Client` and provides an `application span` around the HTTP calls when -calling the `DoWithAppSpan()` method. - -#### grpc -Easy to use grpc.StatsHandler middleware are provided for tracing gRPC server and -client requests. 
- -For a server, pass `NewServerHandler` when calling `NewServer`, e.g., - -```go -import ( - "google.golang.org/grpc" - zipkingrpc "github.com/openzipkin/zipkin-go/middleware/grpc" -) - -server = grpc.NewServer(grpc.StatsHandler(zipkingrpc.NewServerHandler(tracer))) -``` - -For a client, pass `NewClientHandler` when calling `Dial`, e.g., - -```go -import ( - "google.golang.org/grpc" - zipkingrpc "github.com/openzipkin/zipkin-go/middleware/grpc" -) - -conn, err = grpc.Dial(addr, grpc.WithStatsHandler(zipkingrpc.NewClientHandler(tracer))) -``` - -### reporter -The reporter package holds the interface which the various Reporter -implementations use. It is exported into its own package as it can be used by -3rd parties to use these Reporter packages in their own libraries for exporting -to the Zipkin ecosystem. The `zipkin-go` tracer also uses the interface to -accept 3rd party Reporter implementations. - -#### HTTP Reporter -Most common Reporter type used by Zipkin users transporting Spans to the Zipkin -server using JSON over HTTP. The reporter holds a buffer and reports to the -backend asynchronously. - -#### Kafka Reporter -High performance Reporter transporting Spans to the Zipkin server using a Kafka -Producer digesting JSON V2 Spans. The reporter uses the -[Sarama async producer](https://godoc.org/github.com/Shopify/sarama#AsyncProducer) -underneath. 
- -## usage and examples -[HTTP Server Example](example_httpserver_test.go) diff --git a/vendor/github.com/openzipkin/zipkin-go/appveyor.yml b/vendor/github.com/openzipkin/zipkin-go/appveyor.yml deleted file mode 100644 index 57489e797db9..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/appveyor.yml +++ /dev/null @@ -1,23 +0,0 @@ -version: v1.0.0.{build} - -platform: x64 - -clone_folder: c:\gopath\src\github.com\openzipkin\zipkin-go - -environment: - GOPATH: c:\gopath - GO111MODULE: on - GOFLAGS: -mod=readonly - -install: - - choco install rabbitmq --ignoredependencies -y - - echo %PATH% - - echo %GOPATH% - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - -build_script: - - go vet ./... - - go test -v -race -cover ./... - - go test -v -run - -bench . -benchmem ./... diff --git a/vendor/github.com/openzipkin/zipkin-go/circle.yml b/vendor/github.com/openzipkin/zipkin-go/circle.yml deleted file mode 100644 index 808b54c9f6ed..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 2 -jobs: - build: - working_directory: /go/src/github.com/openzipkin/zipkin-go - parallelism: 1 - docker: - - image: circleci/golang - steps: - - checkout - - run: go get -t -v -d ./... - - run: make vet test bench diff --git a/vendor/github.com/openzipkin/zipkin-go/context.go b/vendor/github.com/openzipkin/zipkin-go/context.go deleted file mode 100644 index bd25ddcb37bf..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/context.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zipkin - -import ( - "context" -) - -var defaultNoopSpan = &noopSpan{} - -// SpanFromContext retrieves a Zipkin Span from Go's context propagation -// mechanism if found. If not found, returns nil. -func SpanFromContext(ctx context.Context) Span { - if s, ok := ctx.Value(spanKey).(Span); ok { - return s - } - return nil -} - -// SpanOrNoopFromContext retrieves a Zipkin Span from Go's context propagation -// mechanism if found. If not found, returns a noopSpan. -// This function typically is used for modules that want to provide existing -// Zipkin spans with additional data, but can't guarantee that spans are -// properly propagated. It is preferred to use SpanFromContext() and test for -// Nil instead of using this function. -func SpanOrNoopFromContext(ctx context.Context) Span { - if s, ok := ctx.Value(spanKey).(Span); ok { - return s - } - return defaultNoopSpan -} - -// NewContext stores a Zipkin Span into Go's context propagation mechanism. -func NewContext(ctx context.Context, s Span) context.Context { - return context.WithValue(ctx, spanKey, s) -} - -type ctxKey struct{} - -var spanKey = ctxKey{} diff --git a/vendor/github.com/openzipkin/zipkin-go/doc.go b/vendor/github.com/openzipkin/zipkin-go/doc.go deleted file mode 100644 index 18cc62f824b6..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package zipkin implements a native Zipkin instrumentation library for Go. - -See https://zipkin.io for more information about Zipkin. -*/ -package zipkin diff --git a/vendor/github.com/openzipkin/zipkin-go/endpoint.go b/vendor/github.com/openzipkin/zipkin-go/endpoint.go deleted file mode 100644 index 4a1a6c705192..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/endpoint.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zipkin - -import ( - "net" - "strconv" - "strings" - - "github.com/openzipkin/zipkin-go/model" -) - -// NewEndpoint creates a new endpoint given the provided serviceName and -// hostPort. -func NewEndpoint(serviceName string, hostPort string) (*model.Endpoint, error) { - e := &model.Endpoint{ - ServiceName: serviceName, - } - - if hostPort == "" || hostPort == ":0" { - if serviceName == "" { - // if all properties are empty we should not have an Endpoint object. 
- return nil, nil - } - return e, nil - } - - if strings.IndexByte(hostPort, ':') < 0 { - hostPort += ":0" - } - - host, port, err := net.SplitHostPort(hostPort) - if err != nil { - return nil, err - } - - p, err := strconv.ParseUint(port, 10, 16) - if err != nil { - return nil, err - } - e.Port = uint16(p) - - addrs, err := net.LookupIP(host) - if err != nil { - return nil, err - } - - for i := range addrs { - addr := addrs[i].To4() - if addr == nil { - // IPv6 - 16 bytes - if e.IPv6 == nil { - e.IPv6 = addrs[i].To16() - } - } else { - // IPv4 - 4 bytes - if e.IPv4 == nil { - e.IPv4 = addr - } - } - if e.IPv4 != nil && e.IPv6 != nil { - // Both IPv4 & IPv6 have been set, done... - break - } - } - - return e, nil -} diff --git a/vendor/github.com/openzipkin/zipkin-go/go.mod b/vendor/github.com/openzipkin/zipkin-go/go.mod deleted file mode 100644 index ed37c1c2cba8..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/go.mod +++ /dev/null @@ -1,25 +0,0 @@ -module github.com/openzipkin/zipkin-go - -require ( - github.com/Shopify/sarama v1.19.0 - github.com/Shopify/toxiproxy v2.1.4+incompatible // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/eapache/go-resiliency v1.1.0 // indirect - github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect - github.com/eapache/queue v1.1.0 // indirect - github.com/gogo/protobuf v1.2.0 - github.com/golang/protobuf v1.2.0 - github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect - github.com/gorilla/context v1.1.1 // indirect - github.com/gorilla/mux v1.6.2 - github.com/onsi/ginkgo v1.7.0 - github.com/onsi/gomega v1.4.3 - github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1 // indirect - github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a // indirect - github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 - golang.org/x/net v0.0.0-20190311183353-d8887717615a - golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect - 
google.golang.org/grpc v1.20.0 -) - -go 1.12 diff --git a/vendor/github.com/openzipkin/zipkin-go/go.sum b/vendor/github.com/openzipkin/zipkin-go/go.sum deleted file mode 100644 index 5274bc9f400a..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/go.sum +++ /dev/null @@ -1,76 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 
-github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
-github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1 h1:VGcrWe3yk6o+t7BdVNy5UDPWa4OZuDWtE1W1ZbS7Kyw= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 h1:0ngsPmuP6XIjiFRNFYlvKwSr5zff2v+uPHaffZ6/M4k= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.20.0 h1:DlsSIrgEBuZAUFJcta2B5i/lzeHHbnfkNFAfFXLVFYQ= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go b/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go deleted file mode 100644 index 3e857010fc99..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/idgenerator/idgenerator.go +++ /dev/null @@ -1,130 +0,0 @@ -// 
Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package idgenerator contains several Span and Trace ID generators which can be -used by the Zipkin tracer. Additional third party generators can be plugged in -if they adhere to the IDGenerator interface. -*/ -package idgenerator - -import ( - "math/rand" - "sync" - "time" - - "github.com/openzipkin/zipkin-go/model" -) - -var ( - seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano())) - // NewSource returns a new pseudo-random Source seeded with the given value. - // Unlike the default Source used by top-level functions, this source is not - // safe for concurrent use by multiple goroutines. Hence the need for a mutex. - seededIDLock sync.Mutex -) - -// IDGenerator interface can be used to provide the Zipkin Tracer with custom -// implementations to generate Span and Trace IDs. -type IDGenerator interface { - SpanID(traceID model.TraceID) model.ID // Generates a new Span ID - TraceID() model.TraceID // Generates a new Trace ID -} - -// NewRandom64 returns an ID Generator which can generate 64 bit trace and span -// id's -func NewRandom64() IDGenerator { - return &randomID64{} -} - -// NewRandom128 returns an ID Generator which can generate 128 bit trace and 64 -// bit span id's -func NewRandom128() IDGenerator { - return &randomID128{} -} - -// NewRandomTimestamped generates 128 bit time sortable traceid's and 64 bit -// spanid's. 
-func NewRandomTimestamped() IDGenerator { - return &randomTimestamped{} -} - -// randomID64 can generate 64 bit traceid's and 64 bit spanid's. -type randomID64 struct{} - -func (r *randomID64) TraceID() (id model.TraceID) { - seededIDLock.Lock() - id = model.TraceID{ - Low: uint64(seededIDGen.Int63()), - } - seededIDLock.Unlock() - return -} - -func (r *randomID64) SpanID(traceID model.TraceID) (id model.ID) { - if !traceID.Empty() { - return model.ID(traceID.Low) - } - seededIDLock.Lock() - id = model.ID(seededIDGen.Int63()) - seededIDLock.Unlock() - return -} - -// randomID128 can generate 128 bit traceid's and 64 bit spanid's. -type randomID128 struct{} - -func (r *randomID128) TraceID() (id model.TraceID) { - seededIDLock.Lock() - id = model.TraceID{ - High: uint64(seededIDGen.Int63()), - Low: uint64(seededIDGen.Int63()), - } - seededIDLock.Unlock() - return -} - -func (r *randomID128) SpanID(traceID model.TraceID) (id model.ID) { - if !traceID.Empty() { - return model.ID(traceID.Low) - } - seededIDLock.Lock() - id = model.ID(seededIDGen.Int63()) - seededIDLock.Unlock() - return -} - -// randomTimestamped can generate 128 bit time sortable traceid's compatible -// with AWS X-Ray and 64 bit spanid's. 
-type randomTimestamped struct{} - -func (t *randomTimestamped) TraceID() (id model.TraceID) { - seededIDLock.Lock() - id = model.TraceID{ - High: uint64(time.Now().Unix()<<32) + uint64(seededIDGen.Int31()), - Low: uint64(seededIDGen.Int63()), - } - seededIDLock.Unlock() - return -} - -func (t *randomTimestamped) SpanID(traceID model.TraceID) (id model.ID) { - if !traceID.Empty() { - return model.ID(traceID.Low) - } - seededIDLock.Lock() - id = model.ID(seededIDGen.Int63()) - seededIDLock.Unlock() - return -} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/annotation.go b/vendor/github.com/openzipkin/zipkin-go/model/annotation.go deleted file mode 100644 index 795bc4143f2a..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/model/annotation.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "errors" - "time" -) - -// ErrValidTimestampRequired error -var ErrValidTimestampRequired = errors.New("valid annotation timestamp required") - -// Annotation associates an event that explains latency with a timestamp. 
-type Annotation struct { - Timestamp time.Time - Value string -} - -// MarshalJSON implements custom JSON encoding -func (a *Annotation) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - Timestamp int64 `json:"timestamp"` - Value string `json:"value"` - }{ - Timestamp: a.Timestamp.Round(time.Microsecond).UnixNano() / 1e3, - Value: a.Value, - }) -} - -// UnmarshalJSON implements custom JSON decoding -func (a *Annotation) UnmarshalJSON(b []byte) error { - type Alias Annotation - annotation := &struct { - TimeStamp uint64 `json:"timestamp"` - *Alias - }{ - Alias: (*Alias)(a), - } - if err := json.Unmarshal(b, &annotation); err != nil { - return err - } - if annotation.TimeStamp < 1 { - return ErrValidTimestampRequired - } - a.Timestamp = time.Unix(0, int64(annotation.TimeStamp)*1e3) - return nil -} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/doc.go b/vendor/github.com/openzipkin/zipkin-go/model/doc.go deleted file mode 100644 index 1b11b4df795f..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/model/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package model contains the Zipkin V2 model which is used by the Zipkin Go -tracer implementation. 
- -Third party instrumentation libraries can use the model and transport packages -found in this Zipkin Go library to directly interface with the Zipkin Server or -Zipkin Collectors without the need to use the tracer implementation itself. -*/ -package model diff --git a/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go b/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go deleted file mode 100644 index 58880bd15772..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import "net" - -// Endpoint holds the network context of a node in the service graph. -type Endpoint struct { - ServiceName string `json:"serviceName,omitempty"` - IPv4 net.IP `json:"ipv4,omitempty"` - IPv6 net.IP `json:"ipv6,omitempty"` - Port uint16 `json:"port,omitempty"` -} - -// Empty returns if all Endpoint properties are empty / unspecified. 
-func (e *Endpoint) Empty() bool { - return e == nil || - (e.ServiceName == "" && e.Port == 0 && len(e.IPv4) == 0 && len(e.IPv6) == 0) -} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/kind.go b/vendor/github.com/openzipkin/zipkin-go/model/kind.go deleted file mode 100644 index 5d512ad90f2b..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/model/kind.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// Kind clarifies context of timestamp, duration and remoteEndpoint in a span. -type Kind string - -// Available Kind values -const ( - Undetermined Kind = "" - Client Kind = "CLIENT" - Server Kind = "SERVER" - Producer Kind = "PRODUCER" - Consumer Kind = "CONSUMER" -) diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span.go b/vendor/github.com/openzipkin/zipkin-go/model/span.go deleted file mode 100644 index f428413f1a5f..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/model/span.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "errors" - "time" -) - -// unmarshal errors -var ( - ErrValidTraceIDRequired = errors.New("valid traceId required") - ErrValidIDRequired = errors.New("valid span id required") - ErrValidDurationRequired = errors.New("valid duration required") -) - -// SpanContext holds the context of a Span. -type SpanContext struct { - TraceID TraceID `json:"traceId"` - ID ID `json:"id"` - ParentID *ID `json:"parentId,omitempty"` - Debug bool `json:"debug,omitempty"` - Sampled *bool `json:"-"` - Err error `json:"-"` -} - -// SpanModel structure. -// -// If using this library to instrument your application you will not need to -// directly access or modify this representation. The SpanModel is exported for -// use cases involving 3rd party Go instrumentation libraries desiring to -// export data to a Zipkin server using the Zipkin V2 Span model. -type SpanModel struct { - SpanContext - Name string `json:"name,omitempty"` - Kind Kind `json:"kind,omitempty"` - Timestamp time.Time `json:"-"` - Duration time.Duration `json:"-"` - Shared bool `json:"shared,omitempty"` - LocalEndpoint *Endpoint `json:"localEndpoint,omitempty"` - RemoteEndpoint *Endpoint `json:"remoteEndpoint,omitempty"` - Annotations []Annotation `json:"annotations,omitempty"` - Tags map[string]string `json:"tags,omitempty"` -} - -// MarshalJSON exports our Model into the correct format for the Zipkin V2 API. 
-func (s SpanModel) MarshalJSON() ([]byte, error) { - type Alias SpanModel - - var timestamp int64 - if !s.Timestamp.IsZero() { - if s.Timestamp.Unix() < 1 { - // Zipkin does not allow Timestamps before Unix epoch - return nil, ErrValidTimestampRequired - } - timestamp = s.Timestamp.Round(time.Microsecond).UnixNano() / 1e3 - } - - if s.Duration < time.Microsecond { - if s.Duration < 0 { - // negative duration is not allowed and signals a timing logic error - return nil, ErrValidDurationRequired - } else if s.Duration > 0 { - // sub microsecond durations are reported as 1 microsecond - s.Duration = 1 * time.Microsecond - } - } else { - // Duration will be rounded to nearest microsecond representation. - // - // NOTE: Duration.Round() is not available in Go 1.8 which we still support. - // To handle microsecond resolution rounding we'll add 500 nanoseconds to - // the duration. When truncated to microseconds in the call to marshal, it - // will be naturally rounded. See TestSpanDurationRounding in span_test.go - s.Duration += 500 * time.Nanosecond - } - - if s.LocalEndpoint.Empty() { - s.LocalEndpoint = nil - } - - if s.RemoteEndpoint.Empty() { - s.RemoteEndpoint = nil - } - - return json.Marshal(&struct { - T int64 `json:"timestamp,omitempty"` - D int64 `json:"duration,omitempty"` - Alias - }{ - T: timestamp, - D: s.Duration.Nanoseconds() / 1e3, - Alias: (Alias)(s), - }) -} - -// UnmarshalJSON imports our Model from a Zipkin V2 API compatible span -// representation. 
-func (s *SpanModel) UnmarshalJSON(b []byte) error { - type Alias SpanModel - span := &struct { - T uint64 `json:"timestamp,omitempty"` - D uint64 `json:"duration,omitempty"` - *Alias - }{ - Alias: (*Alias)(s), - } - if err := json.Unmarshal(b, &span); err != nil { - return err - } - if s.ID < 1 { - return ErrValidIDRequired - } - if span.T > 0 { - s.Timestamp = time.Unix(0, int64(span.T)*1e3) - } - s.Duration = time.Duration(span.D*1e3) * time.Nanosecond - if s.LocalEndpoint.Empty() { - s.LocalEndpoint = nil - } - - if s.RemoteEndpoint.Empty() { - s.RemoteEndpoint = nil - } - return nil -} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span_id.go b/vendor/github.com/openzipkin/zipkin-go/model/span_id.go deleted file mode 100644 index 452dc871b20b..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/model/span_id.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// ID type -type ID uint64 - -// String outputs the 64-bit ID as hex string. -func (i ID) String() string { - return fmt.Sprintf("%016x", uint64(i)) -} - -// MarshalJSON serializes an ID type (SpanID, ParentSpanID) to HEX. -func (i ID) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf("%q", i.String())), nil -} - -// UnmarshalJSON deserializes an ID type (SpanID, ParentSpanID) from HEX. 
-func (i *ID) UnmarshalJSON(b []byte) (err error) { - var id uint64 - if len(b) < 3 { - return nil - } - id, err = strconv.ParseUint(string(b[1:len(b)-1]), 16, 64) - *i = ID(id) - return err -} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/traceid.go b/vendor/github.com/openzipkin/zipkin-go/model/traceid.go deleted file mode 100644 index 68d12d386c1c..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/model/traceid.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// TraceID is a 128 bit number internally stored as 2x uint64 (high & low). -// In case of 64 bit traceIDs, the value can be found in Low. -type TraceID struct { - High uint64 - Low uint64 -} - -// Empty returns if TraceID has zero value. -func (t TraceID) Empty() bool { - return t.Low == 0 && t.High == 0 -} - -// String outputs the 128-bit traceID as hex string. -func (t TraceID) String() string { - if t.High == 0 { - return fmt.Sprintf("%016x", t.Low) - } - return fmt.Sprintf("%016x%016x", t.High, t.Low) -} - -// TraceIDFromHex returns the TraceID from a hex string. 
-func TraceIDFromHex(h string) (t TraceID, err error) { - if len(h) > 16 { - if t.High, err = strconv.ParseUint(h[0:len(h)-16], 16, 64); err != nil { - return - } - t.Low, err = strconv.ParseUint(h[len(h)-16:], 16, 64) - return - } - t.Low, err = strconv.ParseUint(h, 16, 64) - return -} - -// MarshalJSON custom JSON serializer to export the TraceID in the required -// zero padded hex representation. -func (t TraceID) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf("%q", t.String())), nil -} - -// UnmarshalJSON custom JSON deserializer to retrieve the traceID from the hex -// encoded representation. -func (t *TraceID) UnmarshalJSON(traceID []byte) error { - if len(traceID) < 3 { - return ErrValidTraceIDRequired - } - // A valid JSON string is encoded wrapped in double quotes. We need to trim - // these before converting the hex payload. - tID, err := TraceIDFromHex(string(traceID[1 : len(traceID)-1])) - if err != nil { - return err - } - *t = tID - return nil -} diff --git a/vendor/github.com/openzipkin/zipkin-go/noop.go b/vendor/github.com/openzipkin/zipkin-go/noop.go deleted file mode 100644 index 2e5d0cd905b0..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/noop.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zipkin - -import ( - "time" - - "github.com/openzipkin/zipkin-go/model" -) - -type noopSpan struct { - model.SpanContext -} - -func (n *noopSpan) Context() model.SpanContext { return n.SpanContext } - -func (n *noopSpan) SetName(string) {} - -func (*noopSpan) SetRemoteEndpoint(*model.Endpoint) {} - -func (*noopSpan) Annotate(time.Time, string) {} - -func (*noopSpan) Tag(string, string) {} - -func (*noopSpan) Finish() {} - -func (*noopSpan) Flush() {} diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/doc.go b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/doc.go deleted file mode 100644 index 27ce5e040e86..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package b3 implements serialization and deserialization logic for Zipkin -B3 Headers. -*/ -package b3 diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/grpc.go b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/grpc.go deleted file mode 100644 index a1b30fa41f27..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/grpc.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package b3 - -import ( - "google.golang.org/grpc/metadata" - - "github.com/openzipkin/zipkin-go/model" - "github.com/openzipkin/zipkin-go/propagation" -) - -// ExtractGRPC will extract a span.Context from the gRPC Request metadata if -// found in B3 header format. -func ExtractGRPC(md *metadata.MD) propagation.Extractor { - return func() (*model.SpanContext, error) { - var ( - traceIDHeader = GetGRPCHeader(md, TraceID) - spanIDHeader = GetGRPCHeader(md, SpanID) - parentSpanIDHeader = GetGRPCHeader(md, ParentSpanID) - sampledHeader = GetGRPCHeader(md, Sampled) - flagsHeader = GetGRPCHeader(md, Flags) - ) - - return ParseHeaders( - traceIDHeader, spanIDHeader, parentSpanIDHeader, sampledHeader, - flagsHeader, - ) - } -} - -// InjectGRPC will inject a span.Context into gRPC metadata. -func InjectGRPC(md *metadata.MD) propagation.Injector { - return func(sc model.SpanContext) error { - if (model.SpanContext{}) == sc { - return ErrEmptyContext - } - - if sc.Debug { - setGRPCHeader(md, Flags, "1") - } else if sc.Sampled != nil { - // Debug is encoded as X-B3-Flags: 1. Since Debug implies Sampled, - // we don't send "X-B3-Sampled" if Debug is set. 
- if *sc.Sampled { - setGRPCHeader(md, Sampled, "1") - } else { - setGRPCHeader(md, Sampled, "0") - } - } - - if !sc.TraceID.Empty() && sc.ID > 0 { - // set identifiers - setGRPCHeader(md, TraceID, sc.TraceID.String()) - setGRPCHeader(md, SpanID, sc.ID.String()) - if sc.ParentID != nil { - setGRPCHeader(md, ParentSpanID, sc.ParentID.String()) - } - } - - return nil - } -} - -// GetGRPCHeader retrieves the last value found for a particular key. If key is -// not found it returns an empty string. -func GetGRPCHeader(md *metadata.MD, key string) string { - v := (*md)[key] - if len(v) < 1 { - return "" - } - return v[len(v)-1] -} - -func setGRPCHeader(md *metadata.MD, key, value string) { - (*md)[key] = append((*md)[key], value) -} diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/http.go b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/http.go deleted file mode 100644 index d987f94115f1..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/http.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package b3 - -import ( - "net/http" - - "github.com/openzipkin/zipkin-go/model" - "github.com/openzipkin/zipkin-go/propagation" -) - -type InjectOption func(opts *InjectOptions) - -type InjectOptions struct { - shouldInjectSingleHeader bool - shouldInjectMultiHeader bool -} - -// WithSingleAndMultiHeader allows to include both single and multiple -// headers in the context injection -func WithSingleAndMultiHeader() InjectOption { - return func(opts *InjectOptions) { - opts.shouldInjectSingleHeader = true - opts.shouldInjectMultiHeader = true - } -} - -// WithSingleHeaderOnly allows to include only single header in the context -// injection -func WithSingleHeaderOnly() InjectOption { - return func(opts *InjectOptions) { - opts.shouldInjectSingleHeader = true - opts.shouldInjectMultiHeader = false - } -} - -// ExtractHTTP will extract a span.Context from the HTTP Request if found in -// B3 header format. -func ExtractHTTP(r *http.Request) propagation.Extractor { - return func() (*model.SpanContext, error) { - var ( - traceIDHeader = r.Header.Get(TraceID) - spanIDHeader = r.Header.Get(SpanID) - parentSpanIDHeader = r.Header.Get(ParentSpanID) - sampledHeader = r.Header.Get(Sampled) - flagsHeader = r.Header.Get(Flags) - singleHeader = r.Header.Get(Context) - ) - - var ( - sc *model.SpanContext - sErr error - mErr error - ) - if singleHeader != "" { - sc, sErr = ParseSingleHeader(singleHeader) - if sErr == nil { - return sc, nil - } - } - - sc, mErr = ParseHeaders( - traceIDHeader, spanIDHeader, parentSpanIDHeader, - sampledHeader, flagsHeader, - ) - - if mErr != nil && sErr != nil { - return nil, sErr - } - - return sc, mErr - } -} - -// InjectHTTP will inject a span.Context into a HTTP Request -func InjectHTTP(r *http.Request, opts ...InjectOption) propagation.Injector { - options := InjectOptions{shouldInjectMultiHeader: true} - for _, opt := range opts { - opt(&options) - } - - return func(sc model.SpanContext) error { - if (model.SpanContext{}) == sc { - return 
ErrEmptyContext - } - - if options.shouldInjectMultiHeader { - if sc.Debug { - r.Header.Set(Flags, "1") - } else if sc.Sampled != nil { - // Debug is encoded as X-B3-Flags: 1. Since Debug implies Sampled, - // so don't also send "X-B3-Sampled: 1". - if *sc.Sampled { - r.Header.Set(Sampled, "1") - } else { - r.Header.Set(Sampled, "0") - } - } - - if !sc.TraceID.Empty() && sc.ID > 0 { - r.Header.Set(TraceID, sc.TraceID.String()) - r.Header.Set(SpanID, sc.ID.String()) - if sc.ParentID != nil { - r.Header.Set(ParentSpanID, sc.ParentID.String()) - } - } - } - - if options.shouldInjectSingleHeader { - r.Header.Set(Context, BuildSingleHeader(sc)) - } - - return nil - } -} diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/shared.go b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/shared.go deleted file mode 100644 index 04bcae832dfa..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/shared.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package b3 - -import "errors" - -// Common Header Extraction / Injection errors -var ( - ErrInvalidSampledByte = errors.New("invalid B3 Sampled found") - ErrInvalidSampledHeader = errors.New("invalid B3 Sampled header found") - ErrInvalidFlagsHeader = errors.New("invalid B3 Flags header found") - ErrInvalidTraceIDHeader = errors.New("invalid B3 TraceID header found") - ErrInvalidSpanIDHeader = errors.New("invalid B3 SpanID header found") - ErrInvalidParentSpanIDHeader = errors.New("invalid B3 ParentSpanID header found") - ErrInvalidScope = errors.New("require either both TraceID and SpanID or none") - ErrInvalidScopeParent = errors.New("ParentSpanID requires both TraceID and SpanID to be available") - ErrInvalidScopeParentSingle = errors.New("ParentSpanID requires TraceID, SpanID and Sampled to be available") - ErrEmptyContext = errors.New("empty request context") - ErrInvalidTraceIDValue = errors.New("invalid B3 TraceID value found") - ErrInvalidSpanIDValue = errors.New("invalid B3 SpanID value found") - ErrInvalidParentSpanIDValue = errors.New("invalid B3 ParentSpanID value found") -) - -// Default B3 Header keys -const ( - TraceID = "x-b3-traceid" - SpanID = "x-b3-spanid" - ParentSpanID = "x-b3-parentspanid" - Sampled = "x-b3-sampled" - Flags = "x-b3-flags" - Context = "b3" -) diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/spancontext.go b/vendor/github.com/openzipkin/zipkin-go/propagation/b3/spancontext.go deleted file mode 100644 index e3569e0d2ceb..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/propagation/b3/spancontext.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package b3 - -import ( - "strconv" - "strings" - - "github.com/openzipkin/zipkin-go/model" -) - -// ParseHeaders takes values found from B3 Headers and tries to reconstruct a -// SpanContext. -func ParseHeaders( - hdrTraceID, hdrSpanID, hdrParentSpanID, hdrSampled, hdrFlags string, -) (*model.SpanContext, error) { - var ( - err error - spanID uint64 - requiredCount int - sc = &model.SpanContext{} - ) - - // correct values for an existing sampled header are "0" and "1". - // For legacy support and being lenient to other tracing implementations we - // allow "true" and "false" as inputs for interop purposes. - switch strings.ToLower(hdrSampled) { - case "0", "false": - sampled := false - sc.Sampled = &sampled - case "1", "true": - sampled := true - sc.Sampled = &sampled - case "": - // sc.Sampled = nil - default: - return nil, ErrInvalidSampledHeader - } - - // The only accepted value for Flags is "1". This will set Debug to true. All - // other values and omission of header will be ignored. 
- if hdrFlags == "1" { - sc.Debug = true - sc.Sampled = nil - } - - if hdrTraceID != "" { - requiredCount++ - if sc.TraceID, err = model.TraceIDFromHex(hdrTraceID); err != nil { - return nil, ErrInvalidTraceIDHeader - } - } - - if hdrSpanID != "" { - requiredCount++ - if spanID, err = strconv.ParseUint(hdrSpanID, 16, 64); err != nil { - return nil, ErrInvalidSpanIDHeader - } - sc.ID = model.ID(spanID) - } - - if requiredCount != 0 && requiredCount != 2 { - return nil, ErrInvalidScope - } - - if hdrParentSpanID != "" { - if requiredCount == 0 { - return nil, ErrInvalidScopeParent - } - if spanID, err = strconv.ParseUint(hdrParentSpanID, 16, 64); err != nil { - return nil, ErrInvalidParentSpanIDHeader - } - parentSpanID := model.ID(spanID) - sc.ParentID = &parentSpanID - } - - return sc, nil -} - -// ParseSingleHeader takes values found from B3 Single Header and tries to reconstruct a -// SpanContext. -func ParseSingleHeader(contextHeader string) (*model.SpanContext, error) { - if contextHeader == "" { - return nil, ErrEmptyContext - } - - var ( - sc = model.SpanContext{} - sampling string - ) - - headerLen := len(contextHeader) - - if headerLen == 1 { - sampling = contextHeader - } else if headerLen == 16 || headerLen == 32 { - return nil, ErrInvalidScope - } else if headerLen >= 16+16+1 { - var high, low uint64 - pos := 0 - if string(contextHeader[16]) != "-" { - // traceID must be 128 bits - var err error - high, err = strconv.ParseUint(contextHeader[0:16], 16, 64) - if err != nil { - return nil, ErrInvalidTraceIDValue - } - pos = 16 - } - - low, err := strconv.ParseUint(contextHeader[pos+1:pos+16], 16, 64) - if err != nil { - return nil, ErrInvalidTraceIDValue - } - - sc.TraceID = model.TraceID{High: high, Low: low} - - rawID, err := strconv.ParseUint(contextHeader[pos+16+1:pos+16+1+16], 16, 64) - if err != nil { - return nil, ErrInvalidSpanIDValue - } - - sc.ID = model.ID(rawID) - - if headerLen > pos+16+1+16 { - if headerLen == pos+16+1+16+1 { - return nil, 
ErrInvalidSampledByte - } - - if headerLen == pos+16+1+16+1+1 { - sampling = string(contextHeader[pos+16+1+16+1]) - } else if headerLen == pos+16+1+16+1+16 { - return nil, ErrInvalidScopeParentSingle - } else if headerLen == pos+16+1+16+1+1+1+16 { - sampling = string(contextHeader[pos+16+1+16+1]) - - rawParentID, err := strconv.ParseUint(contextHeader[pos+16+1+16+1+1+1:], 16, 64) - if err != nil { - return nil, ErrInvalidParentSpanIDValue - } - - parentID := model.ID(rawParentID) - sc.ParentID = &parentID - } else { - return nil, ErrInvalidParentSpanIDValue - } - } - } else { - return nil, ErrInvalidTraceIDValue - } - switch sampling { - case "d": - sc.Debug = true - case "1": - trueVal := true - sc.Sampled = &trueVal - case "0": - falseVal := false - sc.Sampled = &falseVal - case "": - default: - return nil, ErrInvalidSampledByte - } - - return &sc, nil -} - -// BuildSingleHeader takes the values from the SpanContext and builds the B3 header -func BuildSingleHeader(sc model.SpanContext) string { - header := []string{} - if !sc.TraceID.Empty() && sc.ID > 0 { - header = append(header, sc.TraceID.String(), sc.ID.String()) - } - - if sc.Debug { - header = append(header, "d") - } else if sc.Sampled != nil { - if *sc.Sampled { - header = append(header, "1") - } else { - header = append(header, "0") - } - } - - if sc.ParentID != nil { - header = append(header, sc.ParentID.String()) - } - - return strings.Join(header, "-") -} diff --git a/vendor/github.com/openzipkin/zipkin-go/propagation/propagation.go b/vendor/github.com/openzipkin/zipkin-go/propagation/propagation.go deleted file mode 100644 index 067b28e8d8f9..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/propagation/propagation.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package propagation holds the required function signatures for Injection and -Extraction as used by the Zipkin Tracer. - -Subpackages of this package contain officially supported standard propagation -implementations. -*/ -package propagation - -import "github.com/openzipkin/zipkin-go/model" - -// Extractor function signature -type Extractor func() (*model.SpanContext, error) - -// Injector function signature -type Injector func(model.SpanContext) error diff --git a/vendor/github.com/openzipkin/zipkin-go/reporter/http/http.go b/vendor/github.com/openzipkin/zipkin-go/reporter/http/http.go deleted file mode 100644 index 3a48ce5f324d..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/reporter/http/http.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package http implements a HTTP reporter to send spans to Zipkin V2 collectors. 
-*/ -package http - -import ( - "bytes" - "log" - "net/http" - "os" - "sync" - "time" - - "github.com/openzipkin/zipkin-go/model" - "github.com/openzipkin/zipkin-go/reporter" -) - -// defaults -const ( - defaultTimeout = time.Second * 5 // timeout for http request in seconds - defaultBatchInterval = time.Second * 1 // BatchInterval in seconds - defaultBatchSize = 100 - defaultMaxBacklog = 1000 -) - -// httpReporter will send spans to a Zipkin HTTP Collector using Zipkin V2 API. -type httpReporter struct { - url string - client *http.Client - logger *log.Logger - batchInterval time.Duration - batchSize int - maxBacklog int - sendMtx *sync.Mutex - batchMtx *sync.Mutex - batch []*model.SpanModel - spanC chan *model.SpanModel - quit chan struct{} - shutdown chan error - reqCallback RequestCallbackFn - serializer reporter.SpanSerializer -} - -// Send implements reporter -func (r *httpReporter) Send(s model.SpanModel) { - r.spanC <- &s -} - -// Close implements reporter -func (r *httpReporter) Close() error { - close(r.quit) - return <-r.shutdown -} - -func (r *httpReporter) loop() { - var ( - nextSend = time.Now().Add(r.batchInterval) - ticker = time.NewTicker(r.batchInterval / 10) - tickerChan = ticker.C - ) - defer ticker.Stop() - - for { - select { - case span := <-r.spanC: - currentBatchSize := r.append(span) - if currentBatchSize >= r.batchSize { - nextSend = time.Now().Add(r.batchInterval) - go func() { - _ = r.sendBatch() - }() - } - case <-tickerChan: - if time.Now().After(nextSend) { - nextSend = time.Now().Add(r.batchInterval) - go func() { - _ = r.sendBatch() - }() - } - case <-r.quit: - r.shutdown <- r.sendBatch() - return - } - } -} - -func (r *httpReporter) append(span *model.SpanModel) (newBatchSize int) { - r.batchMtx.Lock() - - r.batch = append(r.batch, span) - if len(r.batch) > r.maxBacklog { - dispose := len(r.batch) - r.maxBacklog - r.logger.Printf("backlog too long, disposing %d spans", dispose) - r.batch = r.batch[dispose:] - } - newBatchSize = 
len(r.batch) - - r.batchMtx.Unlock() - return -} - -func (r *httpReporter) sendBatch() error { - // in order to prevent sending the same batch twice - r.sendMtx.Lock() - defer r.sendMtx.Unlock() - - // Select all current spans in the batch to be sent - r.batchMtx.Lock() - sendBatch := r.batch[:] - r.batchMtx.Unlock() - - if len(sendBatch) == 0 { - return nil - } - - body, err := r.serializer.Serialize(sendBatch) - if err != nil { - r.logger.Printf("failed when marshalling the spans batch: %s\n", err.Error()) - return err - } - - req, err := http.NewRequest("POST", r.url, bytes.NewReader(body)) - if err != nil { - r.logger.Printf("failed when creating the request: %s\n", err.Error()) - return err - } - req.Header.Set("Content-Type", r.serializer.ContentType()) - if r.reqCallback != nil { - r.reqCallback(req) - } - - resp, err := r.client.Do(req) - if err != nil { - r.logger.Printf("failed to send the request: %s\n", err.Error()) - return err - } - _ = resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode > 299 { - r.logger.Printf("failed the request with status code %d\n", resp.StatusCode) - } - - // Remove sent spans from the batch even if they were not saved - r.batchMtx.Lock() - r.batch = r.batch[len(sendBatch):] - r.batchMtx.Unlock() - - return nil -} - -// RequestCallbackFn receives the initialized request from the Collector before -// sending it over the wire. This allows one to plug in additional headers or -// do other customization. -type RequestCallbackFn func(*http.Request) - -// ReporterOption sets a parameter for the HTTP Reporter -type ReporterOption func(r *httpReporter) - -// Timeout sets maximum timeout for http request. -func Timeout(duration time.Duration) ReporterOption { - return func(r *httpReporter) { r.client.Timeout = duration } -} - -// BatchSize sets the maximum batch size, after which a collect will be -// triggered. The default batch size is 100 traces. 
-func BatchSize(n int) ReporterOption { - return func(r *httpReporter) { r.batchSize = n } -} - -// MaxBacklog sets the maximum backlog size. When batch size reaches this -// threshold, spans from the beginning of the batch will be disposed. -func MaxBacklog(n int) ReporterOption { - return func(r *httpReporter) { r.maxBacklog = n } -} - -// BatchInterval sets the maximum duration we will buffer traces before -// emitting them to the collector. The default batch interval is 1 second. -func BatchInterval(d time.Duration) ReporterOption { - return func(r *httpReporter) { r.batchInterval = d } -} - -// Client sets a custom http client to use. -func Client(client *http.Client) ReporterOption { - return func(r *httpReporter) { r.client = client } -} - -// RequestCallback registers a callback function to adjust the reporter -// *http.Request before it sends the request to Zipkin. -func RequestCallback(rc RequestCallbackFn) ReporterOption { - return func(r *httpReporter) { r.reqCallback = rc } -} - -// Logger sets the logger used to report errors in the collection -// process. -func Logger(l *log.Logger) ReporterOption { - return func(r *httpReporter) { r.logger = l } -} - -// Serializer sets the serialization function to use for sending span data to -// Zipkin. -func Serializer(serializer reporter.SpanSerializer) ReporterOption { - return func(r *httpReporter) { - if serializer != nil { - r.serializer = serializer - } - } -} - -// NewReporter returns a new HTTP Reporter. -// url should be the endpoint to send the spans to, e.g. 
-// http://localhost:9411/api/v2/spans -func NewReporter(url string, opts ...ReporterOption) reporter.Reporter { - r := httpReporter{ - url: url, - logger: log.New(os.Stderr, "", log.LstdFlags), - client: &http.Client{Timeout: defaultTimeout}, - batchInterval: defaultBatchInterval, - batchSize: defaultBatchSize, - maxBacklog: defaultMaxBacklog, - batch: []*model.SpanModel{}, - spanC: make(chan *model.SpanModel), - quit: make(chan struct{}, 1), - shutdown: make(chan error, 1), - sendMtx: &sync.Mutex{}, - batchMtx: &sync.Mutex{}, - serializer: reporter.JSONSerializer{}, - } - - for _, opt := range opts { - opt(&r) - } - - go r.loop() - - return &r -} diff --git a/vendor/github.com/openzipkin/zipkin-go/reporter/reporter.go b/vendor/github.com/openzipkin/zipkin-go/reporter/reporter.go deleted file mode 100644 index 921aff57560b..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/reporter/reporter.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package reporter holds the Reporter interface which is used by the Zipkin -Tracer to send finished spans. - -Subpackages of package reporter contain officially supported standard -reporter implementations. -*/ -package reporter - -import "github.com/openzipkin/zipkin-go/model" - -// Reporter interface can be used to provide the Zipkin Tracer with custom -// implementations to publish Zipkin Span data. 
-type Reporter interface { - Send(model.SpanModel) // Send Span data to the reporter - Close() error // Close the reporter -} - -type noopReporter struct{} - -func (r *noopReporter) Send(model.SpanModel) {} -func (r *noopReporter) Close() error { return nil } - -// NewNoopReporter returns a no-op Reporter implementation. -func NewNoopReporter() Reporter { - return &noopReporter{} -} diff --git a/vendor/github.com/openzipkin/zipkin-go/reporter/serializer.go b/vendor/github.com/openzipkin/zipkin-go/reporter/serializer.go deleted file mode 100644 index 6647e2b9f2cb..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/reporter/serializer.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package reporter - -import ( - "encoding/json" - - "github.com/openzipkin/zipkin-go/model" -) - -// SpanSerializer describes the methods needed for allowing to set Span encoding -// type for the various Zipkin transports. -type SpanSerializer interface { - Serialize([]*model.SpanModel) ([]byte, error) - ContentType() string -} - -// JSONSerializer implements the default JSON encoding SpanSerializer. -type JSONSerializer struct{} - -// Serialize takes an array of Zipkin SpanModel objects and returns a JSON -// encoding of it. 
-func (JSONSerializer) Serialize(spans []*model.SpanModel) ([]byte, error) { - return json.Marshal(spans) -} - -// ContentType returns the ContentType needed for this encoding. -func (JSONSerializer) ContentType() string { - return "application/json" -} diff --git a/vendor/github.com/openzipkin/zipkin-go/sample.go b/vendor/github.com/openzipkin/zipkin-go/sample.go deleted file mode 100644 index 6103c1384640..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/sample.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zipkin - -import ( - "fmt" - "math" - "math/rand" - "sync" - "time" -) - -// Sampler functions return if a Zipkin span should be sampled, based on its -// traceID. -type Sampler func(id uint64) bool - -// NeverSample will always return false. If used by a service it will not allow -// the service to start traces but will still allow the service to participate -// in traces started upstream. -func NeverSample(_ uint64) bool { return false } - -// AlwaysSample will always return true. If used by a service it will always start -// traces if no upstream trace has been propagated. If an incoming upstream trace -// is not sampled the service will adhere to this and only propagate the context. -func AlwaysSample(_ uint64) bool { return true } - -// NewModuloSampler provides a generic type Sampler. 
-func NewModuloSampler(mod uint64) Sampler { - if mod < 2 { - return AlwaysSample - } - return func(id uint64) bool { - return (id % mod) == 0 - } -} - -// NewBoundarySampler is appropriate for high-traffic instrumentation who -// provision random trace ids, and make the sampling decision only once. -// It defends against nodes in the cluster selecting exactly the same ids. -func NewBoundarySampler(rate float64, salt int64) (Sampler, error) { - if rate == 0.0 { - return NeverSample, nil - } - if rate == 1.0 { - return AlwaysSample, nil - } - if rate < 0.0001 || rate > 1 { - return nil, fmt.Errorf("rate should be 0.0 or between 0.0001 and 1: was %f", rate) - } - - var ( - boundary = int64(rate * 10000) - usalt = uint64(salt) - ) - return func(id uint64) bool { - return int64(math.Abs(float64(id^usalt)))%10000 < boundary - }, nil -} - -// NewCountingSampler is appropriate for low-traffic instrumentation or -// those who do not provision random trace ids. It is not appropriate for -// collectors as the sampling decision isn't idempotent (consistent based -// on trace id). -func NewCountingSampler(rate float64) (Sampler, error) { - if rate == 0.0 { - return NeverSample, nil - } - if rate == 1.0 { - return AlwaysSample, nil - } - if rate < 0.01 || rate > 1 { - return nil, fmt.Errorf("rate should be 0.0 or between 0.01 and 1: was %f", rate) - } - var ( - i = 0 - outOf100 = int(rate*100 + math.Copysign(0.5, rate*100)) // for rounding float to int conversion instead of truncation - decisions = randomBitSet(100, outOf100, rand.New(rand.NewSource(time.Now().UnixNano()))) - mtx = &sync.Mutex{} - ) - - return func(_ uint64) bool { - mtx.Lock() - result := decisions[i] - i++ - if i == 100 { - i = 0 - } - mtx.Unlock() - return result - }, nil -} - -/** - * Reservoir sampling algorithm borrowed from Stack Overflow. 
- * - * http://stackoverflow.com/questions/12817946/generate-a-random-bitset-with-n-1s - */ -func randomBitSet(size int, cardinality int, rnd *rand.Rand) []bool { - result := make([]bool, size) - chosen := make([]int, cardinality) - var i int - for i = 0; i < cardinality; i++ { - chosen[i] = i - result[i] = true - } - for ; i < size; i++ { - j := rnd.Intn(i + 1) - if j < cardinality { - result[chosen[j]] = false - result[i] = true - chosen[j] = i - } - } - return result -} diff --git a/vendor/github.com/openzipkin/zipkin-go/span.go b/vendor/github.com/openzipkin/zipkin-go/span.go deleted file mode 100644 index a7101e1142a4..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/span.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zipkin - -import ( - "time" - - "github.com/openzipkin/zipkin-go/model" -) - -// Span interface as returned by Tracer.StartSpan() -type Span interface { - // Context returns the Span's SpanContext. - Context() model.SpanContext - - // SetName updates the Span's name. - SetName(string) - - // SetRemoteEndpoint updates the Span's Remote Endpoint. - SetRemoteEndpoint(*model.Endpoint) - - // Annotate adds a timed event to the Span. - Annotate(time.Time, string) - - // Tag sets Tag with given key and value to the Span. If key already exists in - // the Span the value will be overridden except for error tags where the first - // value is persisted. 
- Tag(string, string) - - // Finish the Span and send to Reporter. If DelaySend option was used at - // Span creation time, Finish will not send the Span to the Reporter. It then - // becomes the user's responsibility to get the Span reported (by using - // span.Flush). - Finish() - - // Flush the Span to the Reporter (regardless of being finished or not). - // This can be used if the DelaySend SpanOption was set or when dealing with - // one-way RPC tracing where duration might not be measured. - Flush() -} diff --git a/vendor/github.com/openzipkin/zipkin-go/span_implementation.go b/vendor/github.com/openzipkin/zipkin-go/span_implementation.go deleted file mode 100644 index 72904a84f38e..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/span_implementation.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zipkin - -import ( - "sync" - "sync/atomic" - "time" - - "github.com/openzipkin/zipkin-go/model" -) - -type spanImpl struct { - mtx sync.RWMutex - model.SpanModel - tracer *Tracer - mustCollect int32 // used as atomic bool (1 = true, 0 = false) - flushOnFinish bool -} - -func (s *spanImpl) Context() model.SpanContext { - return s.SpanContext -} - -func (s *spanImpl) SetName(name string) { - s.mtx.Lock() - s.Name = name - s.mtx.Unlock() -} - -func (s *spanImpl) SetRemoteEndpoint(e *model.Endpoint) { - s.mtx.Lock() - if e == nil { - s.RemoteEndpoint = nil - } else { - s.RemoteEndpoint = &model.Endpoint{} - *s.RemoteEndpoint = *e - } - s.mtx.Unlock() -} - -func (s *spanImpl) Annotate(t time.Time, value string) { - a := model.Annotation{ - Timestamp: t, - Value: value, - } - - s.mtx.Lock() - s.Annotations = append(s.Annotations, a) - s.mtx.Unlock() -} - -func (s *spanImpl) Tag(key, value string) { - s.mtx.Lock() - - if key == string(TagError) { - if _, found := s.Tags[key]; found { - s.mtx.Unlock() - return - } - } - - s.Tags[key] = value - s.mtx.Unlock() -} - -func (s *spanImpl) Finish() { - if atomic.CompareAndSwapInt32(&s.mustCollect, 1, 0) { - s.Duration = time.Since(s.Timestamp) - if s.flushOnFinish { - s.tracer.reporter.Send(s.SpanModel) - } - } -} - -func (s *spanImpl) FinishedWithDuration(d time.Duration) { - if atomic.CompareAndSwapInt32(&s.mustCollect, 1, 0) { - s.Duration = d - if s.flushOnFinish { - s.tracer.reporter.Send(s.SpanModel) - } - } -} - -func (s *spanImpl) Flush() { - if s.SpanModel.Debug || (s.SpanModel.Sampled != nil && *s.SpanModel.Sampled) { - s.tracer.reporter.Send(s.SpanModel) - } -} diff --git a/vendor/github.com/openzipkin/zipkin-go/span_options.go b/vendor/github.com/openzipkin/zipkin-go/span_options.go deleted file mode 100644 index 5ac60bf35b42..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/span_options.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache 
License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zipkin - -import ( - "time" - - "github.com/openzipkin/zipkin-go/model" -) - -// SpanOption allows for functional options to adjust behavior and payload of -// the Span to be created with tracer.StartSpan(). -type SpanOption func(t *Tracer, s *spanImpl) - -// Kind sets the kind of the span being created.. -func Kind(kind model.Kind) SpanOption { - return func(t *Tracer, s *spanImpl) { - s.Kind = kind - } -} - -// Parent will use provided SpanContext as parent to the span being created. -func Parent(sc model.SpanContext) SpanOption { - return func(t *Tracer, s *spanImpl) { - if sc.Err != nil { - // encountered an extraction error - switch t.extractFailurePolicy { - case ExtractFailurePolicyRestart: - case ExtractFailurePolicyError: - panic(s.SpanContext.Err) - case ExtractFailurePolicyTagAndRestart: - s.Tags["error.extract"] = sc.Err.Error() - default: - panic(ErrInvalidExtractFailurePolicy) - } - /* don't use provided SpanContext, but restart trace */ - return - } - s.SpanContext = sc - } -} - -// StartTime uses a given start time for the span being created. -func StartTime(start time.Time) SpanOption { - return func(t *Tracer, s *spanImpl) { - s.Timestamp = start - } -} - -// RemoteEndpoint sets the remote endpoint of the span being created. -func RemoteEndpoint(e *model.Endpoint) SpanOption { - return func(t *Tracer, s *spanImpl) { - s.RemoteEndpoint = e - } -} - -// Tags sets initial tags for the span being created. 
If default tracer tags -// are present they will be overwritten on key collisions. -func Tags(tags map[string]string) SpanOption { - return func(t *Tracer, s *spanImpl) { - for k, v := range tags { - s.Tags[k] = v - } - } -} - -// FlushOnFinish when set to false will disable span.Finish() to send the Span -// to the Reporter automatically (which is the default behavior). If set to -// false, having the Span be reported becomes the responsibility of the user. -// This is available if late tag data is expected to be only available after the -// required finish time of the Span. -func FlushOnFinish(b bool) SpanOption { - return func(t *Tracer, s *spanImpl) { - s.flushOnFinish = b - } -} diff --git a/vendor/github.com/openzipkin/zipkin-go/tags.go b/vendor/github.com/openzipkin/zipkin-go/tags.go deleted file mode 100644 index 650913c9ba69..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/tags.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zipkin - -// Tag holds available types -type Tag string - -// Common Tag values -const ( - TagHTTPMethod Tag = "http.method" - TagHTTPPath Tag = "http.path" - TagHTTPUrl Tag = "http.url" - TagHTTPRoute Tag = "http.route" - TagHTTPStatusCode Tag = "http.status_code" - TagHTTPRequestSize Tag = "http.request.size" - TagHTTPResponseSize Tag = "http.response.size" - TagGRPCStatusCode Tag = "grpc.status_code" - TagSQLQuery Tag = "sql.query" - TagError Tag = "error" -) - -// Set a standard Tag with a payload on provided Span. -func (t Tag) Set(s Span, value string) { - s.Tag(string(t), value) -} diff --git a/vendor/github.com/openzipkin/zipkin-go/tracer.go b/vendor/github.com/openzipkin/zipkin-go/tracer.go deleted file mode 100644 index 0f294cf27740..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/tracer.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package zipkin - -import ( - "context" - "sync/atomic" - "time" - - "github.com/openzipkin/zipkin-go/idgenerator" - "github.com/openzipkin/zipkin-go/model" - "github.com/openzipkin/zipkin-go/propagation" - "github.com/openzipkin/zipkin-go/reporter" -) - -// Tracer is our Zipkin tracer implementation. It should be initialized using -// the NewTracer method. 
-type Tracer struct { - defaultTags map[string]string - extractFailurePolicy ExtractFailurePolicy - sampler Sampler - generate idgenerator.IDGenerator - reporter reporter.Reporter - localEndpoint *model.Endpoint - noop int32 // used as atomic bool (1 = true, 0 = false) - sharedSpans bool - unsampledNoop bool -} - -// NewTracer returns a new Zipkin Tracer. -func NewTracer(rep reporter.Reporter, opts ...TracerOption) (*Tracer, error) { - // set default tracer options - t := &Tracer{ - defaultTags: make(map[string]string), - extractFailurePolicy: ExtractFailurePolicyRestart, - sampler: AlwaysSample, - generate: idgenerator.NewRandom64(), - reporter: rep, - localEndpoint: nil, - noop: 0, - sharedSpans: true, - unsampledNoop: false, - } - - // if no reporter was provided we default to noop implementation. - if t.reporter == nil { - t.reporter = reporter.NewNoopReporter() - t.noop = 1 - } - - // process functional options - for _, opt := range opts { - if err := opt(t); err != nil { - return nil, err - } - } - - return t, nil -} - -// StartSpanFromContext creates and starts a span using the span found in -// context as parent. If no parent span is found a root span is created. -func (t *Tracer) StartSpanFromContext(ctx context.Context, name string, options ...SpanOption) (Span, context.Context) { - if parentSpan := SpanFromContext(ctx); parentSpan != nil { - options = append(options, Parent(parentSpan.Context())) - } - span := t.StartSpan(name, options...) - return span, NewContext(ctx, span) -} - -// StartSpan creates and starts a span. 
-func (t *Tracer) StartSpan(name string, options ...SpanOption) Span { - if atomic.LoadInt32(&t.noop) == 1 { - return &noopSpan{} - } - s := &spanImpl{ - SpanModel: model.SpanModel{ - Kind: model.Undetermined, - Name: name, - LocalEndpoint: t.localEndpoint, - Annotations: make([]model.Annotation, 0), - Tags: make(map[string]string), - }, - flushOnFinish: true, - tracer: t, - } - - // add default tracer tags to span - for k, v := range t.defaultTags { - s.Tag(k, v) - } - - // handle provided functional options - for _, option := range options { - option(t, s) - } - - if s.TraceID.Empty() { - // create root span - s.SpanContext.TraceID = t.generate.TraceID() - s.SpanContext.ID = t.generate.SpanID(s.SpanContext.TraceID) - } else { - // valid parent context found - if t.sharedSpans && s.Kind == model.Server { - // join span - s.Shared = true - } else { - // regular child span - parentID := s.SpanContext.ID - s.SpanContext.ParentID = &parentID - s.SpanContext.ID = t.generate.SpanID(model.TraceID{}) - } - } - - if !s.SpanContext.Debug && s.Sampled == nil { - // deferred sampled context found, invoke sampler - sampled := t.sampler(s.SpanContext.TraceID.Low) - s.SpanContext.Sampled = &sampled - if sampled { - s.mustCollect = 1 - } - } else { - if s.SpanContext.Debug || *s.Sampled { - s.mustCollect = 1 - } - } - - if t.unsampledNoop && s.mustCollect == 0 { - // trace not being sampled and noop requested - return &noopSpan{ - SpanContext: s.SpanContext, - } - } - - // add start time - if s.Timestamp.IsZero() { - s.Timestamp = time.Now() - } - - return s -} - -// Extract extracts a SpanContext using the provided Extractor function. -func (t *Tracer) Extract(extractor propagation.Extractor) (sc model.SpanContext) { - if atomic.LoadInt32(&t.noop) == 1 { - return - } - psc, err := extractor() - if psc != nil { - sc = *psc - } - sc.Err = err - return -} - -// SetNoop allows for killswitch behavior. If set to true the tracer will return -// noopSpans and all data is dropped. 
This allows operators to stop tracing in -// risk scenarios. Set back to false to resume tracing. -func (t *Tracer) SetNoop(noop bool) { - if noop { - atomic.CompareAndSwapInt32(&t.noop, 0, 1) - } else { - atomic.CompareAndSwapInt32(&t.noop, 1, 0) - } -} - -// LocalEndpoint returns a copy of the currently set local endpoint of the -// tracer instance. -func (t *Tracer) LocalEndpoint() *model.Endpoint { - if t.localEndpoint == nil { - return nil - } - ep := *t.localEndpoint - return &ep -} diff --git a/vendor/github.com/openzipkin/zipkin-go/tracer_options.go b/vendor/github.com/openzipkin/zipkin-go/tracer_options.go deleted file mode 100644 index 533c5e478ef1..000000000000 --- a/vendor/github.com/openzipkin/zipkin-go/tracer_options.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2019 The OpenZipkin Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zipkin - -import ( - "errors" - - "github.com/openzipkin/zipkin-go/idgenerator" - "github.com/openzipkin/zipkin-go/model" -) - -// Tracer Option Errors -var ( - ErrInvalidEndpoint = errors.New("requires valid local endpoint") - ErrInvalidExtractFailurePolicy = errors.New("invalid extract failure policy provided") -) - -// ExtractFailurePolicy deals with Extraction errors -type ExtractFailurePolicy int - -// ExtractFailurePolicyOptions -const ( - ExtractFailurePolicyRestart ExtractFailurePolicy = iota - ExtractFailurePolicyError - ExtractFailurePolicyTagAndRestart -) - -// TracerOption allows for functional options to adjust behavior of the Tracer -// to be created with NewTracer(). -type TracerOption func(o *Tracer) error - -// WithLocalEndpoint sets the local endpoint of the tracer. -func WithLocalEndpoint(e *model.Endpoint) TracerOption { - return func(o *Tracer) error { - if e == nil { - o.localEndpoint = nil - return nil - } - ep := *e - o.localEndpoint = &ep - return nil - } -} - -// WithExtractFailurePolicy allows one to set the ExtractFailurePolicy. -func WithExtractFailurePolicy(p ExtractFailurePolicy) TracerOption { - return func(o *Tracer) error { - if p < 0 || p > ExtractFailurePolicyTagAndRestart { - return ErrInvalidExtractFailurePolicy - } - o.extractFailurePolicy = p - return nil - } -} - -// WithNoopSpan if set to true will switch to a NoopSpan implementation -// if the trace is not sampled. -func WithNoopSpan(unsampledNoop bool) TracerOption { - return func(o *Tracer) error { - o.unsampledNoop = unsampledNoop - return nil - } -} - -// WithSharedSpans allows to place client-side and server-side annotations -// for a RPC call in the same span (Zipkin V1 behavior) or different spans -// (more in line with other tracing solutions). By default this Tracer -// uses shared host spans (so client-side and server-side in the same span). 
-func WithSharedSpans(val bool) TracerOption { - return func(o *Tracer) error { - o.sharedSpans = val - return nil - } -} - -// WithSampler allows one to set a Sampler function -func WithSampler(sampler Sampler) TracerOption { - return func(o *Tracer) error { - o.sampler = sampler - return nil - } -} - -// WithTraceID128Bit if set to true will instruct the Tracer to start traces -// with 128 bit TraceID's. If set to false the Tracer will start traces with -// 64 bits. -func WithTraceID128Bit(val bool) TracerOption { - return func(o *Tracer) error { - if val { - o.generate = idgenerator.NewRandom128() - } else { - o.generate = idgenerator.NewRandom64() - } - return nil - } -} - -// WithIDGenerator allows one to set a custom ID Generator -func WithIDGenerator(generator idgenerator.IDGenerator) TracerOption { - return func(o *Tracer) error { - o.generate = generator - return nil - } -} - -// WithTags allows one to set default tags to be added to each created span -func WithTags(tags map[string]string) TracerOption { - return func(o *Tracer) error { - for k, v := range tags { - o.defaultTags[k] = v - } - return nil - } -} - -// WithNoopTracer allows one to start the Tracer as Noop implementation. -func WithNoopTracer(tracerNoop bool) TracerOption { - return func(o *Tracer) error { - if tracerNoop { - o.noop = 1 - } else { - o.noop = 0 - } - return nil - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md deleted file mode 100644 index ec4154f8b2b7..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md +++ /dev/null @@ -1,216 +0,0 @@ -Changes by Version -================== - -2.16.1 (unreleased) -------------------- - -- Nothing yet. - - -2.16.0 (2019-03-24) -------------------- - -- Add baggage to B3 codec (#319) -- Add support for 128bit trace ids to zipkin thrift spans. 
(#378) -- Update zipkin propagation logic to support 128bit traceIDs (#373) -- Accept "true" for the x-b3-sampled header (#356) - -- Allow setting of PoolSpans from Config object (#322) -- Make propagators public to allow wrapping (#379) -- Change default metric namespace to use relevant separator for the metric backend (#364) -- Change metrics prefix to jaeger_tracer and add descriptions (#346) -- Bump OpenTracing to ^1.1.x (#383) -- Upgrade jaeger-lib to v2.0.0 (#359) -- Avoid defer when generating random number (#358) -- Use a pool of rand.Source to reduce lock contention when creating span ids (#357) -- Make JAEGER_ENDPOINT take priority over JAEGER_AGENT_XXX (#342) - - -2.15.0 (2018-10-10) -------------------- - -- Fix FollowsFrom spans ignoring baggage/debug header from dummy parent context (#313) -- Make maximum annotation length configurable in tracer options (#318) -- Support more environment variables in configuration (#323) -- Print error on Sampler Query failure (#328) -- Add an HTTPOption to support custom http.RoundTripper (#333) -- Return an error when an HTTP error code is seen in zipkin HTTP transport (#331) - - -2.14.0 (2018-04-30) -------------------- - -- Support throttling for debug traces (#274) -- Remove dependency on Apache Thrift (#303) -- Remove dependency on tchannel (#295) (#294) -- Test with Go 1.9 (#298) - - -2.13.0 (2018-04-15) -------------------- - -- Use value receiver for config.NewTracer() (#283) -- Lock span during jaeger thrift conversion (#273) -- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260) -- Added support for client configuration via env vars (#275) -- Allow overriding sampler in the Config (#270) - - -2.12.0 (2018-03-14) -------------------- - -- Use lock when retrieving span.Context() (#268) -- Add Configuration support for custom Injector and Extractor (#263) - - -2.11.2 (2018-01-12) -------------------- - -- Add Gopkg.toml to allow using the lib with `dep` - - -2.11.1 (2018-01-03) 
-------------------- - -- Do not enqueue spans after Reporter is closed (#235, #245) -- Change default flush interval to 1sec (#243) - - -2.11.0 (2017-11-27) -------------------- - -- Normalize metric names and tags to be compatible with Prometheus (#222) - - -2.10.0 (2017-11-14) -------------------- - -- Support custom tracing headers (#176) -- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182) -- Do not coerce baggage keys to lower case (#196) -- Log span name when span cannot be reported (#198) -- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219) - - -2.9.0 (2017-07-29) ------------------- - -- Pin thrift <= 0.10 (#179) -- Introduce a parallel interface ContribObserver (#159) - - -2.8.0 (2017-07-05) ------------------- - -- Drop `jaeger.` prefix from `jaeger.hostname` process-level tag -- Add options to set tracer tags - - -2.7.0 (2017-06-21) ------------------- - -- Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140) -- Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147) -- Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153) -- Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158) -- Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161) - - -2.6.0 (2017-03-28) ------------------- - -- Add config option to initialize RPC Metrics feature - - -2.5.0 (2017-03-23) ------------------- - -- Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123) -- Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124) -- Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125) - - 
-2.4.0 (2017-03-21) ------------------- - -- Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121) -- Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121) - - -2.3.0 (2017-03-20) ------------------- - -- Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117) -- Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118) - - -2.2.1 (2017-03-14) ------------------- - -- Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111) - - -2.2.0 (2017-03-10) ------------------- - -- Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94) -- Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103) - - -2.1.2 (2017-02-27) -------------------- - -- Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99) -- Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100) -- Add tracer initialization godoc examples - - -2.1.1 (2017-02-21) -------------------- - -- Fix inefficient usage of zap.Logger - - -2.1.0 (2017-02-17) -------------------- - -- Add adapter for zap.Logger (https://github.com/uber-go/zap) -- Move logging API to ./log/ package - - -2.0.0 (2017-02-08) -------------------- - -- Support Adaptive Sampling -- Support 128bit Trace IDs -- Change trace/span IDs from uint64 to strong types TraceID and SpanID -- Add Zipkin HTTP B3 Propagation format support #72 -- Rip out existing metrics and use github.com/uber/jaeger-lib/metrics -- Change API for tracer, reporter, sampler initialization - - -1.6.0 (2016-10-14) -------------------- - -- Add Zipkin HTTP transport -- Support external baggage via jaeger-baggage 
header -- Unpin Thrift version, keep to master - - -1.5.1 (2016-09-27) -------------------- - -- Relax dependency on opentracing to ^1 - - -1.5.0 (2016-09-27) -------------------- - -- Upgrade to opentracing-go 1.0 -- Support KV logging for Spans - - -1.4.0 (2016-09-14) -------------------- - -- Support debug traces via HTTP header "jaeger-debug-id" diff --git a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md deleted file mode 100644 index 7cf014a51edb..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md +++ /dev/null @@ -1,170 +0,0 @@ -# How to Contribute to Jaeger - -We'd love your help! - -Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub -pull requests. This document outlines some of the conventions on development -workflow, commit message formatting, contact points and other resources to make -it easier to get your contribution accepted. - -We gratefully welcome improvements to documentation as well as to code. - -# Certificate of Origin - -By contributing to this project you agree to the [Developer Certificate of -Origin](https://developercertificate.org/) (DCO). This document was created -by the Linux Kernel community and is a simple statement that you, as a -contributor, have the legal right to make the contribution. See the [DCO](DCO) -file for details. - -## Getting Started - -This library uses [glide](https://github.com/Masterminds/glide) to manage dependencies. 
- -To get started, make sure you clone the Git repository into the correct location -`github.com/uber/jaeger-client-go` relative to `$GOPATH`: - -``` -mkdir -p $GOPATH/src/github.com/uber -cd $GOPATH/src/github.com/uber -git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go -cd jaeger-client-go -``` - -Then install dependencies and run the tests: - -``` -git submodule update --init --recursive -glide install -make test -``` - -## Imports grouping - -This projects follows the following pattern for grouping imports in Go files: - * imports from standard library - * imports from other projects - * imports from `jaeger-client-go` project - -For example: - -```go -import ( - "fmt" - - "github.com/uber/jaeger-lib/metrics" - "go.uber.org/zap" - - "github.com/uber/jaeger-client-go/config" -) -``` - -## Making A Change - -*Before making any significant changes, please [open an -issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed -changes ahead of time will make the contribution process smooth for everyone. - -Once we've discussed your changes and you've got your code ready, make sure -that tests are passing (`make test` or `make cover`) and open your PR. Your -pull request is most likely to be accepted if it: - -* Includes tests for new functionality. -* Follows the guidelines in [Effective - Go](https://golang.org/doc/effective_go.html) and the [Go team's common code - review comments](https://github.com/golang/go/wiki/CodeReviewComments). -* Has a [good commit message](https://chris.beams.io/posts/git-commit/): - * Separate subject from body with a blank line - * Limit the subject line to 50 characters - * Capitalize the subject line - * Do not end the subject line with a period - * Use the imperative mood in the subject line - * Wrap the body at 72 characters - * Use the body to explain _what_ and _why_ instead of _how_ -* Each commit must be signed by the author ([see below](#sign-your-work)). 
- -## License - -By contributing your code, you agree to license your contribution under the terms -of the [Apache License](LICENSE). - -If you are adding a new file it should have a header like below. The easiest -way to add such header is to run `make fmt`. - -``` -// Copyright (c) 2017 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -``` - -## Sign your work - -The sign-off is a simple line at the end of the explanation for the -patch, which certifies that you wrote it or otherwise have the right to -pass it on as an open-source patch. The rules are pretty simple: if you -can certify the below (from -[developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -using your real name (sorry, no pseudonyms or anonymous contributions.) - -You can add the sign off when creating the git commit via `git commit -s`. - -If you want this to be automatic you can set up some aliases: - -``` -git config --add alias.amend "commit -s --amend" -git config --add alias.c "commit -s" -``` diff --git a/vendor/github.com/uber/jaeger-client-go/DCO b/vendor/github.com/uber/jaeger-client-go/DCO deleted file mode 100644 index 068953d4bd98..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/DCO +++ /dev/null @@ -1,37 +0,0 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
-660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. - diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock deleted file mode 100644 index 55d9ac030c9b..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock +++ /dev/null @@ -1,223 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - branch = "master" - digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113" - name = "github.com/codahale/hdrhistogram" - packages = ["."] - pruneopts = "UT" - revision = "3a0bb77429bd3a61596f5e8a3172445844342120" - -[[projects]] - branch = "master" - digest = "1:a382acd6150713655ded76ab5fbcbc7924a7808dab4312dda5d1f23dd8ce5277" - name = "github.com/crossdock/crossdock-go" - packages = [ - ".", - "assert", - "require", - ] - pruneopts = "UT" - revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361" - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:318f1c959a8a740366fce4b1e1eb2fd914036b4af58fbd0a003349b305f118ad" - name = "github.com/golang/protobuf" - packages = ["proto"] - pruneopts = "UT" - revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30" - version = "v1.3.1" - -[[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:727b8f567a30d0739d6c26b9472b3422b351c93cf62095164c845a54b16fc18e" - name = "github.com/opentracing/opentracing-go" - packages = [ - ".", - "ext", - "harness", - "log", - ] - pruneopts = "UT" - revision = "659c90643e714681897ec2521c60567dd21da733" - version = "v1.1.0" - -[[projects]] - digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" - name = 
"github.com/pkg/errors" - packages = ["."] - pruneopts = "UT" - revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" - version = "v0.8.1" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:b6221ec0f8903b556e127c449e7106b63e6867170c2d10a7c058623d086f2081" - name = "github.com/prometheus/client_golang" - packages = ["prometheus"] - pruneopts = "UT" - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" - -[[projects]] - digest = "1:35cf6bdf68db765988baa9c4f10cc5d7dda1126a54bd62e252dbcd0b1fc8da90" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250" - version = "v0.2.0" - -[[projects]] - branch = "master" - digest = "1:c31163bd62461e0c5f7ddc7363e39ef8d9e929693e77b5c11c709b05f9cb9219" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "iostats", - "nfs", - "xfs", - ] - pruneopts = "UT" - revision = "55ae3d9d557340b5bc24cd8aa5f6fa2c2ab31352" - -[[projects]] - digest = "1:8ff03ccc603abb0d7cce94d34b613f5f6251a9e1931eba1a3f9888a9029b055c" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require", - "suite", - ] - pruneopts = "UT" - revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053" - version = "v1.3.0" - -[[projects]] - digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" - name = "github.com/uber-go/atomic" - packages = ["."] - pruneopts = "UT" 
- revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" - version = "v1.3.2" - -[[projects]] - digest = "1:f5c5ad1e08141e18aee1b9c37729d93d06805840421ccfc9d407787ffe969ce6" - name = "github.com/uber/jaeger-lib" - packages = [ - "metrics", - "metrics/metricstest", - "metrics/prometheus", - ] - pruneopts = "UT" - revision = "0e30338a695636fe5bcf7301e8030ce8dd2a8530" - version = "v2.0.0" - -[[projects]] - digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" - name = "go.uber.org/atomic" - packages = ["."] - pruneopts = "UT" - revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" - version = "v1.3.2" - -[[projects]] - digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" - name = "go.uber.org/multierr" - packages = ["."] - pruneopts = "UT" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" - -[[projects]] - digest = "1:c52caf7bd44f92e54627a31b85baf06a68333a196b3d8d241480a774733dcf8b" - name = "go.uber.org/zap" - packages = [ - ".", - "buffer", - "internal/bufferpool", - "internal/color", - "internal/exit", - "zapcore", - ] - pruneopts = "UT" - revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" - version = "v1.9.1" - -[[projects]] - branch = "master" - digest = "1:f8b491a7c25030a895a0e579742d07136e6958e77ef2d46e769db8eec4e58fcd" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - ] - pruneopts = "UT" - revision = "addf6b3196f61cd44ce5a76657913698c73479d0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/crossdock/crossdock-go", - "github.com/opentracing/opentracing-go", - "github.com/opentracing/opentracing-go/ext", - "github.com/opentracing/opentracing-go/harness", - "github.com/opentracing/opentracing-go/log", - "github.com/pkg/errors", - "github.com/prometheus/client_golang/prometheus", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/require", - "github.com/stretchr/testify/suite", - 
"github.com/uber-go/atomic", - "github.com/uber/jaeger-lib/metrics", - "github.com/uber/jaeger-lib/metrics/metricstest", - "github.com/uber/jaeger-lib/metrics/prometheus", - "go.uber.org/zap", - "go.uber.org/zap/zapcore", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml deleted file mode 100644 index 067f15a9247d..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml +++ /dev/null @@ -1,31 +0,0 @@ -[[constraint]] - name = "github.com/crossdock/crossdock-go" - branch = "master" - -[[constraint]] - name = "github.com/opentracing/opentracing-go" - version = "^1.1" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "0.8.0" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "^1.1.3" - -[[constraint]] - name = "github.com/uber-go/atomic" - version = "^1" - -[[constraint]] - name = "github.com/uber/jaeger-lib" - version = "^2.0" - -[[constraint]] - name = "go.uber.org/zap" - version = "^1" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/github.com/uber/jaeger-client-go/LICENSE b/vendor/github.com/uber/jaeger-client-go/LICENSE deleted file mode 100644 index 261eeb9e9f8b..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile deleted file mode 100644 index 1b10c0964c70..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/Makefile +++ /dev/null @@ -1,123 +0,0 @@ -PROJECT_ROOT=github.com/uber/jaeger-client-go -PACKAGES := $(shell glide novendor | grep -v -e ./thrift-gen/... -e ./thrift/...) -# all .go files that don't exist in hidden directories -ALL_SRC := $(shell find . 
-name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \ - -e ".*/\..*" \ - -e ".*/_.*" \ - -e ".*/mocks.*") - --include crossdock/rules.mk - -export GO15VENDOREXPERIMENT=1 - -RACE=-race -GOTEST=go test -v $(RACE) -GOLINT=golint -GOVET=go vet -GOFMT=gofmt -FMT_LOG=fmt.log -LINT_LOG=lint.log - -THRIFT_VER=0.9.3 -THRIFT_IMG=thrift:$(THRIFT_VER) -THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift -THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift" -THRIFT_GEN_DIR=thrift-gen - -PASS=$(shell printf "\033[32mPASS\033[0m") -FAIL=$(shell printf "\033[31mFAIL\033[0m") -COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/'' - -.DEFAULT_GOAL := test-and-lint - -.PHONY: test-and-lint -test-and-lint: test fmt lint - -.PHONY: test -test: -ifeq ($(USE_DEP),true) - dep check -endif - bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)" - -.PHONY: fmt -fmt: - $(GOFMT) -e -s -l -w $(ALL_SRC) - ./scripts/updateLicenses.sh - -.PHONY: lint -lint: - $(GOVET) $(PACKAGES) - @cat /dev/null > $(LINT_LOG) - @$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;) - @[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false) - @$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG) - ./scripts/updateLicenses.sh >> $(FMT_LOG) - @[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false) - - -.PHONY: install -install: - glide --version || go get github.com/Masterminds/glide -ifeq ($(USE_DEP),true) - dep ensure -else - glide install -endif - - -.PHONY: cover -cover: - ./scripts/cover.sh $(shell go list $(PACKAGES)) - go tool cover -html=cover.out -o cover.html - - -# This is not part of the regular test target because we don't want to slow it -# down. 
-.PHONY: test-examples -test-examples: - make -C examples - -# TODO at the moment we're not generating tchan_*.go files -thrift: idl-submodule thrift-image - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift - sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go - sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go - sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \ - $(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go - rm -rf thrift-gen/*/*-remote - rm -rf crossdock/thrift/*/*-remote - rm -rf thrift-gen/jaeger/collector.go - -idl-submodule: - git submodule init - git submodule update - -thrift-image: - $(THRIFT) -version - -.PHONY: install-dep-ci -install-dep-ci: - - curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $$GOPATH/bin/dep - - chmod +x $$GOPATH/bin/dep - -.PHONY: install-ci -install-ci: install-dep-ci install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover - go get golang.org/x/lint/golint - -.PHONY: test-ci -test-ci: - @./scripts/cover.sh $(shell go list $(PACKAGES)) -ifeq ($(CI_SKIP_LINT),true) - echo 'skipping lint' -else - make lint -endif diff --git a/vendor/github.com/uber/jaeger-client-go/README.md 
b/vendor/github.com/uber/jaeger-client-go/README.md deleted file mode 100644 index 6d9546e5b3d7..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/README.md +++ /dev/null @@ -1,270 +0,0 @@ -[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url] - -# Jaeger Bindings for Go OpenTracing API - -Instrumentation library that implements an -[OpenTracing](http://opentracing.io) Tracer for Jaeger (https://jaegertracing.io). - -**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`, it will not compile. We might revisit this in the next major release. - * :white_check_mark: `import "github.com/uber/jaeger-client-go"` - * :x: `import "github.com/jaegertracing/jaeger-client-go"` - -## How to Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md). - -## Installation - -We recommended using a dependency manager like [glide](https://github.com/Masterminds/glide) -and [semantic versioning](http://semver.org/) when including this library into an application. -For example, Jaeger backend imports this library like this: - -```yaml -- package: github.com/uber/jaeger-client-go - version: ^2.7.0 -``` - -If you instead want to use the latest version in `master`, you can pull it via `go get`. -Note that during `go get` you may see build errors due to incompatible dependencies, which is why -we recommend using semantic versions for dependencies. The error may be fixed by running -`make install` (it will install `glide` if you don't have it): - -```shell -go get -u github.com/uber/jaeger-client-go/ -cd $GOPATH/src/github.com/uber/jaeger-client-go/ -git submodule update --init --recursive -make install -``` - -## Initialization - -See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples) -and [config/example_test.go](./config/example_test.go). 
- -### Environment variables - -The tracer can be initialized with values coming from environment variables. None of the env vars are required -and all of them can be overriden via direct setting of the property on the configuration object. - -Property| Description ---- | --- -JAEGER_SERVICE_NAME | The service name -JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP -JAEGER_AGENT_PORT | The port for communicating with agent via UDP -JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces -JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint -JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint -JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans -JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size -JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. "500ms" or "2s" ([valid units][timeunits]) -JAEGER_SAMPLER_TYPE | The sampler type -JAEGER_SAMPLER_PARAM | The sampler parameter (number) -JAEGER_SAMPLER_MANAGER_HOST_PORT | The HTTP endpoint when using the remote sampler, i.e. http://jaeger-agent:5778/sampling -JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of -JAEGER_SAMPLER_REFRESH_INTERVAL | How often the remotely controlled sampler will poll jaeger-agent for the appropriate sampling strategy, with units, e.g. "1m" or "30s" ([valid units][timeunits]) -JAEGER_TAGS | A comma separated list of `name = value` tracer level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:default}`, where the `:default` is optional, and identifies a value to be used if the environment variable cannot be found -JAEGER_DISABLED | Whether the tracer is disabled or not. If true, the default `opentracing.NoopTracer` is used. 
-JAEGER_RPC_METRICS | Whether to store RPC metrics - -By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and -`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces -to the endpoint via `HTTP`, making the `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT` unused. If `JAEGER_ENDPOINT` is -secured, HTTP basic authentication can be performed by setting the `JAEGER_USER` and `JAEGER_PASSWORD` environment -variables. - -### Closing the tracer via `io.Closer` - -The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance. -It is recommended to structure your `main()` so that it calls the `Close()` function on the closer -before exiting, e.g. - -```go -tracer, closer, err := cfg.NewTracer(...) -defer closer.Close() -``` - -This is especially useful for command-line tools that enable tracing, as well as -for the long-running apps that support graceful shutdown. For example, if your deployment -system sends SIGTERM instead of killing the process and you trap that signal to do a graceful -exit, then having `defer closer.Closer()` ensures that all buffered spans are flushed. - -### Metrics & Monitoring - -The tracer emits a number of different metrics, defined in -[metrics.go](metrics.go). The monitoring backend is expected to support -tag-based metric names, e.g. instead of `statsd`-style string names -like `counters.my-service.jaeger.spans.started.sampled`, the metrics -are defined by a short name and a collection of key/value tags, for -example: `name:jaeger.traces, state:started, sampled:y`. See [metrics.go](./metrics.go) -file for the full list and descriptions of emitted metrics. - -The monitoring backend is represented by the `metrics.Factory` interface from package -[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). 
An implementation -of that interface can be passed as an option to either the Configuration object or the Tracer -constructor, for example: - -```go -import ( - "github.com/uber/jaeger-client-go/config" - "github.com/uber/jaeger-lib/metrics/prometheus" -) - - metricsFactory := prometheus.New() - tracer, closer, err := config.Configuration{ - ServiceName: "your-service-name", - }.NewTracer( - config.Metrics(metricsFactory), - ) -``` - -By default, a no-op `metrics.NullFactory` is used. - -### Logging - -The tracer can be configured with an optional logger, which will be -used to log communication errors, or log spans if a logging reporter -option is specified in the configuration. The logging API is abstracted -by the [Logger](logger.go) interface. A logger instance implementing -this interface can be set on the `Config` object before calling the -`New` method. - -Besides the [zap](https://github.com/uber-go/zap) implementation -bundled with this package there is also a [go-kit](https://github.com/go-kit/kit) -one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository. - -## Instrumentation for Tracing - -Since this tracer is fully compliant with OpenTracing API 1.0, -all code instrumentation should only use the API itself, as described -in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation. - -## Features - -### Reporters - -A "reporter" is a component that receives the finished spans and reports -them to somewhere. Under normal circumstances, the Tracer -should use the default `RemoteReporter`, which sends the spans out of -process via configurable "transport". For testing purposes, one can -use an `InMemoryReporter` that accumulates spans in a buffer and -allows to retrieve them for later verification. 
Also available are -`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter` -which logs all finished spans using their `String()` method, and a -`CompositeReporter` that can be used to combine more than one reporter -into one, e.g. to attach a logging reporter to the main remote reporter. - -### Span Reporting Transports - -The remote reporter uses "transports" to actually send the spans out -of process. Currently the supported transports include: - * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP, - * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP. - -### Sampling - -The tracer does not record all spans, but only those that have the -sampling bit set in the `flags`. When a new trace is started and a new -unique ID is generated, a sampling decision is made whether this trace -should be sampled. The sampling decision is propagated to all downstream -calls via the `flags` field of the trace context. The following samplers -are available: - 1. `RemotelyControlledSampler` uses one of the other simpler samplers - and periodically updates it by polling an external server. This - allows dynamic control of the sampling strategies. - 1. `ConstSampler` always makes the same sampling decision for all - trace IDs. it can be configured to either sample all traces, or - to sample none. - 1. `ProbabilisticSampler` uses a fixed sampling rate as a probability - for a given trace to be sampled. The actual decision is made by - comparing the trace ID with a random number multiplied by the - sampling rate. - 1. `RateLimitingSampler` can be used to allow only a certain fixed - number of traces to be sampled per second. - -### Baggage Injection - -The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added -to the span context and propagated throughout the trace. 
An external process can inject baggage -by setting the special HTTP Header `jaeger-baggage` on a request: - -```sh -curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com -``` - -Baggage can also be programatically set inside your service: - -```go -if span := opentracing.SpanFromContext(ctx); span != nil { - span.SetBaggageItem("key", "value") -} -``` - -Another service downstream of that can retrieve the baggage in a similar way: - -```go -if span := opentracing.SpanFromContext(ctx); span != nil { - val := span.BaggageItem("key") - println(val) -} -``` - -### Debug Traces (Forced Sampling) - -#### Programmatically - -The OpenTracing API defines a `sampling.priority` standard tag that -can be used to affect the sampling of a span and its children: - -```go -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" -) - -span := opentracing.SpanFromContext(ctx) -ext.SamplingPriority.Set(span, 1) -``` - -#### Via HTTP Headers - -Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`, -which can be set in the incoming request, e.g. - -```sh -curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com -``` - -When Jaeger sees this header in the request that otherwise has no -tracing context, it ensures that the new trace started for this -request will be sampled in the "debug" mode (meaning it should survive -all downsampling that might happen in the collection pipeline), and the -root span will have a tag as if this statement was executed: - -```go -span.SetTag("jaeger-debug-id", "some-correlation-id") -``` - -This allows using Jaeger UI to find the trace by this tag. - -### Zipkin HTTP B3 compatible header propagation - -Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used -by a lot of Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin). 
- -However it is not the default propagation format, see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) how to set it up. - -## License - -[Apache 2.0 License](LICENSE). - - -[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg -[doc]: https://godoc.org/github.com/uber/jaeger-client-go -[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master -[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go -[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go -[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg -[ot-url]: http://opentracing.io -[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item -[timeunits]: https://golang.org/pkg/time/#ParseDuration diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md deleted file mode 100644 index 115e49ab8ad3..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/RELEASE.md +++ /dev/null @@ -1,11 +0,0 @@ -# Release Process - -1. Create a PR "Preparing for release X.Y.Z" against master branch - * Alter CHANGELOG.md from ` (unreleased)` to ` (YYYY-MM-DD)` - * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z` -2. Create a release "Release X.Y.Z" on Github - * Create Tag `vX.Y.Z` - * Copy CHANGELOG.md into the release notes -3. Create a PR "Back to development" against master branch - * Add ` (unreleased)` to CHANGELOG.md - * Update `JaegerClientVersion` in constants.go to `Go-dev` diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go deleted file mode 100644 index 1037ca0e861d..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "github.com/opentracing/opentracing-go/log" - - "github.com/uber/jaeger-client-go/internal/baggage" -) - -// baggageSetter is an actor that can set a baggage value on a Span given certain -// restrictions (eg. maxValueLength). -type baggageSetter struct { - restrictionManager baggage.RestrictionManager - metrics *Metrics -} - -func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter { - return &baggageSetter{ - restrictionManager: restrictionManager, - metrics: metrics, - } -} - -// (NB) span should hold the lock before making this call -func (s *baggageSetter) setBaggage(span *Span, key, value string) { - var truncated bool - var prevItem string - restriction := s.restrictionManager.GetRestriction(span.serviceName(), key) - if !restriction.KeyAllowed() { - s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed()) - s.metrics.BaggageUpdateFailure.Inc(1) - return - } - if len(value) > restriction.MaxValueLength() { - truncated = true - value = value[:restriction.MaxValueLength()] - s.metrics.BaggageTruncate.Inc(1) - } - prevItem = span.context.baggage[key] - s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed()) - span.context = span.context.WithBaggageItem(key, value) - s.metrics.BaggageUpdateSuccess.Inc(1) -} - -func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, 
truncated, valid bool) { - if !span.context.IsSampled() { - return - } - fields := []log.Field{ - log.String("event", "baggage"), - log.String("key", key), - log.String("value", value), - } - if prevItem != "" { - fields = append(fields, log.String("override", "true")) - } - if truncated { - fields = append(fields, log.String("truncated", "true")) - } - if !valid { - fields = append(fields, log.String("invalid", "true")) - } - span.logFieldsNoLocking(fields...) -} diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go deleted file mode 100644 index 3201250874b0..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/config/config.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright (c) 2017-2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "errors" - "fmt" - "io" - "strings" - "time" - - "github.com/opentracing/opentracing-go" - - "github.com/uber/jaeger-client-go" - "github.com/uber/jaeger-client-go/internal/baggage/remote" - throttler "github.com/uber/jaeger-client-go/internal/throttler/remote" - "github.com/uber/jaeger-client-go/rpcmetrics" - "github.com/uber/jaeger-client-go/transport" - "github.com/uber/jaeger-lib/metrics" -) - -const defaultSamplingProbability = 0.001 - -// Configuration configures and creates Jaeger Tracer -type Configuration struct { - // ServiceName specifies the service name to use on the tracer. 
- // Can be provided via environment variable named JAEGER_SERVICE_NAME - ServiceName string `yaml:"serviceName"` - - // Disabled can be provided via environment variable named JAEGER_DISABLED - Disabled bool `yaml:"disabled"` - - // RPCMetrics can be provided via environment variable named JAEGER_RPC_METRICS - RPCMetrics bool `yaml:"rpc_metrics"` - - // Tags can be provided via environment variable named JAEGER_TAGS - Tags []opentracing.Tag `yaml:"tags"` - - Sampler *SamplerConfig `yaml:"sampler"` - Reporter *ReporterConfig `yaml:"reporter"` - Headers *jaeger.HeadersConfig `yaml:"headers"` - BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"` - Throttler *ThrottlerConfig `yaml:"throttler"` -} - -// SamplerConfig allows initializing a non-default sampler. All fields are optional. -type SamplerConfig struct { - // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote - // Can be set by exporting an environment variable named JAEGER_SAMPLER_TYPE - Type string `yaml:"type"` - - // Param is a value passed to the sampler. - // Valid values for Param field are: - // - for "const" sampler, 0 or 1 for always false/true respectively - // - for "probabilistic" sampler, a probability between 0 and 1 - // - for "rateLimiting" sampler, the number of spans per second - // - for "remote" sampler, param is the same as for "probabilistic" - // and indicates the initial sampling rate before the actual one - // is received from the mothership. - // Can be set by exporting an environment variable named JAEGER_SAMPLER_PARAM - Param float64 `yaml:"param"` - - // SamplingServerURL is the address of jaeger-agent's HTTP sampling server - // Can be set by exporting an environment variable named JAEGER_SAMPLER_MANAGER_HOST_PORT - SamplingServerURL string `yaml:"samplingServerURL"` - - // MaxOperations is the maximum number of operations that the sampler - // will keep track of. 
If an operation is not tracked, a default probabilistic - // sampler will be used rather than the per operation specific sampler. - // Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS - MaxOperations int `yaml:"maxOperations"` - - // SamplingRefreshInterval controls how often the remotely controlled sampler will poll - // jaeger-agent for the appropriate sampling strategy. - // Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL - SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"` -} - -// ReporterConfig configures the reporter. All fields are optional. -type ReporterConfig struct { - // QueueSize controls how many spans the reporter can keep in memory before it starts dropping - // new spans. The queue is continuously drained by a background go-routine, as fast as spans - // can be sent out of process. - // Can be set by exporting an environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE - QueueSize int `yaml:"queueSize"` - - // BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full. - // It is generally not useful, as it only matters for very low traffic services. - // Can be set by exporting an environment variable named JAEGER_REPORTER_FLUSH_INTERVAL - BufferFlushInterval time.Duration - - // LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter - // and logs all submitted spans. Main Configuration.Logger must be initialized in the code - // for this option to have any effect. 
- // Can be set by exporting an environment variable named JAEGER_REPORTER_LOG_SPANS - LogSpans bool `yaml:"logSpans"` - - // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address - // Can be set by exporting an environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT - LocalAgentHostPort string `yaml:"localAgentHostPort"` - - // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL - // Can be set by exporting an environment variable named JAEGER_ENDPOINT - CollectorEndpoint string `yaml:"collectorEndpoint"` - - // User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector. - // Can be set by exporting an environment variable named JAEGER_USER - User string `yaml:"user"` - - // Password instructs reporter to include a password for basic http authentication when sending spans to - // jaeger-collector. Can be set by exporting an environment variable named JAEGER_PASSWORD - Password string `yaml:"password"` -} - -// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist -// certain baggage keys. All fields are optional. -type BaggageRestrictionsConfig struct { - // DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction - // manager. If true, the manager will not allow any baggage to be written until baggage restrictions have - // been retrieved from jaeger-agent. If false, the manager wil allow any baggage to be written until baggage - // restrictions have been retrieved from jaeger-agent. - DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"` - - // HostPort is the hostPort of jaeger-agent's baggage restrictions server - HostPort string `yaml:"hostPort"` - - // RefreshInterval controls how often the baggage restriction manager will poll - // jaeger-agent for the most recent baggage restrictions. 
- RefreshInterval time.Duration `yaml:"refreshInterval"` -} - -// ThrottlerConfig configures the throttler which can be used to throttle the -// rate at which the client may send debug requests. -type ThrottlerConfig struct { - // HostPort of jaeger-agent's credit server. - HostPort string `yaml:"hostPort"` - - // RefreshInterval controls how often the throttler will poll jaeger-agent - // for more throttling credits. - RefreshInterval time.Duration `yaml:"refreshInterval"` - - // SynchronousInitialization determines whether or not the throttler should - // synchronously fetch credits from the agent when an operation is seen for - // the first time. This should be set to true if the client will be used by - // a short lived service that needs to ensure that credits are fetched - // upfront such that sampling or throttling occurs. - SynchronousInitialization bool `yaml:"synchronousInitialization"` -} - -type nullCloser struct{} - -func (*nullCloser) Close() error { return nil } - -// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers -// before shutdown. -// -// Deprecated: use NewTracer() function -func (c Configuration) New( - serviceName string, - options ...Option, -) (opentracing.Tracer, io.Closer, error) { - if serviceName != "" { - c.ServiceName = serviceName - } - - return c.NewTracer(options...) -} - -// NewTracer returns a new tracer based on the current configuration, using the given options, -// and a closer func that can be used to flush buffers before shutdown. -func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) { - if c.ServiceName == "" { - return nil, nil, errors.New("no service name provided") - } - - if c.Disabled { - return &opentracing.NoopTracer{}, &nullCloser{}, nil - } - opts := applyOptions(options...) 
- tracerMetrics := jaeger.NewMetrics(opts.metrics, nil) - if c.RPCMetrics { - Observer( - rpcmetrics.NewObserver( - opts.metrics.Namespace(metrics.NSOptions{Name: "jaeger-rpc", Tags: map[string]string{"component": "jaeger"}}), - rpcmetrics.DefaultNameNormalizer, - ), - )(&opts) // adds to c.observers - } - if c.Sampler == nil { - c.Sampler = &SamplerConfig{ - Type: jaeger.SamplerTypeRemote, - Param: defaultSamplingProbability, - } - } - if c.Reporter == nil { - c.Reporter = &ReporterConfig{} - } - - sampler := opts.sampler - if sampler == nil { - s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics) - if err != nil { - return nil, nil, err - } - sampler = s - } - - reporter := opts.reporter - if reporter == nil { - r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger) - if err != nil { - return nil, nil, err - } - reporter = r - } - - tracerOptions := []jaeger.TracerOption{ - jaeger.TracerOptions.Metrics(tracerMetrics), - jaeger.TracerOptions.Logger(opts.logger), - jaeger.TracerOptions.CustomHeaderKeys(c.Headers), - jaeger.TracerOptions.Gen128Bit(opts.gen128Bit), - jaeger.TracerOptions.PoolSpans(opts.poolSpans), - jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan), - jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength), - } - - for _, tag := range opts.tags { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value)) - } - - for _, tag := range c.Tags { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value)) - } - - for _, obs := range opts.observers { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs)) - } - - for _, cobs := range opts.contribObservers { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs)) - } - - for format, injector := range opts.injectors { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector)) - } - - for format, extractor := range 
opts.extractors { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor)) - } - - if c.BaggageRestrictions != nil { - mgr := remote.NewRestrictionManager( - c.ServiceName, - remote.Options.Metrics(tracerMetrics), - remote.Options.Logger(opts.logger), - remote.Options.HostPort(c.BaggageRestrictions.HostPort), - remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval), - remote.Options.DenyBaggageOnInitializationFailure( - c.BaggageRestrictions.DenyBaggageOnInitializationFailure, - ), - ) - tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr)) - } - - if c.Throttler != nil { - debugThrottler := throttler.NewThrottler( - c.ServiceName, - throttler.Options.Metrics(tracerMetrics), - throttler.Options.Logger(opts.logger), - throttler.Options.HostPort(c.Throttler.HostPort), - throttler.Options.RefreshInterval(c.Throttler.RefreshInterval), - throttler.Options.SynchronousInitialization( - c.Throttler.SynchronousInitialization, - ), - ) - - tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler)) - } - - tracer, closer := jaeger.NewTracer( - c.ServiceName, - sampler, - reporter, - tracerOptions..., - ) - - return tracer, closer, nil -} - -// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer. -// It returns a closer func that can be used to flush buffers before shutdown. -func (c Configuration) InitGlobalTracer( - serviceName string, - options ...Option, -) (io.Closer, error) { - if c.Disabled { - return &nullCloser{}, nil - } - tracer, closer, err := c.New(serviceName, options...) 
- if err != nil { - return nil, err - } - opentracing.SetGlobalTracer(tracer) - return closer, nil -} - -// NewSampler creates a new sampler based on the configuration -func (sc *SamplerConfig) NewSampler( - serviceName string, - metrics *jaeger.Metrics, -) (jaeger.Sampler, error) { - samplerType := strings.ToLower(sc.Type) - if samplerType == jaeger.SamplerTypeConst { - return jaeger.NewConstSampler(sc.Param != 0), nil - } - if samplerType == jaeger.SamplerTypeProbabilistic { - if sc.Param >= 0 && sc.Param <= 1.0 { - return jaeger.NewProbabilisticSampler(sc.Param) - } - return nil, fmt.Errorf( - "Invalid Param for probabilistic sampler: %v. Expecting value between 0 and 1", - sc.Param, - ) - } - if samplerType == jaeger.SamplerTypeRateLimiting { - return jaeger.NewRateLimitingSampler(sc.Param), nil - } - if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" { - sc2 := *sc - sc2.Type = jaeger.SamplerTypeProbabilistic - initSampler, err := sc2.NewSampler(serviceName, nil) - if err != nil { - return nil, err - } - options := []jaeger.SamplerOption{ - jaeger.SamplerOptions.Metrics(metrics), - jaeger.SamplerOptions.InitialSampler(initSampler), - jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL), - } - if sc.MaxOperations != 0 { - options = append(options, jaeger.SamplerOptions.MaxOperations(sc.MaxOperations)) - } - if sc.SamplingRefreshInterval != 0 { - options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval)) - } - return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil - } - return nil, fmt.Errorf("Unknown sampler type %v", sc.Type) -} - -// NewReporter instantiates a new reporter that submits spans to the collector -func (rc *ReporterConfig) NewReporter( - serviceName string, - metrics *jaeger.Metrics, - logger jaeger.Logger, -) (jaeger.Reporter, error) { - sender, err := rc.newTransport() - if err != nil { - return nil, err - } - reporter := jaeger.NewRemoteReporter( - sender, - 
jaeger.ReporterOptions.QueueSize(rc.QueueSize), - jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval), - jaeger.ReporterOptions.Logger(logger), - jaeger.ReporterOptions.Metrics(metrics)) - if rc.LogSpans && logger != nil { - logger.Infof("Initializing logging reporter\n") - reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter) - } - return reporter, err -} - -func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) { - switch { - case rc.CollectorEndpoint != "" && rc.User != "" && rc.Password != "": - return transport.NewHTTPTransport(rc.CollectorEndpoint, transport.HTTPBatchSize(1), - transport.HTTPBasicAuth(rc.User, rc.Password)), nil - case rc.CollectorEndpoint != "": - return transport.NewHTTPTransport(rc.CollectorEndpoint, transport.HTTPBatchSize(1)), nil - default: - return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0) - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go deleted file mode 100644 index ff70ae12c28b..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -import ( - "fmt" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - - "github.com/uber/jaeger-client-go" -) - -const ( - // environment variable names - envServiceName = "JAEGER_SERVICE_NAME" - envDisabled = "JAEGER_DISABLED" - envRPCMetrics = "JAEGER_RPC_METRICS" - envTags = "JAEGER_TAGS" - envSamplerType = "JAEGER_SAMPLER_TYPE" - envSamplerParam = "JAEGER_SAMPLER_PARAM" - envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" - envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS" - envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL" - envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE" - envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL" - envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS" - envEndpoint = "JAEGER_ENDPOINT" - envUser = "JAEGER_USER" - envPassword = "JAEGER_PASSWORD" - envAgentHost = "JAEGER_AGENT_HOST" - envAgentPort = "JAEGER_AGENT_PORT" -) - -// FromEnv uses environment variables to set the tracer's Configuration -func FromEnv() (*Configuration, error) { - c := &Configuration{} - - if e := os.Getenv(envServiceName); e != "" { - c.ServiceName = e - } - - if e := os.Getenv(envRPCMetrics); e != "" { - if value, err := strconv.ParseBool(e); err == nil { - c.RPCMetrics = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e) - } - } - - if e := os.Getenv(envDisabled); e != "" { - if value, err := strconv.ParseBool(e); err == nil { - c.Disabled = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e) - } - } - - if e := os.Getenv(envTags); e != "" { - c.Tags = parseTags(e) - } - - if s, err := samplerConfigFromEnv(); err == nil { - c.Sampler = s - } else { - return nil, errors.Wrap(err, "cannot obtain sampler config from env") - } - - if r, err := reporterConfigFromEnv(); err == nil { - c.Reporter = r - } else { - return nil, 
errors.Wrap(err, "cannot obtain reporter config from env") - } - - return c, nil -} - -// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables -func samplerConfigFromEnv() (*SamplerConfig, error) { - sc := &SamplerConfig{} - - if e := os.Getenv(envSamplerType); e != "" { - sc.Type = e - } - - if e := os.Getenv(envSamplerParam); e != "" { - if value, err := strconv.ParseFloat(e, 64); err == nil { - sc.Param = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e) - } - } - - if e := os.Getenv(envSamplerManagerHostPort); e != "" { - sc.SamplingServerURL = e - } - - if e := os.Getenv(envSamplerMaxOperations); e != "" { - if value, err := strconv.ParseInt(e, 10, 0); err == nil { - sc.MaxOperations = int(value) - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e) - } - } - - if e := os.Getenv(envSamplerRefreshInterval); e != "" { - if value, err := time.ParseDuration(e); err == nil { - sc.SamplingRefreshInterval = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e) - } - } - - return sc, nil -} - -// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables -func reporterConfigFromEnv() (*ReporterConfig, error) { - rc := &ReporterConfig{} - - if e := os.Getenv(envReporterMaxQueueSize); e != "" { - if value, err := strconv.ParseInt(e, 10, 0); err == nil { - rc.QueueSize = int(value) - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e) - } - } - - if e := os.Getenv(envReporterFlushInterval); e != "" { - if value, err := time.ParseDuration(e); err == nil { - rc.BufferFlushInterval = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e) - } - } - - if e := os.Getenv(envReporterLogSpans); e != "" { - if value, err := strconv.ParseBool(e); err == nil { - rc.LogSpans = 
value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e) - } - } - - if e := os.Getenv(envEndpoint); e != "" { - u, err := url.ParseRequestURI(e) - if err != nil { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envEndpoint, e) - } - rc.CollectorEndpoint = u.String() - user := os.Getenv(envUser) - pswd := os.Getenv(envPassword) - if user != "" && pswd == "" || user == "" && pswd != "" { - return nil, errors.Errorf("you must set %s and %s env vars together", envUser, envPassword) - } - rc.User = user - rc.Password = pswd - } else { - host := jaeger.DefaultUDPSpanServerHost - if e := os.Getenv(envAgentHost); e != "" { - host = e - } - - port := jaeger.DefaultUDPSpanServerPort - if e := os.Getenv(envAgentPort); e != "" { - if value, err := strconv.ParseInt(e, 10, 0); err == nil { - port = int(value) - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e) - } - } - rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port) - } - - return rc, nil -} - -// parseTags parses the given string into a collection of Tags. 
-// Spec for this value: -// - comma separated list of key=value -// - value can be specified using the notation ${envVar:defaultValue}, where `envVar` -// is an environment variable and `defaultValue` is the value to use in case the env var is not set -func parseTags(sTags string) []opentracing.Tag { - pairs := strings.Split(sTags, ",") - tags := make([]opentracing.Tag, 0) - for _, p := range pairs { - kv := strings.SplitN(p, "=", 2) - k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1]) - - if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") { - ed := strings.SplitN(v[2:len(v)-1], ":", 2) - e, d := ed[0], ed[1] - v = os.Getenv(e) - if v == "" && d != "" { - v = d - } - } - - tag := opentracing.Tag{Key: k, Value: v} - tags = append(tags, tag) - } - - return tags -} diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go deleted file mode 100644 index 322691beaa17..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/config/options.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - opentracing "github.com/opentracing/opentracing-go" - "github.com/uber/jaeger-lib/metrics" - - "github.com/uber/jaeger-client-go" -) - -// Option is a function that sets some option on the client. -type Option func(c *Options) - -// Options control behavior of the client. 
-type Options struct { - metrics metrics.Factory - logger jaeger.Logger - reporter jaeger.Reporter - sampler jaeger.Sampler - contribObservers []jaeger.ContribObserver - observers []jaeger.Observer - gen128Bit bool - poolSpans bool - zipkinSharedRPCSpan bool - maxTagValueLength int - tags []opentracing.Tag - injectors map[interface{}]jaeger.Injector - extractors map[interface{}]jaeger.Extractor -} - -// Metrics creates an Option that initializes Metrics in the tracer, -// which is used to emit statistics about spans. -func Metrics(factory metrics.Factory) Option { - return func(c *Options) { - c.metrics = factory - } -} - -// Logger can be provided to log Reporter errors, as well as to log spans -// if Reporter.LogSpans is set to true. -func Logger(logger jaeger.Logger) Option { - return func(c *Options) { - c.logger = logger - } -} - -// Reporter can be provided explicitly to override the configuration. -// Useful for testing, e.g. by passing InMemoryReporter. -func Reporter(reporter jaeger.Reporter) Option { - return func(c *Options) { - c.reporter = reporter - } -} - -// Sampler can be provided explicitly to override the configuration. -func Sampler(sampler jaeger.Sampler) Option { - return func(c *Options) { - c.sampler = sampler - } -} - -// Observer can be registered with the Tracer to receive notifications about new Spans. -func Observer(observer jaeger.Observer) Option { - return func(c *Options) { - c.observers = append(c.observers, observer) - } -} - -// ContribObserver can be registered with the Tracer to receive notifications -// about new spans. -func ContribObserver(observer jaeger.ContribObserver) Option { - return func(c *Options) { - c.contribObservers = append(c.contribObservers, observer) - } -} - -// Gen128Bit specifies whether to generate 128bit trace IDs. 
-func Gen128Bit(gen128Bit bool) Option { - return func(c *Options) { - c.gen128Bit = gen128Bit - } -} - -// PoolSpans specifies whether to pool spans -func PoolSpans(poolSpans bool) Option { - return func(c *Options) { - c.poolSpans = poolSpans - } -} - -// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client -// and server spans a la zipkin. If false, client and server spans will be assigned -// different IDs. -func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option { - return func(c *Options) { - c.zipkinSharedRPCSpan = zipkinSharedRPCSpan - } -} - -// MaxTagValueLength can be provided to override the default max tag value length. -func MaxTagValueLength(maxTagValueLength int) Option { - return func(c *Options) { - c.maxTagValueLength = maxTagValueLength - } -} - -// Tag creates an option that adds a tracer-level tag. -func Tag(key string, value interface{}) Option { - return func(c *Options) { - c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value}) - } -} - -// Injector registers an Injector with the given format. -func Injector(format interface{}, injector jaeger.Injector) Option { - return func(c *Options) { - c.injectors[format] = injector - } -} - -// Extractor registers an Extractor with the given format. 
-func Extractor(format interface{}, extractor jaeger.Extractor) Option { - return func(c *Options) { - c.extractors[format] = extractor - } -} - -func applyOptions(options ...Option) Options { - opts := Options{ - injectors: make(map[interface{}]jaeger.Injector), - extractors: make(map[interface{}]jaeger.Extractor), - } - for _, option := range options { - option(&opts) - } - if opts.metrics == nil { - opts.metrics = metrics.NullFactory - } - if opts.logger == nil { - opts.logger = jaeger.NullLogger - } - return opts -} diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go deleted file mode 100644 index e98ab14528f7..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/constants.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -const ( - // JaegerClientVersion is the version of the client library reported as Span tag. - JaegerClientVersion = "Go-2.16.1dev" - - // JaegerClientVersionTagKey is the name of the tag used to report client version. - JaegerClientVersionTagKey = "jaeger.version" - - // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which, - // if found in the carrier, forces the trace to be sampled as "debug" trace. 
- // The value of the header is recorded as the tag on the root span, so that the - // trace can be found in the UI using this value as a correlation ID. - JaegerDebugHeader = "jaeger-debug-id" - - // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage. - // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where - // a root span does not exist. - JaegerBaggageHeader = "jaeger-baggage" - - // TracerHostnameTagKey used to report host name of the process. - TracerHostnameTagKey = "hostname" - - // TracerIPTagKey used to report ip of the process. - TracerIPTagKey = "ip" - - // TracerUUIDTagKey used to report UUID of the client process. - TracerUUIDTagKey = "client-uuid" - - // SamplerTypeTagKey reports which sampler was used on the root span. - SamplerTypeTagKey = "sampler.type" - - // SamplerParamTagKey reports the parameter of the sampler, like sampling probability. - SamplerParamTagKey = "sampler.param" - - // TraceContextHeaderName is the http header name used to propagate tracing context. - // This must be in lower-case to avoid mismatches when decoding incoming headers. - TraceContextHeaderName = "uber-trace-id" - - // TracerStateHeaderName is deprecated. - // Deprecated: use TraceContextHeaderName - TracerStateHeaderName = TraceContextHeaderName - - // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage. - // This must be in lower-case to avoid mismatches when decoding incoming headers. - TraceBaggageHeaderPrefix = "uberctx-" - - // SamplerTypeConst is the type of sampler that always makes the same decision. - SamplerTypeConst = "const" - - // SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy. - SamplerTypeRemote = "remote" - - // SamplerTypeProbabilistic is the type of sampler that samples traces - // with a certain fixed probability. 
- SamplerTypeProbabilistic = "probabilistic" - - // SamplerTypeRateLimiting is the type of sampler that samples - // only up to a fixed number of traces per second. - SamplerTypeRateLimiting = "ratelimiting" - - // SamplerTypeLowerBound is the type of sampler that samples - // at least a fixed number of traces per second. - SamplerTypeLowerBound = "lowerbound" - - // DefaultUDPSpanServerHost is the default host to send the spans to, via UDP - DefaultUDPSpanServerHost = "localhost" - - // DefaultUDPSpanServerPort is the default port to send the spans to, via UDP - DefaultUDPSpanServerPort = 6831 - - // DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value. - DefaultMaxTagValueLength = 256 -) diff --git a/vendor/github.com/uber/jaeger-client-go/context.go b/vendor/github.com/uber/jaeger-client-go/context.go deleted file mode 100644 index 90045f4fc278..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/context.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -const ( - flagSampled = byte(1) - flagDebug = byte(2) -) - -var ( - errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state") - errMalformedTracerStateString = errors.New("String does not match tracer state format") - - emptyContext = SpanContext{} -) - -// TraceID represents unique 128bit identifier of a trace -type TraceID struct { - High, Low uint64 -} - -// SpanID represents unique 64bit identifier of a span -type SpanID uint64 - -// SpanContext represents propagated span identity and state -type SpanContext struct { - // traceID represents globally unique ID of the trace. - // Usually generated as a random number. - traceID TraceID - - // spanID represents span ID that must be unique within its trace, - // but does not have to be globally unique. - spanID SpanID - - // parentID refers to the ID of the parent span. - // Should be 0 if the current span is a root span. - parentID SpanID - - // flags is a bitmap containing such bits as 'sampled' and 'debug'. - flags byte - - // Distributed Context baggage. The is a snapshot in time. - baggage map[string]string - - // debugID can be set to some correlation ID when the context is being - // extracted from a TextMap carrier. - // - // See JaegerDebugHeader in constants.go - debugID string -} - -// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext -func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { - for k, v := range c.baggage { - if !handler(k, v) { - break - } - } -} - -// IsSampled returns whether this trace was chosen for permanent storage -// by the sampling mechanism of the tracer. -func (c SpanContext) IsSampled() bool { - return (c.flags & flagSampled) == flagSampled -} - -// IsDebug indicates whether sampling was explicitly requested by the service. 
-func (c SpanContext) IsDebug() bool { - return (c.flags & flagDebug) == flagDebug -} - -// IsValid indicates whether this context actually represents a valid trace. -func (c SpanContext) IsValid() bool { - return c.traceID.IsValid() && c.spanID != 0 -} - -func (c SpanContext) String() string { - if c.traceID.High == 0 { - return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) - } - return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) -} - -// ContextFromString reconstructs the Context encoded in a string -func ContextFromString(value string) (SpanContext, error) { - var context SpanContext - if value == "" { - return emptyContext, errEmptyTracerStateString - } - parts := strings.Split(value, ":") - if len(parts) != 4 { - return emptyContext, errMalformedTracerStateString - } - var err error - if context.traceID, err = TraceIDFromString(parts[0]); err != nil { - return emptyContext, err - } - if context.spanID, err = SpanIDFromString(parts[1]); err != nil { - return emptyContext, err - } - if context.parentID, err = SpanIDFromString(parts[2]); err != nil { - return emptyContext, err - } - flags, err := strconv.ParseUint(parts[3], 10, 8) - if err != nil { - return emptyContext, err - } - context.flags = byte(flags) - return context, nil -} - -// TraceID returns the trace ID of this span context -func (c SpanContext) TraceID() TraceID { - return c.traceID -} - -// SpanID returns the span ID of this span context -func (c SpanContext) SpanID() SpanID { - return c.spanID -} - -// ParentID returns the parent span ID of this span context -func (c SpanContext) ParentID() SpanID { - return c.parentID -} - -// NewSpanContext creates a new instance of SpanContext -func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext { - flags := byte(0) - if sampled { - flags = flagSampled - } - return SpanContext{ - traceID: 
traceID, - spanID: spanID, - parentID: parentID, - flags: flags, - baggage: baggage} -} - -// CopyFrom copies data from ctx into this context, including span identity and baggage. -// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing. -func (c *SpanContext) CopyFrom(ctx *SpanContext) { - c.traceID = ctx.traceID - c.spanID = ctx.spanID - c.parentID = ctx.parentID - c.flags = ctx.flags - if l := len(ctx.baggage); l > 0 { - c.baggage = make(map[string]string, l) - for k, v := range ctx.baggage { - c.baggage[k] = v - } - } else { - c.baggage = nil - } -} - -// WithBaggageItem creates a new context with an extra baggage item. -func (c SpanContext) WithBaggageItem(key, value string) SpanContext { - var newBaggage map[string]string - if c.baggage == nil { - newBaggage = map[string]string{key: value} - } else { - newBaggage = make(map[string]string, len(c.baggage)+1) - for k, v := range c.baggage { - newBaggage[k] = v - } - newBaggage[key] = value - } - // Use positional parameters so the compiler will help catch new fields. - return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""} -} - -// isDebugIDContainerOnly returns true when the instance of the context is only -// used to return the debug/correlation ID from extract() method. This happens -// in the situation when "jaeger-debug-id" header is passed in the carrier to -// the extract() method, but the request otherwise has no span context in it. -// Previously this would've returned opentracing.ErrSpanContextNotFound from the -// extract method, but now it returns a dummy context with only debugID filled in. 
-// -// See JaegerDebugHeader in constants.go -// See TextMapPropagator#Extract -func (c *SpanContext) isDebugIDContainerOnly() bool { - return !c.traceID.IsValid() && c.debugID != "" -} - -// ------- TraceID ------- - -func (t TraceID) String() string { - if t.High == 0 { - return fmt.Sprintf("%x", t.Low) - } - return fmt.Sprintf("%x%016x", t.High, t.Low) -} - -// TraceIDFromString creates a TraceID from a hexadecimal string -func TraceIDFromString(s string) (TraceID, error) { - var hi, lo uint64 - var err error - if len(s) > 32 { - return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s) - } else if len(s) > 16 { - hiLen := len(s) - 16 - if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil { - return TraceID{}, err - } - if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil { - return TraceID{}, err - } - } else { - if lo, err = strconv.ParseUint(s, 16, 64); err != nil { - return TraceID{}, err - } - } - return TraceID{High: hi, Low: lo}, nil -} - -// IsValid checks if the trace ID is valid, i.e. not zero. -func (t TraceID) IsValid() bool { - return t.High != 0 || t.Low != 0 -} - -// ------- SpanID ------- - -func (s SpanID) String() string { - return fmt.Sprintf("%x", uint64(s)) -} - -// SpanIDFromString creates a SpanID from a hexadecimal string -func SpanIDFromString(s string) (SpanID, error) { - if len(s) > 16 { - return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s) - } - id, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return SpanID(0), err - } - return SpanID(id), nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go deleted file mode 100644 index 4ce1881f3b83..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - opentracing "github.com/opentracing/opentracing-go" -) - -// ContribObserver can be registered with the Tracer to receive notifications -// about new Spans. Modelled after github.com/opentracing-contrib/go-observer. -type ContribObserver interface { - // Create and return a span observer. Called when a span starts. - // If the Observer is not interested in the given span, it must return (nil, false). - // E.g : - // func StartSpan(opName string, opts ...opentracing.StartSpanOption) { - // var sp opentracing.Span - // sso := opentracing.StartSpanOptions{} - // if spanObserver, ok := Observer.OnStartSpan(span, opName, sso); ok { - // // we have a valid SpanObserver - // } - // ... - // } - OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) -} - -// ContribSpanObserver is created by the Observer and receives notifications -// about other Span events. This interface is meant to match -// github.com/opentracing-contrib/go-observer, via duck typing, without -// directly importing the go-observer package. 
-type ContribSpanObserver interface { - OnSetOperationName(operationName string) - OnSetTag(key string, value interface{}) - OnFinish(options opentracing.FinishOptions) -} - -// wrapper observer for the old observers (see observer.go) -type oldObserver struct { - obs Observer -} - -func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) { - spanObserver := o.obs.OnStartSpan(operationName, options) - return spanObserver, spanObserver != nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go deleted file mode 100644 index 4f5549033d5d..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package jaeger implements an OpenTracing (http://opentracing.io) Tracer. -It is currently using Zipkin-compatible data model and can be directly -itegrated with Zipkin backend (http://zipkin.io). 
- -For integration instructions please refer to the README: - -https://github.com/uber/jaeger-client-go/blob/master/README.md -*/ -package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/glide.lock b/vendor/github.com/uber/jaeger-client-go/glide.lock deleted file mode 100644 index af659ca0e66a..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/glide.lock +++ /dev/null @@ -1,90 +0,0 @@ -hash: 92cc8f956428fc65bee07d809a752f34376aece141c934eff02aefa08d450b72 -updated: 2019-03-23T18:26:09.960887-04:00 -imports: -- name: github.com/beorn7/perks - version: 3a771d992973f24aa725d07868b467d1ddfceafb - subpackages: - - quantile -- name: github.com/codahale/hdrhistogram - version: 3a0bb77429bd3a61596f5e8a3172445844342120 -- name: github.com/crossdock/crossdock-go - version: 049aabb0122b03bc9bd30cab8f3f91fb60166361 - subpackages: - - assert - - require -- name: github.com/davecgh/go-spew - version: d8f796af33cc11cb798c1aaeb27a4ebc5099927d - subpackages: - - spew -- name: github.com/golang/protobuf - version: bbd03ef6da3a115852eaf24c8a1c46aeb39aa175 - subpackages: - - proto -- name: github.com/matttproud/golang_protobuf_extensions - version: c12348ce28de40eed0136aa2b644d0ee0650e56c - subpackages: - - pbutil -- name: github.com/opentracing/opentracing-go - version: 659c90643e714681897ec2521c60567dd21da733 - subpackages: - - ext - - harness - - log -- name: github.com/pkg/errors - version: ba968bfe8b2f7e042a574c888954fccecfa385b4 -- name: github.com/pmezard/go-difflib - version: 792786c7400a136282c1664665ae0a8db921c6c2 - subpackages: - - difflib -- name: github.com/prometheus/client_golang - version: c5b7fccd204277076155f10851dad72b76a49317 - subpackages: - - prometheus -- name: github.com/prometheus/client_model - version: 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c - subpackages: - - go -- name: github.com/prometheus/common - version: 38c53a9f4bfcd932d1b00bfc65e256a7fba6b37a - subpackages: - - expfmt - - internal/bitbucket.org/ww/goautoneg - - model -- name: 
github.com/prometheus/procfs - version: 780932d4fbbe0e69b84c34c20f5c8d0981e109ea - subpackages: - - internal/util - - nfs - - xfs -- name: github.com/stretchr/testify - version: f35b8ab0b5a2cef36673838d662e249dd9c94686 - subpackages: - - assert - - require - - suite -- name: github.com/uber/jaeger-lib - version: 0e30338a695636fe5bcf7301e8030ce8dd2a8530 - subpackages: - - metrics - - metrics/metricstest - - metrics/prometheus -- name: go.uber.org/atomic - version: 1ea20fb1cbb1cc08cbd0d913a96dead89aa18289 -- name: go.uber.org/multierr - version: 3c4937480c32f4c13a875a1829af76c98ca3d40a -- name: go.uber.org/zap - version: ff33455a0e382e8a81d14dd7c922020b6b5e7982 - subpackages: - - buffer - - internal/bufferpool - - internal/color - - internal/exit - - zapcore -- name: golang.org/x/net - version: 49bb7cea24b1df9410e1712aa6433dae904ff66a - subpackages: - - context - - context/ctxhttp -testImports: -- name: github.com/uber-go/atomic - version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8 diff --git a/vendor/github.com/uber/jaeger-client-go/glide.yaml b/vendor/github.com/uber/jaeger-client-go/glide.yaml deleted file mode 100644 index b3e5b80bc019..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/glide.yaml +++ /dev/null @@ -1,22 +0,0 @@ -package: github.com/uber/jaeger-client-go -import: -- package: github.com/opentracing/opentracing-go - version: ^1.1 - subpackages: - - ext - - log -- package: github.com/crossdock/crossdock-go -- package: github.com/uber/jaeger-lib - version: ^2.0.0 - subpackages: - - metrics -- package: github.com/pkg/errors - version: ~0.8.0 -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert - - require - - suite -- package: github.com/prometheus/client_golang - version: v0.8.0 diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go deleted file mode 100644 index 5da70351d91d..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/header.go +++ /dev/null @@ 
-1,65 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -// HeadersConfig contains the values for the header keys that Jaeger will use. -// These values may be either custom or default depending on whether custom -// values were provided via a configuration. -type HeadersConfig struct { - // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which, - // if found in the carrier, forces the trace to be sampled as "debug" trace. - // The value of the header is recorded as the tag on the root span, so that the - // trace can be found in the UI using this value as a correlation ID. - JaegerDebugHeader string `yaml:"jaegerDebugHeader"` - - // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage. - // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where - // a root span does not exist. - JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"` - - // TraceContextHeaderName is the http header name used to propagate tracing context. - // This must be in lower-case to avoid mismatches when decoding incoming headers. - TraceContextHeaderName string `yaml:"TraceContextHeaderName"` - - // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage. - // This must be in lower-case to avoid mismatches when decoding incoming headers. 
- TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"` -} - -// ApplyDefaults sets missing configuration keys to default values -func (c *HeadersConfig) ApplyDefaults() *HeadersConfig { - if c.JaegerBaggageHeader == "" { - c.JaegerBaggageHeader = JaegerBaggageHeader - } - if c.JaegerDebugHeader == "" { - c.JaegerDebugHeader = JaegerDebugHeader - } - if c.TraceBaggageHeaderPrefix == "" { - c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix - } - if c.TraceContextHeaderName == "" { - c.TraceContextHeaderName = TraceContextHeaderName - } - return c -} - -func getDefaultHeadersConfig() *HeadersConfig { - return &HeadersConfig{ - JaegerDebugHeader: JaegerDebugHeader, - JaegerBaggageHeader: JaegerBaggageHeader, - TraceContextHeaderName: TraceContextHeaderName, - TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix, - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go deleted file mode 100644 index 745729319ff0..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package remote - -import ( - "time" - - "github.com/uber/jaeger-client-go" -) - -const ( - defaultMaxValueLength = 2048 - defaultRefreshInterval = time.Minute - defaultHostPort = "localhost:5778" -) - -// Option is a function that sets some option on the RestrictionManager -type Option func(options *options) - -// Options is a factory for all available options -var Options options - -type options struct { - denyBaggageOnInitializationFailure bool - metrics *jaeger.Metrics - logger jaeger.Logger - hostPort string - refreshInterval time.Duration -} - -// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager. -// If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage -// restrictions have been retrieved from agent. -// If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage -// restrictions have been retrieved from agent. -func (options) DenyBaggageOnInitializationFailure(b bool) Option { - return func(o *options) { - o.denyBaggageOnInitializationFailure = b - } -} - -// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics. -func (options) Metrics(m *jaeger.Metrics) Option { - return func(o *options) { - o.metrics = m - } -} - -// Logger creates an Option that sets the logger used by the RestrictionManager. -func (options) Logger(logger jaeger.Logger) Option { - return func(o *options) { - o.logger = logger - } -} - -// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions. -func (options) HostPort(hostPort string) Option { - return func(o *options) { - o.hostPort = hostPort - } -} - -// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for -// the baggage restrictions. 
-func (options) RefreshInterval(refreshInterval time.Duration) Option { - return func(o *options) { - o.refreshInterval = refreshInterval - } -} - -func applyOptions(o ...Option) options { - opts := options{} - for _, option := range o { - option(&opts) - } - if opts.metrics == nil { - opts.metrics = jaeger.NewNullMetrics() - } - if opts.logger == nil { - opts.logger = jaeger.NullLogger - } - if opts.hostPort == "" { - opts.hostPort = defaultHostPort - } - if opts.refreshInterval == 0 { - opts.refreshInterval = defaultRefreshInterval - } - return opts -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go deleted file mode 100644 index a56515acab86..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package remote - -import ( - "fmt" - "net/url" - "sync" - "time" - - "github.com/uber/jaeger-client-go/internal/baggage" - thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage" - "github.com/uber/jaeger-client-go/utils" -) - -type httpBaggageRestrictionManagerProxy struct { - url string -} - -func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy { - v := url.Values{} - v.Set("service", serviceName) - return &httpBaggageRestrictionManagerProxy{ - url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()), - } -} - -func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(serviceName string) ([]*thrift.BaggageRestriction, error) { - var out []*thrift.BaggageRestriction - if err := utils.GetJSON(s.url, &out); err != nil { - return nil, err - } - return out, nil -} - -// RestrictionManager manages baggage restrictions by retrieving baggage restrictions from agent -type RestrictionManager struct { - options - - mux sync.RWMutex - serviceName string - restrictions map[string]*baggage.Restriction - thriftProxy thrift.BaggageRestrictionManager - pollStopped sync.WaitGroup - stopPoll chan struct{} - invalidRestriction *baggage.Restriction - validRestriction *baggage.Restriction - - // Determines if the manager has successfully retrieved baggage restrictions from agent - initialized bool -} - -// NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest -// baggage restrictions. -func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager { - // TODO there is a developing use case where a single tracer can generate traces on behalf of many services. - // restrictionsMap will need to exist per service - opts := applyOptions(options...) 
- m := &RestrictionManager{ - serviceName: serviceName, - options: opts, - restrictions: make(map[string]*baggage.Restriction), - thriftProxy: newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName), - stopPoll: make(chan struct{}), - invalidRestriction: baggage.NewRestriction(false, 0), - validRestriction: baggage.NewRestriction(true, defaultMaxValueLength), - } - m.pollStopped.Add(1) - go m.pollManager() - return m -} - -// isReady returns true if the manager has retrieved baggage restrictions from the remote source. -func (m *RestrictionManager) isReady() bool { - m.mux.RLock() - defer m.mux.RUnlock() - return m.initialized -} - -// GetRestriction implements RestrictionManager#GetRestriction. -func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction { - m.mux.RLock() - defer m.mux.RUnlock() - if !m.initialized { - if m.denyBaggageOnInitializationFailure { - return m.invalidRestriction - } - return m.validRestriction - } - if restriction, ok := m.restrictions[key]; ok { - return restriction - } - return m.invalidRestriction -} - -// Close stops remote polling and closes the RemoteRestrictionManager. 
-func (m *RestrictionManager) Close() error { - close(m.stopPoll) - m.pollStopped.Wait() - return nil -} - -func (m *RestrictionManager) pollManager() { - defer m.pollStopped.Done() - // attempt to initialize baggage restrictions - if err := m.updateRestrictions(); err != nil { - m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error())) - } - ticker := time.NewTicker(m.refreshInterval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if err := m.updateRestrictions(); err != nil { - m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error())) - } - case <-m.stopPoll: - return - } - } -} - -func (m *RestrictionManager) updateRestrictions() error { - restrictions, err := m.thriftProxy.GetBaggageRestrictions(m.serviceName) - if err != nil { - m.metrics.BaggageRestrictionsUpdateFailure.Inc(1) - return err - } - newRestrictions := m.parseRestrictions(restrictions) - m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1) - m.mux.Lock() - defer m.mux.Unlock() - m.initialized = true - m.restrictions = newRestrictions - return nil -} - -func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction { - setters := make(map[string]*baggage.Restriction, len(restrictions)) - for _, restriction := range restrictions { - setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength)) - } - return setters -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go deleted file mode 100644 index c16a5c566291..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package baggage - -const ( - defaultMaxValueLength = 2048 -) - -// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value. -type Restriction struct { - keyAllowed bool - maxValueLength int -} - -// NewRestriction returns a new Restriction. -func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction { - return &Restriction{ - keyAllowed: keyAllowed, - maxValueLength: maxValueLength, - } -} - -// KeyAllowed returns whether the baggage key for this restriction is allowed. -func (r *Restriction) KeyAllowed() bool { - return r.keyAllowed -} - -// MaxValueLength returns the max length for the baggage value. -func (r *Restriction) MaxValueLength() int { - return r.maxValueLength -} - -// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager -// will return a Restriction for a specific baggage key which will determine whether the baggage -// key is allowed for the current service and any other applicable restrictions on the baggage -// value. -type RestrictionManager interface { - GetRestriction(service, key string) *Restriction -} - -// DefaultRestrictionManager allows any baggage key. -type DefaultRestrictionManager struct { - defaultRestriction *Restriction -} - -// NewDefaultRestrictionManager returns a DefaultRestrictionManager. 
-func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager { - if maxValueLength == 0 { - maxValueLength = defaultMaxValueLength - } - return &DefaultRestrictionManager{ - defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength}, - } -} - -// GetRestriction implements RestrictionManager#GetRestriction. -func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction { - return m.defaultRestriction -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go deleted file mode 100644 index 0e10b8a5aa8e..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spanlog - -import ( - "encoding/json" - "fmt" - - "github.com/opentracing/opentracing-go/log" -) - -type fieldsAsMap map[string]string - -// MaterializeWithJSON converts log Fields into JSON string -// TODO refactor into pluggable materializer -func MaterializeWithJSON(logFields []log.Field) ([]byte, error) { - fields := fieldsAsMap(make(map[string]string, len(logFields))) - for _, field := range logFields { - field.Marshal(fields) - } - if event, ok := fields["event"]; ok && len(fields) == 1 { - return []byte(event), nil - } - return json.Marshal(fields) -} - -func (ml fieldsAsMap) EmitString(key, value string) { - ml[key] = value -} - -func (ml fieldsAsMap) EmitBool(key string, value bool) { - ml[key] = fmt.Sprintf("%t", value) -} - -func (ml fieldsAsMap) EmitInt(key string, value int) { - ml[key] = fmt.Sprintf("%d", value) -} - -func (ml fieldsAsMap) EmitInt32(key string, value int32) { - ml[key] = fmt.Sprintf("%d", value) -} - -func (ml fieldsAsMap) EmitInt64(key string, value int64) { - ml[key] = fmt.Sprintf("%d", value) -} - -func (ml fieldsAsMap) EmitUint32(key string, value uint32) { - ml[key] = fmt.Sprintf("%d", value) -} - -func (ml fieldsAsMap) EmitUint64(key string, value uint64) { - ml[key] = fmt.Sprintf("%d", value) -} - -func (ml fieldsAsMap) EmitFloat32(key string, value float32) { - ml[key] = fmt.Sprintf("%f", value) -} - -func (ml fieldsAsMap) EmitFloat64(key string, value float64) { - ml[key] = fmt.Sprintf("%f", value) -} - -func (ml fieldsAsMap) EmitObject(key string, value interface{}) { - ml[key] = fmt.Sprintf("%+v", value) -} - -func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) { - value(ml) -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go deleted file mode 100644 index f52c322fb69a..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go +++ /dev/null @@ -1,99 
+0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "time" - - "github.com/uber/jaeger-client-go" -) - -const ( - defaultHostPort = "localhost:5778" - defaultRefreshInterval = time.Second * 5 -) - -// Option is a function that sets some option on the Throttler -type Option func(options *options) - -// Options is a factory for all available options -var Options options - -type options struct { - metrics *jaeger.Metrics - logger jaeger.Logger - hostPort string - refreshInterval time.Duration - synchronousInitialization bool -} - -// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics. -func (options) Metrics(m *jaeger.Metrics) Option { - return func(o *options) { - o.metrics = m - } -} - -// Logger creates an Option that sets the logger used by the Throttler. -func (options) Logger(logger jaeger.Logger) Option { - return func(o *options) { - o.logger = logger - } -} - -// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits. -func (options) HostPort(hostPort string) Option { - return func(o *options) { - o.hostPort = hostPort - } -} - -// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for -// credits. 
-func (options) RefreshInterval(refreshInterval time.Duration) Option { - return func(o *options) { - o.refreshInterval = refreshInterval - } -} - -// SynchronousInitialization creates an Option that determines whether the throttler should synchronously -// fetch credits from the agent when an operation is seen for the first time. This should be set to true -// if the client will be used by a short lived service that needs to ensure that credits are fetched upfront -// such that sampling or throttling occurs. -func (options) SynchronousInitialization(b bool) Option { - return func(o *options) { - o.synchronousInitialization = b - } -} - -func applyOptions(o ...Option) options { - opts := options{} - for _, option := range o { - option(&opts) - } - if opts.metrics == nil { - opts.metrics = jaeger.NewNullMetrics() - } - if opts.logger == nil { - opts.logger = jaeger.NullLogger - } - if opts.hostPort == "" { - opts.hostPort = defaultHostPort - } - if opts.refreshInterval == 0 { - opts.refreshInterval = defaultRefreshInterval - } - return opts -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go deleted file mode 100644 index 20f434fe4956..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "fmt" - "net/url" - "sync" - "sync/atomic" - "time" - - "github.com/pkg/errors" - - "github.com/uber/jaeger-client-go" - "github.com/uber/jaeger-client-go/utils" -) - -const ( - // minimumCredits is the minimum amount of credits necessary to not be throttled. - // i.e. if currentCredits > minimumCredits, then the operation will not be throttled. - minimumCredits = 1.0 -) - -var ( - errorUUIDNotSet = errors.New("Throttler UUID must be set") -) - -type operationBalance struct { - Operation string `json:"operation"` - Balance float64 `json:"balance"` -} - -type creditResponse struct { - Balances []operationBalance `json:"balances"` -} - -type httpCreditManagerProxy struct { - hostPort string -} - -func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy { - return &httpCreditManagerProxy{ - hostPort: hostPort, - } -} - -// N.B. Operations list must not be empty. -func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) { - params := url.Values{} - params.Set("service", serviceName) - params.Set("uuid", uuid) - for _, op := range operations { - params.Add("operations", op) - } - var resp creditResponse - if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil { - return nil, errors.Wrap(err, "Failed to receive credits from agent") - } - return &resp, nil -} - -// Throttler retrieves credits from agent and uses it to throttle operations. -type Throttler struct { - options - - mux sync.RWMutex - service string - uuid atomic.Value - creditManager *httpCreditManagerProxy - credits map[string]float64 // map of operation->credits - close chan struct{} - stopped sync.WaitGroup -} - -// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle -// the service. 
-func NewThrottler(service string, options ...Option) *Throttler { - opts := applyOptions(options...) - creditManager := newHTTPCreditManagerProxy(opts.hostPort) - t := &Throttler{ - options: opts, - creditManager: creditManager, - service: service, - credits: make(map[string]float64), - close: make(chan struct{}), - } - t.stopped.Add(1) - go t.pollManager() - return t -} - -// IsAllowed implements Throttler#IsAllowed. -func (t *Throttler) IsAllowed(operation string) bool { - t.mux.Lock() - defer t.mux.Unlock() - value, ok := t.credits[operation] - if !ok || value == 0 { - if !ok { - // NOTE: This appears to be a no-op at first glance, but it stores - // the operation key in the map. Necessary for functionality of - // Throttler#operations method. - t.credits[operation] = 0 - } - if !t.synchronousInitialization { - t.metrics.ThrottledDebugSpans.Inc(1) - return false - } - // If it is the first time this operation is being checked, synchronously fetch - // the credits. - credits, err := t.fetchCredits([]string{operation}) - if err != nil { - // Failed to receive credits from agent, try again next time - t.logger.Error("Failed to fetch credits: " + err.Error()) - return false - } - if len(credits.Balances) == 0 { - // This shouldn't happen but just in case - return false - } - for _, opBalance := range credits.Balances { - t.credits[opBalance.Operation] += opBalance.Balance - } - } - return t.isAllowed(operation) -} - -// Close stops the throttler from fetching credits from remote. -func (t *Throttler) Close() error { - close(t.close) - t.stopped.Wait() - return nil -} - -// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote -// requests are made. -func (t *Throttler) SetProcess(process jaeger.Process) { - if process.UUID != "" { - t.uuid.Store(process.UUID) - } -} - -// N.B. 
This function must be called with the Write Lock -func (t *Throttler) isAllowed(operation string) bool { - credits := t.credits[operation] - if credits < minimumCredits { - t.metrics.ThrottledDebugSpans.Inc(1) - return false - } - t.credits[operation] = credits - minimumCredits - return true -} - -func (t *Throttler) pollManager() { - defer t.stopped.Done() - ticker := time.NewTicker(t.refreshInterval) - defer ticker.Stop() - for { - select { - case <-ticker.C: - t.refreshCredits() - case <-t.close: - return - } - } -} - -func (t *Throttler) operations() []string { - t.mux.RLock() - defer t.mux.RUnlock() - operations := make([]string, 0, len(t.credits)) - for op := range t.credits { - operations = append(operations, op) - } - return operations -} - -func (t *Throttler) refreshCredits() { - operations := t.operations() - if len(operations) == 0 { - return - } - newCredits, err := t.fetchCredits(operations) - if err != nil { - t.metrics.ThrottlerUpdateFailure.Inc(1) - t.logger.Error("Failed to fetch credits: " + err.Error()) - return - } - t.metrics.ThrottlerUpdateSuccess.Inc(1) - - t.mux.Lock() - defer t.mux.Unlock() - for _, opBalance := range newCredits.Balances { - t.credits[opBalance.Operation] += opBalance.Balance - } -} - -func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) { - uuid := t.uuid.Load() - uuidStr, _ := uuid.(string) - if uuid == nil || uuidStr == "" { - return nil, errorUUIDNotSet - } - return t.creditManager.FetchCredits(uuidStr, t.service, operations) -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go deleted file mode 100644 index 196ed69cacae..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package throttler - -// Throttler is used to rate limits operations. For example, given how debug spans -// are always sampled, a throttler can be enabled per client to rate limit the amount -// of debug spans a client can start. -type Throttler interface { - // IsAllowed determines whether the operation should be allowed and not be - // throttled. - IsAllowed(operation string) bool -} - -// DefaultThrottler doesn't throttle at all. -type DefaultThrottler struct{} - -// IsAllowed implements Throttler#IsAllowed. -func (t DefaultThrottler) IsAllowed(operation string) bool { - return true -} diff --git a/vendor/github.com/uber/jaeger-client-go/interop.go b/vendor/github.com/uber/jaeger-client-go/interop.go deleted file mode 100644 index 8402d087c29f..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/interop.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "github.com/opentracing/opentracing-go" -) - -// TODO this file should not be needed after TChannel PR. - -type formatKey int - -// SpanContextFormat is a constant used as OpenTracing Format. -// Requires *SpanContext as carrier. -// This format is intended for interop with TChannel or other Zipkin-like tracers. -const SpanContextFormat formatKey = iota - -type jaegerTraceContextPropagator struct { - tracer *Tracer -} - -func (p *jaegerTraceContextPropagator) Inject( - ctx SpanContext, - abstractCarrier interface{}, -) error { - carrier, ok := abstractCarrier.(*SpanContext) - if !ok { - return opentracing.ErrInvalidCarrier - } - - carrier.CopyFrom(&ctx) - return nil -} - -func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { - carrier, ok := abstractCarrier.(*SpanContext) - if !ok { - return emptyContext, opentracing.ErrInvalidCarrier - } - ctx := new(SpanContext) - ctx.CopyFrom(carrier) - return *ctx, nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go deleted file mode 100644 index 868b2a5b5465..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "fmt" - - "github.com/opentracing/opentracing-go/log" - - j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" -) - -type tags []*j.Tag - -// ConvertLogsToJaegerTags converts log Fields into jaeger tags. -func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag { - fields := tags(make([]*j.Tag, 0, len(logFields))) - for _, field := range logFields { - field.Marshal(&fields) - } - return fields -} - -func (t *tags) EmitString(key, value string) { - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value}) -} - -func (t *tags) EmitBool(key string, value bool) { - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value}) -} - -func (t *tags) EmitInt(key string, value int) { - vLong := int64(value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) -} - -func (t *tags) EmitInt32(key string, value int32) { - vLong := int64(value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) -} - -func (t *tags) EmitInt64(key string, value int64) { - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value}) -} - -func (t *tags) EmitUint32(key string, value uint32) { - vLong := int64(value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) -} - -func (t *tags) EmitUint64(key string, value uint64) { - vLong := int64(value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) -} - -func (t *tags) EmitFloat32(key string, value float32) { - vDouble := float64(value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble}) -} - -func (t *tags) EmitFloat64(key string, value float64) { - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value}) -} - -func (t *tags) EmitObject(key string, value interface{}) { - vStr := fmt.Sprintf("%+v", value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr}) -} - -func (t *tags) EmitLazyLogger(value log.LazyLogger) { - 
value(t) -} diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go deleted file mode 100644 index 6ce1caf873d9..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "time" - - "github.com/opentracing/opentracing-go" - - j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" - "github.com/uber/jaeger-client-go/utils" -) - -// BuildJaegerThrift builds jaeger span based on internal span. -func BuildJaegerThrift(span *Span) *j.Span { - span.Lock() - defer span.Unlock() - startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime) - duration := span.duration.Nanoseconds() / int64(time.Microsecond) - jaegerSpan := &j.Span{ - TraceIdLow: int64(span.context.traceID.Low), - TraceIdHigh: int64(span.context.traceID.High), - SpanId: int64(span.context.spanID), - ParentSpanId: int64(span.context.parentID), - OperationName: span.operationName, - Flags: int32(span.context.flags), - StartTime: startTime, - Duration: duration, - Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength), - Logs: buildLogs(span.logs), - References: buildReferences(span.references), - } - return jaegerSpan -} - -// BuildJaegerProcessThrift creates a thrift Process type. 
-func BuildJaegerProcessThrift(span *Span) *j.Process { - span.Lock() - defer span.Unlock() - return buildJaegerProcessThrift(span.tracer) -} - -func buildJaegerProcessThrift(tracer *Tracer) *j.Process { - process := &j.Process{ - ServiceName: tracer.serviceName, - Tags: buildTags(tracer.tags, tracer.options.maxTagValueLength), - } - if tracer.process.UUID != "" { - process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING}) - } - return process -} - -func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag { - jTags := make([]*j.Tag, 0, len(tags)) - for _, tag := range tags { - jTag := buildTag(&tag, maxTagValueLength) - jTags = append(jTags, jTag) - } - return jTags -} - -func buildLogs(logs []opentracing.LogRecord) []*j.Log { - jLogs := make([]*j.Log, 0, len(logs)) - for _, log := range logs { - jLog := &j.Log{ - Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp), - Fields: ConvertLogsToJaegerTags(log.Fields), - } - jLogs = append(jLogs, jLog) - } - return jLogs -} - -func buildTag(tag *Tag, maxTagValueLength int) *j.Tag { - jTag := &j.Tag{Key: tag.key} - switch value := tag.value.(type) { - case string: - vStr := truncateString(value, maxTagValueLength) - jTag.VStr = &vStr - jTag.VType = j.TagType_STRING - case []byte: - if len(value) > maxTagValueLength { - value = value[:maxTagValueLength] - } - jTag.VBinary = value - jTag.VType = j.TagType_BINARY - case int: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case int8: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint8: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case int16: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint16: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case 
int32: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint32: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case int64: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint64: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case float32: - vDouble := float64(value) - jTag.VDouble = &vDouble - jTag.VType = j.TagType_DOUBLE - case float64: - vDouble := float64(value) - jTag.VDouble = &vDouble - jTag.VType = j.TagType_DOUBLE - case bool: - vBool := value - jTag.VBool = &vBool - jTag.VType = j.TagType_BOOL - default: - vStr := truncateString(stringify(value), maxTagValueLength) - jTag.VStr = &vStr - jTag.VType = j.TagType_STRING - } - return jTag -} - -func buildReferences(references []Reference) []*j.SpanRef { - retMe := make([]*j.SpanRef, 0, len(references)) - for _, ref := range references { - if ref.Type == opentracing.ChildOfRef { - retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF)) - } else if ref.Type == opentracing.FollowsFromRef { - retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM)) - } - } - return retMe -} - -func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef { - return &j.SpanRef{ - RefType: refType, - TraceIdLow: int64(ctx.traceID.Low), - TraceIdHigh: int64(ctx.traceID.High), - SpanId: int64(ctx.spanID), - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/log/logger.go b/vendor/github.com/uber/jaeger-client-go/log/logger.go deleted file mode 100644 index 894bb3dbf712..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/log/logger.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package log - -import ( - "bytes" - "fmt" - "log" - "sync" -) - -// Logger provides an abstract interface for logging from Reporters. -// Applications can provide their own implementation of this interface to adapt -// reporters logging to whatever logging library they prefer (stdlib log, -// logrus, go-logging, etc). -type Logger interface { - // Error logs a message at error priority - Error(msg string) - - // Infof logs a message at info priority - Infof(msg string, args ...interface{}) -} - -// StdLogger is implementation of the Logger interface that delegates to default `log` package -var StdLogger = &stdLogger{} - -type stdLogger struct{} - -func (l *stdLogger) Error(msg string) { - log.Printf("ERROR: %s", msg) -} - -// Infof logs a message at info priority -func (l *stdLogger) Infof(msg string, args ...interface{}) { - log.Printf(msg, args...) -} - -// NullLogger is implementation of the Logger interface that is no-op -var NullLogger = &nullLogger{} - -type nullLogger struct{} - -func (l *nullLogger) Error(msg string) {} -func (l *nullLogger) Infof(msg string, args ...interface{}) {} - -// BytesBufferLogger implements Logger backed by a bytes.Buffer. -type BytesBufferLogger struct { - mux sync.Mutex - buf bytes.Buffer -} - -// Error implements Logger. -func (l *BytesBufferLogger) Error(msg string) { - l.mux.Lock() - l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg)) - l.mux.Unlock() -} - -// Infof implements Logger. 
-func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) { - l.mux.Lock() - l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n") - l.mux.Unlock() -} - -// String returns string representation of the underlying buffer. -func (l *BytesBufferLogger) String() string { - l.mux.Lock() - defer l.mux.Unlock() - return l.buf.String() -} - -// Flush empties the underlying buffer. -func (l *BytesBufferLogger) Flush() { - l.mux.Lock() - defer l.mux.Unlock() - l.buf.Reset() -} diff --git a/vendor/github.com/uber/jaeger-client-go/logger.go b/vendor/github.com/uber/jaeger-client-go/logger.go deleted file mode 100644 index d4f0b501923d..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/logger.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import "log" - -// NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead. - -// Logger provides an abstract interface for logging from Reporters. -// Applications can provide their own implementation of this interface to adapt -// reporters logging to whatever logging library they prefer (stdlib log, -// logrus, go-logging, etc). 
-type Logger interface { - // Error logs a message at error priority - Error(msg string) - - // Infof logs a message at info priority - Infof(msg string, args ...interface{}) -} - -// StdLogger is implementation of the Logger interface that delegates to default `log` package -var StdLogger = &stdLogger{} - -type stdLogger struct{} - -func (l *stdLogger) Error(msg string) { - log.Printf("ERROR: %s", msg) -} - -// Infof logs a message at info priority -func (l *stdLogger) Infof(msg string, args ...interface{}) { - log.Printf(msg, args...) -} - -// NullLogger is implementation of the Logger interface that delegates to default `log` package -var NullLogger = &nullLogger{} - -type nullLogger struct{} - -func (l *nullLogger) Error(msg string) {} -func (l *nullLogger) Infof(msg string, args ...interface{}) {} diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go deleted file mode 100644 index e56db9b732a4..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/metrics.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (c) 2017-2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "github.com/uber/jaeger-lib/metrics" -) - -// Metrics is a container of all stats emitted by Jaeger tracer. 
-type Metrics struct { - // Number of traces started by this tracer as sampled - TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y" help:"Number of traces started by this tracer as sampled"` - - // Number of traces started by this tracer as not sampled - TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"` - - // Number of externally started sampled traces this tracer joined - TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"` - - // Number of externally started not-sampled traces this tracer joined - TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"` - - // Number of sampled spans started by this tracer - SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of sampled spans started by this tracer"` - - // Number of unsampled spans started by this tracer - SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of unsampled spans started by this tracer"` - - // Number of spans finished by this tracer - SpansFinished metrics.Counter `metric:"finished_spans" help:"Number of spans finished by this tracer"` - - // Number of errors decoding tracing context - DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"` - - // Number of spans successfully reported - ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok" help:"Number of spans successfully reported"` - - // Number of spans not reported due to a Sender failure - ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err" help:"Number of spans not reported due to a Sender failure"` - - // Number of spans 
dropped due to internal queue overflow - ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped" help:"Number of spans dropped due to internal queue overflow"` - - // Current number of spans in the reporter queue - ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length" help:"Current number of spans in the reporter queue"` - - // Number of times the Sampler succeeded to retrieve sampling strategy - SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve sampling strategy"` - - // Number of times the Sampler failed to retrieve sampling strategy - SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err" help:"Number of times the Sampler failed to retrieve sampling strategy"` - - // Number of times the Sampler succeeded to retrieve and update sampling strategy - SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve and update sampling strategy"` - - // Number of times the Sampler failed to update sampling strategy - SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err" help:"Number of times the Sampler failed to update sampling strategy"` - - // Number of times baggage was successfully written or updated on spans. - BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok" help:"Number of times baggage was successfully written or updated on spans"` - - // Number of times baggage failed to write or update on spans. - BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err" help:"Number of times baggage failed to write or update on spans"` - - // Number of times baggage was truncated as per baggage restrictions. 
- BaggageTruncate metrics.Counter `metric:"baggage_truncations" help:"Number of times baggage was truncated as per baggage restrictions"` - - // Number of times baggage restrictions were successfully updated. - BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok" help:"Number of times baggage restrictions were successfully updated"` - - // Number of times baggage restrictions failed to update. - BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err" help:"Number of times baggage restrictions failed to update"` - - // Number of times debug spans were throttled. - ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans" help:"Number of times debug spans were throttled"` - - // Number of times throttler successfully updated. - ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok" help:"Number of times throttler successfully updated"` - - // Number of times throttler failed to update. - ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err" help:"Number of times throttler failed to update"` -} - -// NewMetrics creates a new Metrics struct and initializes it. -func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics { - m := &Metrics{} - // TODO the namespace "jaeger" should be configurable - metrics.MustInit(m, factory.Namespace(metrics.NSOptions{Name: "jaeger"}).Namespace(metrics.NSOptions{Name: "tracer"}), globalTags) - return m -} - -// NewNullMetrics creates a new Metrics struct that won't report any metrics. 
-func NewNullMetrics() *Metrics { - return NewMetrics(metrics.NullFactory, nil) -} diff --git a/vendor/github.com/uber/jaeger-client-go/observer.go b/vendor/github.com/uber/jaeger-client-go/observer.go deleted file mode 100644 index 7bbd028897a9..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/observer.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import opentracing "github.com/opentracing/opentracing-go" - -// Observer can be registered with the Tracer to receive notifications about -// new Spans. -// -// Deprecated: use jaeger.ContribObserver instead. -type Observer interface { - OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver -} - -// SpanObserver is created by the Observer and receives notifications about -// other Span events. -// -// Deprecated: use jaeger.ContribSpanObserver instead. 
-type SpanObserver interface { - OnSetOperationName(operationName string) - OnSetTag(key string, value interface{}) - OnFinish(options opentracing.FinishOptions) -} - -// compositeObserver is a dispatcher to other observers -type compositeObserver struct { - observers []ContribObserver -} - -// compositeSpanObserver is a dispatcher to other span observers -type compositeSpanObserver struct { - observers []ContribSpanObserver -} - -// noopSpanObserver is used when there are no observers registered -// on the Tracer or none of them returns span observers from OnStartSpan. -var noopSpanObserver = &compositeSpanObserver{} - -func (o *compositeObserver) append(contribObserver ContribObserver) { - o.observers = append(o.observers, contribObserver) -} - -func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver { - var spanObservers []ContribSpanObserver - for _, obs := range o.observers { - spanObs, ok := obs.OnStartSpan(sp, operationName, options) - if ok { - if spanObservers == nil { - spanObservers = make([]ContribSpanObserver, 0, len(o.observers)) - } - spanObservers = append(spanObservers, spanObs) - } - } - if len(spanObservers) == 0 { - return noopSpanObserver - } - return &compositeSpanObserver{observers: spanObservers} -} - -func (o *compositeSpanObserver) OnSetOperationName(operationName string) { - for _, obs := range o.observers { - obs.OnSetOperationName(operationName) - } -} - -func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) { - for _, obs := range o.observers { - obs.OnSetTag(key, value) - } -} - -func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) { - for _, obs := range o.observers { - obs.OnFinish(options) - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/process.go b/vendor/github.com/uber/jaeger-client-go/process.go deleted file mode 100644 index 30cbf99624c9..000000000000 --- 
a/vendor/github.com/uber/jaeger-client-go/process.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -// Process holds process specific metadata that's relevant to this client. -type Process struct { - Service string - UUID string - Tags []Tag -} - -// ProcessSetter sets a process. This can be used by any class that requires -// the process to be set as part of initialization. -// See internal/throttler/remote/throttler.go for an example. -type ProcessSetter interface { - SetProcess(process Process) -} diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go deleted file mode 100644 index 5b50cfb7118c..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/propagation.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "log" - "net/url" - "strings" - "sync" - - opentracing "github.com/opentracing/opentracing-go" -) - -// Injector is responsible for injecting SpanContext instances in a manner suitable -// for propagation via a format-specific "carrier" object. Typically the -// injection will take place across an RPC boundary, but message queues and -// other IPC mechanisms are also reasonable places to use an Injector. -type Injector interface { - // Inject takes `SpanContext` and injects it into `carrier`. The actual type - // of `carrier` depends on the `format` passed to `Tracer.Inject()`. - // - // Implementations may return opentracing.ErrInvalidCarrier or any other - // implementation-specific error if injection fails. - Inject(ctx SpanContext, carrier interface{}) error -} - -// Extractor is responsible for extracting SpanContext instances from a -// format-specific "carrier" object. Typically the extraction will take place -// on the server side of an RPC boundary, but message queues and other IPC -// mechanisms are also reasonable places to use an Extractor. -type Extractor interface { - // Extract decodes a SpanContext instance from the given `carrier`, - // or (nil, opentracing.ErrSpanContextNotFound) if no context could - // be found in the `carrier`. 
- Extract(carrier interface{}) (SpanContext, error) -} - -// TextMapPropagator is a combined Injector and Extractor for TextMap format -type TextMapPropagator struct { - headerKeys *HeadersConfig - metrics Metrics - encodeValue func(string) string - decodeValue func(string) string -} - -// NewTextMapPropagator creates a combined Injector and Extractor for TextMap format -func NewTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator { - return &TextMapPropagator{ - headerKeys: headerKeys, - metrics: metrics, - encodeValue: func(val string) string { - return val - }, - decodeValue: func(val string) string { - return val - }, - } -} - -// NewHTTPHeaderPropagator creates a combined Injector and Extractor for HTTPHeaders format -func NewHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator { - return &TextMapPropagator{ - headerKeys: headerKeys, - metrics: metrics, - encodeValue: func(val string) string { - return url.QueryEscape(val) - }, - decodeValue: func(val string) string { - // ignore decoding errors, cannot do anything about them - if v, err := url.QueryUnescape(val); err == nil { - return v - } - return val - }, - } -} - -// BinaryPropagator is a combined Injector and Extractor for Binary format -type BinaryPropagator struct { - tracer *Tracer - buffers sync.Pool -} - -// NewBinaryPropagator creates a combined Injector and Extractor for Binary format -func NewBinaryPropagator(tracer *Tracer) *BinaryPropagator { - return &BinaryPropagator{ - tracer: tracer, - buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}, - } -} - -// Inject implements Injector of TextMapPropagator -func (p *TextMapPropagator) Inject( - sc SpanContext, - abstractCarrier interface{}, -) error { - textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter) - if !ok { - return opentracing.ErrInvalidCarrier - } - - // Do not encode the string with trace context to avoid accidental double-encoding - // if people 
are using opentracing < 0.10.0. Our colon-separated representation - // of the trace context is already safe for HTTP headers. - textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String()) - for k, v := range sc.baggage { - safeKey := p.addBaggageKeyPrefix(k) - safeVal := p.encodeValue(v) - textMapWriter.Set(safeKey, safeVal) - } - return nil -} - -// Extract implements Extractor of TextMapPropagator -func (p *TextMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { - textMapReader, ok := abstractCarrier.(opentracing.TextMapReader) - if !ok { - return emptyContext, opentracing.ErrInvalidCarrier - } - var ctx SpanContext - var baggage map[string]string - err := textMapReader.ForeachKey(func(rawKey, value string) error { - key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap - if key == p.headerKeys.TraceContextHeaderName { - var err error - safeVal := p.decodeValue(value) - if ctx, err = ContextFromString(safeVal); err != nil { - return err - } - } else if key == p.headerKeys.JaegerDebugHeader { - ctx.debugID = p.decodeValue(value) - } else if key == p.headerKeys.JaegerBaggageHeader { - if baggage == nil { - baggage = make(map[string]string) - } - for k, v := range p.parseCommaSeparatedMap(value) { - baggage[k] = v - } - } else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) { - if baggage == nil { - baggage = make(map[string]string) - } - safeKey := p.removeBaggageKeyPrefix(key) - safeVal := p.decodeValue(value) - baggage[safeKey] = safeVal - } - return nil - }) - if err != nil { - p.metrics.DecodingErrors.Inc(1) - return emptyContext, err - } - if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 { - return emptyContext, opentracing.ErrSpanContextNotFound - } - ctx.baggage = baggage - return ctx, nil -} - -// Inject implements Injector of BinaryPropagator -func (p *BinaryPropagator) Inject( - sc SpanContext, - abstractCarrier interface{}, -) error { - carrier, ok := 
abstractCarrier.(io.Writer) - if !ok { - return opentracing.ErrInvalidCarrier - } - - // Handle the tracer context - if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil { - return err - } - if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil { - return err - } - if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil { - return err - } - if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil { - return err - } - - // Handle the baggage items - if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil { - return err - } - for k, v := range sc.baggage { - if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil { - return err - } - io.WriteString(carrier, k) - if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil { - return err - } - io.WriteString(carrier, v) - } - - return nil -} - -// Extract implements Extractor of BinaryPropagator -func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { - carrier, ok := abstractCarrier.(io.Reader) - if !ok { - return emptyContext, opentracing.ErrInvalidCarrier - } - var ctx SpanContext - - if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - - // Handle the baggage items - var numBaggage int32 - if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - if iNumBaggage 
:= int(numBaggage); iNumBaggage > 0 { - ctx.baggage = make(map[string]string, iNumBaggage) - buf := p.buffers.Get().(*bytes.Buffer) - defer p.buffers.Put(buf) - - var keyLen, valLen int32 - for i := 0; i < iNumBaggage; i++ { - if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - buf.Reset() - buf.Grow(int(keyLen)) - if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - key := buf.String() - - if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - buf.Reset() - buf.Grow(int(valLen)) - if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - ctx.baggage[key] = buf.String() - } - } - - return ctx, nil -} - -// Converts a comma separated key value pair list into a map -// e.g. 
key1=value1, key2=value2, key3 = value3 -// is converted to map[string]string { "key1" : "value1", -// "key2" : "value2", -// "key3" : "value3" } -func (p *TextMapPropagator) parseCommaSeparatedMap(value string) map[string]string { - baggage := make(map[string]string) - value, err := url.QueryUnescape(value) - if err != nil { - log.Printf("Unable to unescape %s, %v", value, err) - return baggage - } - for _, kvpair := range strings.Split(value, ",") { - kv := strings.Split(strings.TrimSpace(kvpair), "=") - if len(kv) == 2 { - baggage[kv[0]] = kv[1] - } else { - log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader) - } - } - return baggage -} - -// Converts a baggage item key into an http header format, -// by prepending TraceBaggageHeaderPrefix and encoding the key string -func (p *TextMapPropagator) addBaggageKeyPrefix(key string) string { - // TODO encodeBaggageKeyAsHeader add caching and escaping - return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key) -} - -func (p *TextMapPropagator) removeBaggageKeyPrefix(key string) string { - // TODO decodeBaggageHeaderKey add caching and escaping - return key[len(p.headerKeys.TraceBaggageHeaderPrefix):] -} diff --git a/vendor/github.com/uber/jaeger-client-go/reference.go b/vendor/github.com/uber/jaeger-client-go/reference.go deleted file mode 100644 index 5646e78bb2a8..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/reference.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import "github.com/opentracing/opentracing-go" - -// Reference represents a causal reference to other Spans (via their SpanContext). -type Reference struct { - Type opentracing.SpanReferenceType - Context SpanContext -} diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go deleted file mode 100644 index 27163ebe433a..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/reporter.go +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/opentracing/opentracing-go" - - "github.com/uber/jaeger-client-go/log" -) - -// Reporter is called by the tracer when a span is completed to report the span to the tracing collector. -type Reporter interface { - // Report submits a new span to collectors, possibly asynchronously and/or with buffering. - Report(span *Span) - - // Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory. - Close() -} - -// ------------------------------ - -type nullReporter struct{} - -// NewNullReporter creates a no-op reporter that ignores all reported spans. 
-func NewNullReporter() Reporter { - return &nullReporter{} -} - -// Report implements Report() method of Reporter by doing nothing. -func (r *nullReporter) Report(span *Span) { - // no-op -} - -// Close implements Close() method of Reporter by doing nothing. -func (r *nullReporter) Close() { - // no-op -} - -// ------------------------------ - -type loggingReporter struct { - logger Logger -} - -// NewLoggingReporter creates a reporter that logs all reported spans to provided logger. -func NewLoggingReporter(logger Logger) Reporter { - return &loggingReporter{logger} -} - -// Report implements Report() method of Reporter by logging the span to the logger. -func (r *loggingReporter) Report(span *Span) { - r.logger.Infof("Reporting span %+v", span) -} - -// Close implements Close() method of Reporter by doing nothing. -func (r *loggingReporter) Close() { - // no-op -} - -// ------------------------------ - -// InMemoryReporter is used for testing, and simply collects spans in memory. -type InMemoryReporter struct { - spans []opentracing.Span - lock sync.Mutex -} - -// NewInMemoryReporter creates a reporter that stores spans in memory. -// NOTE: the Tracer should be created with options.PoolSpans = false. -func NewInMemoryReporter() *InMemoryReporter { - return &InMemoryReporter{ - spans: make([]opentracing.Span, 0, 10), - } -} - -// Report implements Report() method of Reporter by storing the span in the buffer. -func (r *InMemoryReporter) Report(span *Span) { - r.lock.Lock() - // Need to retain the span otherwise it will be released - r.spans = append(r.spans, span.Retain()) - r.lock.Unlock() -} - -// Close implements Close() method of Reporter -func (r *InMemoryReporter) Close() { - r.Reset() -} - -// SpansSubmitted returns the number of spans accumulated in the buffer. -func (r *InMemoryReporter) SpansSubmitted() int { - r.lock.Lock() - defer r.lock.Unlock() - return len(r.spans) -} - -// GetSpans returns accumulated spans as a copy of the buffer. 
-func (r *InMemoryReporter) GetSpans() []opentracing.Span { - r.lock.Lock() - defer r.lock.Unlock() - copied := make([]opentracing.Span, len(r.spans)) - copy(copied, r.spans) - return copied -} - -// Reset clears all accumulated spans. -func (r *InMemoryReporter) Reset() { - r.lock.Lock() - defer r.lock.Unlock() - - // Before reset the collection need to release Span memory - for _, span := range r.spans { - span.(*Span).Release() - } - r.spans = r.spans[:0] -} - -// ------------------------------ - -type compositeReporter struct { - reporters []Reporter -} - -// NewCompositeReporter creates a reporter that ignores all reported spans. -func NewCompositeReporter(reporters ...Reporter) Reporter { - return &compositeReporter{reporters: reporters} -} - -// Report implements Report() method of Reporter by delegating to each underlying reporter. -func (r *compositeReporter) Report(span *Span) { - for _, reporter := range r.reporters { - reporter.Report(span) - } -} - -// Close implements Close() method of Reporter by closing each underlying reporter. -func (r *compositeReporter) Close() { - for _, reporter := range r.reporters { - reporter.Close() - } -} - -// ------------- REMOTE REPORTER ----------------- - -type reporterQueueItemType int - -const ( - defaultQueueSize = 100 - defaultBufferFlushInterval = 1 * time.Second - - reporterQueueItemSpan reporterQueueItemType = iota - reporterQueueItemClose -) - -type reporterQueueItem struct { - itemType reporterQueueItemType - span *Span - close *sync.WaitGroup -} - -type remoteReporter struct { - // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. - // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq - queueLength int64 - closed int64 // 0 - not closed, 1 - closed - - reporterOptions - - sender Transport - queue chan reporterQueueItem -} - -// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender. 
-// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped). -// Periodically the transport buffer is flushed even if it hasn't reached max packet size. -// Calls to Close() block until all spans reported prior to the call to Close are flushed. -func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter { - options := reporterOptions{} - for _, option := range opts { - option(&options) - } - if options.bufferFlushInterval <= 0 { - options.bufferFlushInterval = defaultBufferFlushInterval - } - if options.logger == nil { - options.logger = log.NullLogger - } - if options.metrics == nil { - options.metrics = NewNullMetrics() - } - if options.queueSize <= 0 { - options.queueSize = defaultQueueSize - } - reporter := &remoteReporter{ - reporterOptions: options, - sender: sender, - queue: make(chan reporterQueueItem, options.queueSize), - } - go reporter.processQueue() - return reporter -} - -// Report implements Report() method of Reporter. -// It passes the span to a background go-routine for submission to Jaeger backend. -// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented. -// If Report() is called after the reporter has been Close()-ed, the additional spans will not be -// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly, -// because some of them may still be successfully added to the queue. -func (r *remoteReporter) Report(span *Span) { - select { - // Need to retain the span otherwise it will be released - case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span.Retain()}: - atomic.AddInt64(&r.queueLength, 1) - default: - r.metrics.ReporterDropped.Inc(1) - } -} - -// Close implements Close() method of Reporter by waiting for the queue to be drained. 
-func (r *remoteReporter) Close() { - if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped { - r.logger.Error("Repeated attempt to close the reporter is ignored") - return - } - r.sendCloseEvent() - r.sender.Close() -} - -func (r *remoteReporter) sendCloseEvent() { - wg := &sync.WaitGroup{} - wg.Add(1) - item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg} - - r.queue <- item // if the queue is full we will block until there is space - atomic.AddInt64(&r.queueLength, 1) - wg.Wait() -} - -// processQueue reads spans from the queue, converts them to Thrift, and stores them in an internal buffer. -// When the buffer length reaches batchSize, it is flushed by submitting the accumulated spans to Jaeger. -// Buffer also gets flushed automatically every batchFlushInterval seconds, just in case the tracer stopped -// reporting new spans. -func (r *remoteReporter) processQueue() { - // flush causes the Sender to flush its accumulated spans and clear the buffer - flush := func() { - if flushed, err := r.sender.Flush(); err != nil { - r.metrics.ReporterFailure.Inc(int64(flushed)) - r.logger.Error(fmt.Sprintf("error when flushing the buffer: %s", err.Error())) - } else if flushed > 0 { - r.metrics.ReporterSuccess.Inc(int64(flushed)) - } - } - - timer := time.NewTicker(r.bufferFlushInterval) - for { - select { - case <-timer.C: - flush() - case item := <-r.queue: - atomic.AddInt64(&r.queueLength, -1) - switch item.itemType { - case reporterQueueItemSpan: - span := item.span - if flushed, err := r.sender.Append(span); err != nil { - r.metrics.ReporterFailure.Inc(int64(flushed)) - r.logger.Error(fmt.Sprintf("error reporting span %q: %s", span.OperationName(), err.Error())) - } else if flushed > 0 { - r.metrics.ReporterSuccess.Inc(int64(flushed)) - // to reduce the number of gauge stats, we only emit queue length on flush - r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength)) - } - span.Release() - case reporterQueueItemClose: - 
timer.Stop() - flush() - item.close.Done() - return - } - } - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/vendor/github.com/uber/jaeger-client-go/reporter_options.go deleted file mode 100644 index 65012d7015dc..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/reporter_options.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "time" -) - -// ReporterOption is a function that sets some option on the reporter. -type ReporterOption func(c *reporterOptions) - -// ReporterOptions is a factory for all available ReporterOption's -var ReporterOptions reporterOptions - -// reporterOptions control behavior of the reporter. -type reporterOptions struct { - // queueSize is the size of internal queue where reported spans are stored before they are processed in the background - queueSize int - // bufferFlushInterval is how often the buffer is force-flushed, even if it's not full - bufferFlushInterval time.Duration - // logger is used to log errors of span submissions - logger Logger - // metrics is used to record runtime stats - metrics *Metrics -} - -// QueueSize creates a ReporterOption that sets the size of the internal queue where -// spans are stored before they are processed. 
-func (reporterOptions) QueueSize(queueSize int) ReporterOption { - return func(r *reporterOptions) { - r.queueSize = queueSize - } -} - -// Metrics creates a ReporterOption that initializes Metrics in the reporter, -// which is used to record runtime statistics. -func (reporterOptions) Metrics(metrics *Metrics) ReporterOption { - return func(r *reporterOptions) { - r.metrics = metrics - } -} - -// BufferFlushInterval creates a ReporterOption that sets how often the queue -// is force-flushed. -func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption { - return func(r *reporterOptions) { - r.bufferFlushInterval = bufferFlushInterval - } -} - -// Logger creates a ReporterOption that initializes the logger used to log -// errors of span submissions. -func (reporterOptions) Logger(logger Logger) ReporterOption { - return func(r *reporterOptions) { - r.logger = logger - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md deleted file mode 100644 index 879948e9c9cd..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md +++ /dev/null @@ -1,5 +0,0 @@ -An Observer that can be used to emit RPC metrics -================================================ - -It can be attached to the tracer during tracer construction. -See `ExampleObserver` function in [observer_test.go](./observer_test.go). diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go deleted file mode 100644 index 51aa11b350e5..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package rpcmetrics implements an Observer that can be used to emit RPC metrics. -package rpcmetrics diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go deleted file mode 100644 index 30555243d0c0..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcmetrics - -import "sync" - -// normalizedEndpoints is a cache for endpointName -> safeName mappings. 
-type normalizedEndpoints struct { - names map[string]string - maxSize int - defaultName string - normalizer NameNormalizer - mux sync.RWMutex -} - -func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints { - return &normalizedEndpoints{ - maxSize: maxSize, - normalizer: normalizer, - names: make(map[string]string, maxSize), - } -} - -// normalize looks up the name in the cache, if not found it uses normalizer -// to convert the name to a safe name. If called with more than maxSize unique -// names it returns "" for all other names beyond those already cached. -func (n *normalizedEndpoints) normalize(name string) string { - n.mux.RLock() - norm, ok := n.names[name] - l := len(n.names) - n.mux.RUnlock() - if ok { - return norm - } - if l >= n.maxSize { - return "" - } - return n.normalizeWithLock(name) -} - -func (n *normalizedEndpoints) normalizeWithLock(name string) string { - norm := n.normalizer.Normalize(name) - n.mux.Lock() - defer n.mux.Unlock() - // cache may have grown while we were not holding the lock - if len(n.names) >= n.maxSize { - return "" - } - n.names[name] = norm - return norm -} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go deleted file mode 100644 index a8cec2fa685b..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcmetrics - -import ( - "sync" - - "github.com/uber/jaeger-lib/metrics" -) - -const ( - otherEndpointsPlaceholder = "other" - endpointNameMetricTag = "endpoint" -) - -// Metrics is a collection of metrics for an endpoint describing -// throughput, success, errors, and performance. -type Metrics struct { - // RequestCountSuccess is a counter of the total number of successes. - RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"` - - // RequestCountFailures is a counter of the number of times any failure has been observed. - RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"` - - // RequestLatencySuccess is a latency histogram of successful requests. - RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"` - - // RequestLatencyFailures is a latency histogram of failed requests. - RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"` - - // HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299 - HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"` - - // HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399 - HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"` - - // HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499 - HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"` - - // HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599 - HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"` -} - -func (m *Metrics) recordHTTPStatusCode(statusCode uint16) { - if statusCode >= 200 && statusCode < 300 { - m.HTTPStatusCode2xx.Inc(1) - } else if statusCode >= 300 && statusCode < 400 { - 
m.HTTPStatusCode3xx.Inc(1) - } else if statusCode >= 400 && statusCode < 500 { - m.HTTPStatusCode4xx.Inc(1) - } else if statusCode >= 500 && statusCode < 600 { - m.HTTPStatusCode5xx.Inc(1) - } -} - -// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name. -// Only maxNumberOfEndpoints Metrics are stored, all other endpoint names are mapped -// to a generic endpoint name "other". -type MetricsByEndpoint struct { - metricsFactory metrics.Factory - endpoints *normalizedEndpoints - metricsByEndpoint map[string]*Metrics - mux sync.RWMutex -} - -func newMetricsByEndpoint( - metricsFactory metrics.Factory, - normalizer NameNormalizer, - maxNumberOfEndpoints int, -) *MetricsByEndpoint { - return &MetricsByEndpoint{ - metricsFactory: metricsFactory, - endpoints: newNormalizedEndpoints(maxNumberOfEndpoints, normalizer), - metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other" - } -} - -func (m *MetricsByEndpoint) get(endpoint string) *Metrics { - safeName := m.endpoints.normalize(endpoint) - if safeName == "" { - safeName = otherEndpointsPlaceholder - } - m.mux.RLock() - met := m.metricsByEndpoint[safeName] - m.mux.RUnlock() - if met != nil { - return met - } - - return m.getWithWriteLock(safeName) -} - -// split to make easier to test -func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics { - m.mux.Lock() - defer m.mux.Unlock() - - // it is possible that the name has been already registered after we released - // the read lock and before we grabbed the write lock, so check for that. - if met, ok := m.metricsByEndpoint[safeName]; ok { - return met - } - - // it would be nice to create the struct before locking, since Init() is somewhat - // expensive, however some metrics backends (e.g. expvar) may not like duplicate metrics. 
- met := &Metrics{} - tags := map[string]string{endpointNameMetricTag: safeName} - metrics.Init(met, m.metricsFactory, tags) - - m.metricsByEndpoint[safeName] = met - return met -} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go deleted file mode 100644 index 148d84b3a1a6..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcmetrics - -// NameNormalizer is used to convert the endpoint names to strings -// that can be safely used as tags in the metrics. -type NameNormalizer interface { - Normalize(name string) string -} - -// DefaultNameNormalizer converts endpoint names so that they contain only characters -// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'. -var DefaultNameNormalizer = &SimpleNameNormalizer{ - SafeSets: []SafeCharacterSet{ - &Range{From: 'a', To: 'z'}, - &Range{From: 'A', To: 'Z'}, - &Range{From: '0', To: '9'}, - &Char{'-'}, - &Char{'_'}, - &Char{'/'}, - &Char{'.'}, - }, - Replacement: '-', -} - -// SimpleNameNormalizer uses a set of safe character sets. 
-type SimpleNameNormalizer struct { - SafeSets []SafeCharacterSet - Replacement byte -} - -// SafeCharacterSet determines if the given character is "safe" -type SafeCharacterSet interface { - IsSafe(c byte) bool -} - -// Range implements SafeCharacterSet -type Range struct { - From, To byte -} - -// IsSafe implements SafeCharacterSet -func (r *Range) IsSafe(c byte) bool { - return c >= r.From && c <= r.To -} - -// Char implements SafeCharacterSet -type Char struct { - Val byte -} - -// IsSafe implements SafeCharacterSet -func (ch *Char) IsSafe(c byte) bool { - return c == ch.Val -} - -// Normalize checks each character in the string against SafeSets, -// and if it's not safe substitutes it with Replacement. -func (n *SimpleNameNormalizer) Normalize(name string) string { - var retMe []byte - nameBytes := []byte(name) - for i, b := range nameBytes { - if n.safeByte(b) { - if retMe != nil { - retMe[i] = b - } - } else { - if retMe == nil { - retMe = make([]byte, len(nameBytes)) - copy(retMe[0:i], nameBytes[0:i]) - } - retMe[i] = n.Replacement - } - } - if retMe == nil { - return name - } - return string(retMe) -} - -// safeByte checks if b against all safe charsets. -func (n *SimpleNameNormalizer) safeByte(b byte) bool { - for i := range n.SafeSets { - if n.SafeSets[i].IsSafe(b) { - return true - } - } - return false -} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go deleted file mode 100644 index eca5ff6f3b98..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcmetrics - -import ( - "strconv" - "sync" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/uber/jaeger-lib/metrics" - - jaeger "github.com/uber/jaeger-client-go" -) - -const defaultMaxNumberOfEndpoints = 200 - -// Observer is an observer that can emit RPC metrics. -type Observer struct { - metricsByEndpoint *MetricsByEndpoint -} - -// NewObserver creates a new observer that can emit RPC metrics. -func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer { - return &Observer{ - metricsByEndpoint: newMetricsByEndpoint( - metricsFactory, - normalizer, - defaultMaxNumberOfEndpoints, - ), - } -} - -// OnStartSpan creates a new Observer for the span. -func (o *Observer) OnStartSpan( - operationName string, - options opentracing.StartSpanOptions, -) jaeger.SpanObserver { - return NewSpanObserver(o.metricsByEndpoint, operationName, options) -} - -// SpanKind identifies the span as inboud, outbound, or internal -type SpanKind int - -const ( - // Local span kind - Local SpanKind = iota - // Inbound span kind - Inbound - // Outbound span kind - Outbound -) - -// SpanObserver collects RPC metrics -type SpanObserver struct { - metricsByEndpoint *MetricsByEndpoint - operationName string - startTime time.Time - mux sync.Mutex - kind SpanKind - httpStatusCode uint16 - err bool -} - -// NewSpanObserver creates a new SpanObserver that can emit RPC metrics. 
-func NewSpanObserver( - metricsByEndpoint *MetricsByEndpoint, - operationName string, - options opentracing.StartSpanOptions, -) *SpanObserver { - so := &SpanObserver{ - metricsByEndpoint: metricsByEndpoint, - operationName: operationName, - startTime: options.StartTime, - } - for k, v := range options.Tags { - so.handleTagInLock(k, v) - } - return so -} - -// handleTags watches for special tags -// - SpanKind -// - HttpStatusCode -// - Error -func (so *SpanObserver) handleTagInLock(key string, value interface{}) { - if key == string(ext.SpanKind) { - if v, ok := value.(ext.SpanKindEnum); ok { - value = string(v) - } - if v, ok := value.(string); ok { - if v == string(ext.SpanKindRPCClientEnum) { - so.kind = Outbound - } else if v == string(ext.SpanKindRPCServerEnum) { - so.kind = Inbound - } - } - return - } - if key == string(ext.HTTPStatusCode) { - if v, ok := value.(uint16); ok { - so.httpStatusCode = v - } else if v, ok := value.(int); ok { - so.httpStatusCode = uint16(v) - } else if v, ok := value.(string); ok { - if vv, err := strconv.Atoi(v); err == nil { - so.httpStatusCode = uint16(vv) - } - } - return - } - if key == string(ext.Error) { - if v, ok := value.(bool); ok { - so.err = v - } else if v, ok := value.(string); ok { - if vv, err := strconv.ParseBool(v); err == nil { - so.err = vv - } - } - return - } -} - -// OnFinish emits the RPC metrics. It only has an effect when operation name -// is not blank, and the span kind is an RPC server. 
-func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) { - so.mux.Lock() - defer so.mux.Unlock() - - if so.operationName == "" || so.kind != Inbound { - return - } - - mets := so.metricsByEndpoint.get(so.operationName) - latency := options.FinishTime.Sub(so.startTime) - if so.err { - mets.RequestCountFailures.Inc(1) - mets.RequestLatencyFailures.Record(latency) - } else { - mets.RequestCountSuccess.Inc(1) - mets.RequestLatencySuccess.Record(latency) - } - mets.recordHTTPStatusCode(so.httpStatusCode) -} - -// OnSetOperationName records new operation name. -func (so *SpanObserver) OnSetOperationName(operationName string) { - so.mux.Lock() - so.operationName = operationName - so.mux.Unlock() -} - -// OnSetTag implements SpanObserver -func (so *SpanObserver) OnSetTag(key string, value interface{}) { - so.mux.Lock() - so.handleTagInLock(key, value) - so.mux.Unlock() -} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go deleted file mode 100644 index 3e1630953b90..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/sampler.go +++ /dev/null @@ -1,557 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "fmt" - "math" - "net/url" - "sync" - "sync/atomic" - "time" - - "github.com/uber/jaeger-client-go/log" - "github.com/uber/jaeger-client-go/thrift-gen/sampling" - "github.com/uber/jaeger-client-go/utils" -) - -const ( - defaultSamplingServerURL = "http://localhost:5778/sampling" - defaultSamplingRefreshInterval = time.Minute - defaultMaxOperations = 2000 -) - -// Sampler decides whether a new trace should be sampled or not. -type Sampler interface { - // IsSampled decides whether a trace with given `id` and `operation` - // should be sampled. This function will also return the tags that - // can be used to identify the type of sampling that was applied to - // the root span. Most simple samplers would return two tags, - // sampler.type and sampler.param, similar to those used in the Configuration - IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) - - // Close does a clean shutdown of the sampler, stopping any background - // go-routines it may have started. - Close() - - // Equal checks if the `other` sampler is functionally equivalent - // to this sampler. - // TODO remove this function. This function is used to determine if 2 samplers are equivalent - // which does not bode well with the adaptive sampler which has to create all the composite samplers - // for the comparison to occur. This is expensive to do if only one sampler has changed. - Equal(other Sampler) bool -} - -// ----------------------- - -// ConstSampler is a sampler that always makes the same decision. -type ConstSampler struct { - Decision bool - tags []Tag -} - -// NewConstSampler creates a ConstSampler. -func NewConstSampler(sample bool) Sampler { - tags := []Tag{ - {key: SamplerTypeTagKey, value: SamplerTypeConst}, - {key: SamplerParamTagKey, value: sample}, - } - return &ConstSampler{Decision: sample, tags: tags} -} - -// IsSampled implements IsSampled() of Sampler. 
-func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - return s.Decision, s.tags -} - -// Close implements Close() of Sampler. -func (s *ConstSampler) Close() { - // nothing to do -} - -// Equal implements Equal() of Sampler. -func (s *ConstSampler) Equal(other Sampler) bool { - if o, ok := other.(*ConstSampler); ok { - return s.Decision == o.Decision - } - return false -} - -// ----------------------- - -// ProbabilisticSampler is a sampler that randomly samples a certain percentage -// of traces. -type ProbabilisticSampler struct { - samplingRate float64 - samplingBoundary uint64 - tags []Tag -} - -const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff - -// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the -// samplingRate, in the range between 0.0 and 1.0. -// -// It relies on the fact that new trace IDs are 63bit random numbers themselves, thus making the sampling decision -// without generating a new random number, but simply calculating if traceID < (samplingRate * 2^63). -// TODO remove the error from this function for next major release -func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) { - if samplingRate < 0.0 || samplingRate > 1.0 { - return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate) - } - return newProbabilisticSampler(samplingRate), nil -} - -func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler { - samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0)) - tags := []Tag{ - {key: SamplerTypeTagKey, value: SamplerTypeProbabilistic}, - {key: SamplerParamTagKey, value: samplingRate}, - } - return &ProbabilisticSampler{ - samplingRate: samplingRate, - samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate), - tags: tags, - } -} - -// SamplingRate returns the sampling probability this sampled was constructed with. 
-func (s *ProbabilisticSampler) SamplingRate() float64 { - return s.samplingRate -} - -// IsSampled implements IsSampled() of Sampler. -func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - return s.samplingBoundary >= id.Low, s.tags -} - -// Close implements Close() of Sampler. -func (s *ProbabilisticSampler) Close() { - // nothing to do -} - -// Equal implements Equal() of Sampler. -func (s *ProbabilisticSampler) Equal(other Sampler) bool { - if o, ok := other.(*ProbabilisticSampler); ok { - return s.samplingBoundary == o.samplingBoundary - } - return false -} - -// ----------------------- - -type rateLimitingSampler struct { - maxTracesPerSecond float64 - rateLimiter utils.RateLimiter - tags []Tag -} - -// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled -// traces follows burstiness of the service, i.e. a service with uniformly distributed requests will have those -// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of -// sequential requests can be sampled each second. -func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler { - tags := []Tag{ - {key: SamplerTypeTagKey, value: SamplerTypeRateLimiting}, - {key: SamplerParamTagKey, value: maxTracesPerSecond}, - } - return &rateLimitingSampler{ - maxTracesPerSecond: maxTracesPerSecond, - rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)), - tags: tags, - } -} - -// IsSampled implements IsSampled() of Sampler. 
-func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - return s.rateLimiter.CheckCredit(1.0), s.tags -} - -func (s *rateLimitingSampler) Close() { - // nothing to do -} - -func (s *rateLimitingSampler) Equal(other Sampler) bool { - if o, ok := other.(*rateLimitingSampler); ok { - return s.maxTracesPerSecond == o.maxTracesPerSecond - } - return false -} - -// ----------------------- - -// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and -// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that -// every operation is sampled at least once in a time interval defined by the lowerBound. ie a lowerBound -// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes. -// -// The probabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both -// samplers return true, the tags for probabilisticSampler will be used. -type GuaranteedThroughputProbabilisticSampler struct { - probabilisticSampler *ProbabilisticSampler - lowerBoundSampler Sampler - tags []Tag - samplingRate float64 - lowerBound float64 -} - -// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both -// probabilisticSampler and rateLimitingSampler. 
-func NewGuaranteedThroughputProbabilisticSampler( - lowerBound, samplingRate float64, -) (*GuaranteedThroughputProbabilisticSampler, error) { - return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil -} - -func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler { - s := &GuaranteedThroughputProbabilisticSampler{ - lowerBoundSampler: NewRateLimitingSampler(lowerBound), - lowerBound: lowerBound, - } - s.setProbabilisticSampler(samplingRate) - return s -} - -func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) { - if s.probabilisticSampler == nil || s.samplingRate != samplingRate { - s.probabilisticSampler = newProbabilisticSampler(samplingRate) - s.samplingRate = s.probabilisticSampler.SamplingRate() - s.tags = []Tag{ - {key: SamplerTypeTagKey, value: SamplerTypeLowerBound}, - {key: SamplerParamTagKey, value: s.samplingRate}, - } - } -} - -// IsSampled implements IsSampled() of Sampler. -func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled { - s.lowerBoundSampler.IsSampled(id, operation) - return true, tags - } - sampled, _ := s.lowerBoundSampler.IsSampled(id, operation) - return sampled, s.tags -} - -// Close implements Close() of Sampler. -func (s *GuaranteedThroughputProbabilisticSampler) Close() { - s.probabilisticSampler.Close() - s.lowerBoundSampler.Close() -} - -// Equal implements Equal() of Sampler. -func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool { - // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for - // more information. 
- return false -} - -// this function should only be called while holding a Write lock -func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) { - s.setProbabilisticSampler(samplingRate) - if s.lowerBound != lowerBound { - s.lowerBoundSampler = NewRateLimitingSampler(lowerBound) - s.lowerBound = lowerBound - } -} - -// ----------------------- - -type adaptiveSampler struct { - sync.RWMutex - - samplers map[string]*GuaranteedThroughputProbabilisticSampler - defaultSampler *ProbabilisticSampler - lowerBound float64 - maxOperations int -} - -// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and -// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all -// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler. -func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) { - return newAdaptiveSampler(strategies, maxOperations), nil -} - -func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler { - samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler) - for _, strategy := range strategies.PerOperationStrategies { - sampler := newGuaranteedThroughputProbabilisticSampler( - strategies.DefaultLowerBoundTracesPerSecond, - strategy.ProbabilisticSampling.SamplingRate, - ) - samplers[strategy.Operation] = sampler - } - return &adaptiveSampler{ - samplers: samplers, - defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability), - lowerBound: strategies.DefaultLowerBoundTracesPerSecond, - maxOperations: maxOperations, - } -} - -func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - s.RLock() - sampler, ok := s.samplers[operation] - if ok { - defer s.RUnlock() - return sampler.IsSampled(id, operation) - } - s.RUnlock() - s.Lock() - defer s.Unlock() - - // Check 
if sampler has already been created - sampler, ok = s.samplers[operation] - if ok { - return sampler.IsSampled(id, operation) - } - // Store only up to maxOperations of unique ops. - if len(s.samplers) >= s.maxOperations { - return s.defaultSampler.IsSampled(id, operation) - } - newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate()) - s.samplers[operation] = newSampler - return newSampler.IsSampled(id, operation) -} - -func (s *adaptiveSampler) Close() { - s.Lock() - defer s.Unlock() - for _, sampler := range s.samplers { - sampler.Close() - } - s.defaultSampler.Close() -} - -func (s *adaptiveSampler) Equal(other Sampler) bool { - // NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple - // samplers which all need to be initialized before this function can be called for a comparison. - // Therefore, adaptiveSampler uses the update() function to only alter the samplers that need - // changing. Hence this function always returns false so that the update function can be called. - // Once the Equal() function is removed from the Sampler API, this will no longer be needed. 
- return false -} - -func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) { - s.Lock() - defer s.Unlock() - for _, strategy := range strategies.PerOperationStrategies { - operation := strategy.Operation - samplingRate := strategy.ProbabilisticSampling.SamplingRate - lowerBound := strategies.DefaultLowerBoundTracesPerSecond - if sampler, ok := s.samplers[operation]; ok { - sampler.update(lowerBound, samplingRate) - } else { - sampler := newGuaranteedThroughputProbabilisticSampler( - lowerBound, - samplingRate, - ) - s.samplers[operation] = sampler - } - } - s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond - if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability { - s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability) - } -} - -// ----------------------- - -// RemotelyControlledSampler is a delegating sampler that polls a remote server -// for the appropriate sampling strategy, constructs a corresponding sampler and -// delegates to it for sampling decisions. -type RemotelyControlledSampler struct { - // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. - // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq - closed int64 // 0 - not closed, 1 - closed - - sync.RWMutex - samplerOptions - - serviceName string - manager sampling.SamplingManager - doneChan chan *sync.WaitGroup -} - -type httpSamplingManager struct { - serverURL string -} - -func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) { - var out sampling.SamplingStrategyResponse - v := url.Values{} - v.Set("service", serviceName) - if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil { - return nil, err - } - return &out, nil -} - -// NewRemotelyControlledSampler creates a sampler that periodically pulls -// the sampling strategy from an HTTP sampling server (e.g. 
jaeger-agent). -func NewRemotelyControlledSampler( - serviceName string, - opts ...SamplerOption, -) *RemotelyControlledSampler { - options := applySamplerOptions(opts...) - sampler := &RemotelyControlledSampler{ - samplerOptions: options, - serviceName: serviceName, - manager: &httpSamplingManager{serverURL: options.samplingServerURL}, - doneChan: make(chan *sync.WaitGroup), - } - go sampler.pollController() - return sampler -} - -func applySamplerOptions(opts ...SamplerOption) samplerOptions { - options := samplerOptions{} - for _, option := range opts { - option(&options) - } - if options.sampler == nil { - options.sampler = newProbabilisticSampler(0.001) - } - if options.logger == nil { - options.logger = log.NullLogger - } - if options.maxOperations <= 0 { - options.maxOperations = defaultMaxOperations - } - if options.samplingServerURL == "" { - options.samplingServerURL = defaultSamplingServerURL - } - if options.metrics == nil { - options.metrics = NewNullMetrics() - } - if options.samplingRefreshInterval <= 0 { - options.samplingRefreshInterval = defaultSamplingRefreshInterval - } - return options -} - -// IsSampled implements IsSampled() of Sampler. -func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - s.RLock() - defer s.RUnlock() - return s.sampler.IsSampled(id, operation) -} - -// Close implements Close() of Sampler. -func (s *RemotelyControlledSampler) Close() { - if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped { - s.logger.Error("Repeated attempt to close the sampler is ignored") - return - } - - var wg sync.WaitGroup - wg.Add(1) - s.doneChan <- &wg - wg.Wait() -} - -// Equal implements Equal() of Sampler. -func (s *RemotelyControlledSampler) Equal(other Sampler) bool { - // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for - // more information. 
- if o, ok := other.(*RemotelyControlledSampler); ok { - s.RLock() - o.RLock() - defer s.RUnlock() - defer o.RUnlock() - return s.sampler.Equal(o.sampler) - } - return false -} - -func (s *RemotelyControlledSampler) pollController() { - ticker := time.NewTicker(s.samplingRefreshInterval) - defer ticker.Stop() - s.pollControllerWithTicker(ticker) -} - -func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) { - for { - select { - case <-ticker.C: - s.updateSampler() - case wg := <-s.doneChan: - wg.Done() - return - } - } -} - -func (s *RemotelyControlledSampler) getSampler() Sampler { - s.Lock() - defer s.Unlock() - return s.sampler -} - -func (s *RemotelyControlledSampler) setSampler(sampler Sampler) { - s.Lock() - defer s.Unlock() - s.sampler = sampler -} - -func (s *RemotelyControlledSampler) updateSampler() { - res, err := s.manager.GetSamplingStrategy(s.serviceName) - if err != nil { - s.metrics.SamplerQueryFailure.Inc(1) - s.logger.Infof("Unable to query sampling strategy: %v", err) - return - } - s.Lock() - defer s.Unlock() - - s.metrics.SamplerRetrieved.Inc(1) - if strategies := res.GetOperationSampling(); strategies != nil { - s.updateAdaptiveSampler(strategies) - } else { - err = s.updateRateLimitingOrProbabilisticSampler(res) - } - if err != nil { - s.metrics.SamplerUpdateFailure.Inc(1) - s.logger.Infof("Unable to handle sampling strategy response %+v. 
Got error: %v", res, err) - return - } - s.metrics.SamplerUpdated.Inc(1) -} - -// NB: this function should only be called while holding a Write lock -func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) { - if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok { - adaptiveSampler.update(strategies) - } else { - s.sampler = newAdaptiveSampler(strategies, s.maxOperations) - } -} - -// NB: this function should only be called while holding a Write lock -func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error { - var newSampler Sampler - if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil { - newSampler = newProbabilisticSampler(probabilistic.SamplingRate) - } else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil { - newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond)) - } else { - return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType()) - } - if !s.sampler.Equal(newSampler) { - s.sampler = newSampler - } - return nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_options.go deleted file mode 100644 index 75d28a561190..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/sampler_options.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "time" -) - -// SamplerOption is a function that sets some option on the sampler -type SamplerOption func(options *samplerOptions) - -// SamplerOptions is a factory for all available SamplerOption's -var SamplerOptions samplerOptions - -type samplerOptions struct { - metrics *Metrics - maxOperations int - sampler Sampler - logger Logger - samplingServerURL string - samplingRefreshInterval time.Duration -} - -// Metrics creates a SamplerOption that initializes Metrics on the sampler, -// which is used to emit statistics. -func (samplerOptions) Metrics(m *Metrics) SamplerOption { - return func(o *samplerOptions) { - o.metrics = m - } -} - -// MaxOperations creates a SamplerOption that sets the maximum number of -// operations the sampler will keep track of. -func (samplerOptions) MaxOperations(maxOperations int) SamplerOption { - return func(o *samplerOptions) { - o.maxOperations = maxOperations - } -} - -// InitialSampler creates a SamplerOption that sets the initial sampler -// to use before a remote sampler is created and used. -func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption { - return func(o *samplerOptions) { - o.sampler = sampler - } -} - -// Logger creates a SamplerOption that sets the logger used by the sampler. -func (samplerOptions) Logger(logger Logger) SamplerOption { - return func(o *samplerOptions) { - o.logger = logger - } -} - -// SamplingServerURL creates a SamplerOption that sets the sampling server url -// of the local agent that contains the sampling strategies. -func (samplerOptions) SamplingServerURL(samplingServerURL string) SamplerOption { - return func(o *samplerOptions) { - o.samplingServerURL = samplingServerURL - } -} - -// SamplingRefreshInterval creates a SamplerOption that sets how often the -// sampler will poll local agent for the appropriate sampling strategy. 
-func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption { - return func(o *samplerOptions) { - o.samplingRefreshInterval = samplingRefreshInterval - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go deleted file mode 100644 index 89195f14e08a..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/span.go +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright (c) 2017-2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "sync" - "sync/atomic" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" -) - -// Span implements opentracing.Span -type Span struct { - // referenceCounter used to increase the lifetime of - // the object before return it into the pool. - referenceCounter int32 - - sync.RWMutex - - tracer *Tracer - - context SpanContext - - // The name of the "operation" this span is an instance of. - // Known as a "span name" in some implementations. - operationName string - - // firstInProcess, if true, indicates that this span is the root of the (sub)tree - // of spans in the current process. In other words it's true for the root spans, - // and the ingress spans when the process joins another trace. 
- firstInProcess bool - - // startTime is the timestamp indicating when the span began, with microseconds precision. - startTime time.Time - - // duration returns duration of the span with microseconds precision. - // Zero value means duration is unknown. - duration time.Duration - - // tags attached to this span - tags []Tag - - // The span's "micro-log" - logs []opentracing.LogRecord - - // references for this span - references []Reference - - observer ContribSpanObserver -} - -// Tag is a simple key value wrapper. -// TODO deprecate in the next major release, use opentracing.Tag instead. -type Tag struct { - key string - value interface{} -} - -// SetOperationName sets or changes the operation name. -func (s *Span) SetOperationName(operationName string) opentracing.Span { - s.Lock() - defer s.Unlock() - if s.context.IsSampled() { - s.operationName = operationName - } - s.observer.OnSetOperationName(operationName) - return s -} - -// SetTag implements SetTag() of opentracing.Span -func (s *Span) SetTag(key string, value interface{}) opentracing.Span { - s.observer.OnSetTag(key, value) - if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) { - return s - } - s.Lock() - defer s.Unlock() - if s.context.IsSampled() { - s.setTagNoLocking(key, value) - } - return s -} - -func (s *Span) setTagNoLocking(key string, value interface{}) { - s.tags = append(s.tags, Tag{key: key, value: value}) -} - -// LogFields implements opentracing.Span API -func (s *Span) LogFields(fields ...log.Field) { - s.Lock() - defer s.Unlock() - if !s.context.IsSampled() { - return - } - s.logFieldsNoLocking(fields...) 
-} - -// this function should only be called while holding a Write lock -func (s *Span) logFieldsNoLocking(fields ...log.Field) { - lr := opentracing.LogRecord{ - Fields: fields, - Timestamp: time.Now(), - } - s.appendLog(lr) -} - -// LogKV implements opentracing.Span API -func (s *Span) LogKV(alternatingKeyValues ...interface{}) { - s.RLock() - sampled := s.context.IsSampled() - s.RUnlock() - if !sampled { - return - } - fields, err := log.InterleavedKVToFields(alternatingKeyValues...) - if err != nil { - s.LogFields(log.Error(err), log.String("function", "LogKV")) - return - } - s.LogFields(fields...) -} - -// LogEvent implements opentracing.Span API -func (s *Span) LogEvent(event string) { - s.Log(opentracing.LogData{Event: event}) -} - -// LogEventWithPayload implements opentracing.Span API -func (s *Span) LogEventWithPayload(event string, payload interface{}) { - s.Log(opentracing.LogData{Event: event, Payload: payload}) -} - -// Log implements opentracing.Span API -func (s *Span) Log(ld opentracing.LogData) { - s.Lock() - defer s.Unlock() - if s.context.IsSampled() { - if ld.Timestamp.IsZero() { - ld.Timestamp = s.tracer.timeNow() - } - s.appendLog(ld.ToLogRecord()) - } -} - -// this function should only be called while holding a Write lock -func (s *Span) appendLog(lr opentracing.LogRecord) { - // TODO add logic to limit number of logs per span (issue #46) - s.logs = append(s.logs, lr) -} - -// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext -func (s *Span) SetBaggageItem(key, value string) opentracing.Span { - s.Lock() - defer s.Unlock() - s.tracer.setBaggage(s, key, value) - return s -} - -// BaggageItem implements BaggageItem() of opentracing.SpanContext -func (s *Span) BaggageItem(key string) string { - s.RLock() - defer s.RUnlock() - return s.context.baggage[key] -} - -// Finish implements opentracing.Span API -// After finishing the Span object it returns back to the allocator unless the reporter retains it again, -// so after 
that, the Span object should no longer be used because it won't be valid anymore. -func (s *Span) Finish() { - s.FinishWithOptions(opentracing.FinishOptions{}) -} - -// FinishWithOptions implements opentracing.Span API -func (s *Span) FinishWithOptions(options opentracing.FinishOptions) { - if options.FinishTime.IsZero() { - options.FinishTime = s.tracer.timeNow() - } - s.observer.OnFinish(options) - s.Lock() - if s.context.IsSampled() { - s.duration = options.FinishTime.Sub(s.startTime) - // Note: bulk logs are not subject to maxLogsPerSpan limit - if options.LogRecords != nil { - s.logs = append(s.logs, options.LogRecords...) - } - for _, ld := range options.BulkLogData { - s.logs = append(s.logs, ld.ToLogRecord()) - } - } - s.Unlock() - // call reportSpan even for non-sampled traces, to return span to the pool - // and update metrics counter - s.tracer.reportSpan(s) -} - -// Context implements opentracing.Span API -func (s *Span) Context() opentracing.SpanContext { - s.Lock() - defer s.Unlock() - return s.context -} - -// Tracer implements opentracing.Span API -func (s *Span) Tracer() opentracing.Tracer { - return s.tracer -} - -func (s *Span) String() string { - s.RLock() - defer s.RUnlock() - return s.context.String() -} - -// OperationName allows retrieving current operation name. 
-func (s *Span) OperationName() string { - s.RLock() - defer s.RUnlock() - return s.operationName -} - -// Retain increases object counter to increase the lifetime of the object -func (s *Span) Retain() *Span { - atomic.AddInt32(&s.referenceCounter, 1) - return s -} - -// Release decrements object counter and return to the -// allocator manager when counter will below zero -func (s *Span) Release() { - if atomic.AddInt32(&s.referenceCounter, -1) == -1 { - s.tracer.spanAllocator.Put(s) - } -} - -// reset span state and release unused data -func (s *Span) reset() { - s.firstInProcess = false - s.context = emptyContext - s.operationName = "" - s.tracer = nil - s.startTime = time.Time{} - s.duration = 0 - s.observer = nil - atomic.StoreInt32(&s.referenceCounter, 0) - - // Note: To reuse memory we can save the pointers on the heap - s.tags = s.tags[:0] - s.logs = s.logs[:0] - s.references = s.references[:0] -} - -func (s *Span) serviceName() string { - return s.tracer.serviceName -} - -// setSamplingPriority returns true if the flag was updated successfully, false otherwise. -func setSamplingPriority(s *Span, value interface{}) bool { - val, ok := value.(uint16) - if !ok { - return false - } - s.Lock() - defer s.Unlock() - if val == 0 { - s.context.flags = s.context.flags & (^flagSampled) - return true - } - if s.tracer.isDebugAllowed(s.operationName) { - s.context.flags = s.context.flags | flagDebug | flagSampled - return true - } - return false -} diff --git a/vendor/github.com/uber/jaeger-client-go/span_allocator.go b/vendor/github.com/uber/jaeger-client-go/span_allocator.go deleted file mode 100644 index 6fe0cd0ce1eb..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/span_allocator.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import "sync" - -// SpanAllocator abstraction of managign span allocations -type SpanAllocator interface { - Get() *Span - Put(*Span) -} - -type syncPollSpanAllocator struct { - spanPool sync.Pool -} - -func newSyncPollSpanAllocator() SpanAllocator { - return &syncPollSpanAllocator{ - spanPool: sync.Pool{New: func() interface{} { - return &Span{} - }}, - } -} - -func (pool *syncPollSpanAllocator) Get() *Span { - return pool.spanPool.Get().(*Span) -} - -func (pool *syncPollSpanAllocator) Put(span *Span) { - span.reset() - pool.spanPool.Put(span) -} - -type simpleSpanAllocator struct{} - -func (pool simpleSpanAllocator) Get() *Span { - return &Span{} -} - -func (pool simpleSpanAllocator) Put(span *Span) { - // @comment https://github.com/jaegertracing/jaeger-client-go/pull/381#issuecomment-475904351 - // since finished spans are not reused, no need to reset them - // span.reset() -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go deleted file mode 100644 index e48811c500af..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go +++ /dev/null @@ -1,411 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package agent - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" - "github.com/uber/jaeger-client-go/thrift-gen/jaeger" - "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" -) - -// (needed to ensure 
safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ - -type Agent interface { - // Parameters: - // - Spans - EmitZipkinBatch(spans []*zipkincore.Span) (err error) - // Parameters: - // - Batch - EmitBatch(batch *jaeger.Batch) (err error) -} - -type AgentClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Parameters: -// - Spans -func (p *AgentClient) EmitZipkinBatch(spans []*zipkincore.Span) (err error) { - if err = p.sendEmitZipkinBatch(spans); err != nil { - return - } - return -} - -func (p *AgentClient) sendEmitZipkinBatch(spans []*zipkincore.Span) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("emitZipkinBatch", thrift.ONEWAY, p.SeqId); err != nil { - return - } - args := AgentEmitZipkinBatchArgs{ - Spans: spans, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -// Parameters: -// - Batch -func (p *AgentClient) EmitBatch(batch *jaeger.Batch) (err error) { - if err = p.sendEmitBatch(batch); err != nil { - return - } - return -} - -func (p *AgentClient) sendEmitBatch(batch 
*jaeger.Batch) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil { - return - } - args := AgentEmitBatchArgs{ - Batch: batch, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -type AgentProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Agent -} - -func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAgentProcessor(handler Agent) *AgentProcessor { - - self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self0.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler} - self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} - return self0 -} - -func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x1.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x1 - -} - -type agentProcessorEmitZipkinBatch struct { - handler Agent -} - 
-func (p *agentProcessorEmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitZipkinBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err - } - - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.EmitZipkinBatch(args.Spans); err2 != nil { - return true, err2 - } - return true, nil -} - -type agentProcessorEmitBatch struct { - handler Agent -} - -func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err - } - - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.EmitBatch(args.Batch); err2 != nil { - return true, err2 - } - return true, nil -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Spans -type AgentEmitZipkinBatchArgs struct { - Spans []*zipkincore.Span `thrift:"spans,1" json:"spans"` -} - -func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs { - return &AgentEmitZipkinBatchArgs{} -} - -func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span { - return p.Spans -} -func (p *AgentEmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*zipkincore.Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem2 := &zipkincore.Span{} - if err := _elem2.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) - } - p.Spans = append(p.Spans, _elem2) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("emitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
write field end error 1:spans: ", p), err) - } - return err -} - -func (p *AgentEmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p) -} - -// Attributes: -// - Batch -type AgentEmitBatchArgs struct { - Batch *jaeger.Batch `thrift:"batch,1" json:"batch"` -} - -func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { - return &AgentEmitBatchArgs{} -} - -var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch - -func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch { - if !p.IsSetBatch() { - return AgentEmitBatchArgs_Batch_DEFAULT - } - return p.Batch -} -func (p *AgentEmitBatchArgs) IsSetBatch() bool { - return p.Batch != nil -} - -func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error { - p.Batch = &jaeger.Batch{} - if err := p.Batch.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("emitBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := 
p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) - } - if err := p.Batch.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) - } - return err -} - -func (p *AgentEmitBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go deleted file mode 100644 index aa9857bb82ac..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go +++ /dev/null @@ -1,23 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package agent - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" - "github.com/uber/jaeger-client-go/thrift-gen/jaeger" - "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ - -func init() { -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go deleted file mode 100644 index 9c28f11c1ac7..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go +++ /dev/null @@ -1,21 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package agent - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" - "github.com/uber/jaeger-client-go/thrift-gen/jaeger" - "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ -var GoUnusedProtection__ int diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go deleted file mode 100644 index 1f79c1255cb1..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go +++ /dev/null @@ -1,435 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package baggage - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type BaggageRestrictionManager interface { - // getBaggageRestrictions retrieves the baggage restrictions for a specific service. 
- // Usually, baggageRestrictions apply to all services however there may be situations - // where a baggageKey might only be allowed to be set by a specific service. - // - // Parameters: - // - ServiceName - GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) -} - -type BaggageRestrictionManagerClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// getBaggageRestrictions retrieves the baggage restrictions for a specific service. -// Usually, baggageRestrictions apply to all services however there may be situations -// where a baggageKey might only be allowed to be set by a specific service. 
-// -// Parameters: -// - ServiceName -func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) { - if err = p.sendGetBaggageRestrictions(serviceName); err != nil { - return - } - return p.recvGetBaggageRestrictions() -} - -func (p *BaggageRestrictionManagerClient) sendGetBaggageRestrictions(serviceName string) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.CALL, p.SeqId); err != nil { - return - } - args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{ - ServiceName: serviceName, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *BaggageRestrictionManagerClient) recvGetBaggageRestrictions() (value []*BaggageRestriction, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "getBaggageRestrictions" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getBaggageRestrictions failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getBaggageRestrictions failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error1 error - error1, err = error0.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error1 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getBaggageRestrictions 
failed: invalid message type") - return - } - result := BaggageRestrictionManagerGetBaggageRestrictionsResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - -type BaggageRestrictionManagerProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler BaggageRestrictionManager -} - -func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor { - - self2 := &BaggageRestrictionManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self2.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler: handler} - return self2 -} - -func (p *BaggageRestrictionManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x3.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x3 - -} - -type baggageRestrictionManagerProcessorGetBaggageRestrictions struct { - handler BaggageRestrictionManager 
-} - -func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := BaggageRestrictionManagerGetBaggageRestrictionsResult{} - var retval []*BaggageRestriction - var err2 error - if retval, err2 = p.handler.GetBaggageRestrictions(args.ServiceName); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: "+err2.Error()) - oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - ServiceName -type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct { - ServiceName string `thrift:"serviceName,1" json:"serviceName"` -} - -func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs { - return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{} -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string { - return p.ServiceName 
-} -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getBaggageRestrictions_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) - } - if err := 
oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) - } - return err -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p) -} - -// Attributes: -// - Success -type BaggageRestrictionManagerGetBaggageRestrictionsResult struct { - Success []*BaggageRestriction `thrift:"success,0" json:"success,omitempty"` -} - -func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult { - return &BaggageRestrictionManagerGetBaggageRestrictionsResult{} -} - -var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction { - return p.Success -} -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) readField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BaggageRestriction, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem4 := &BaggageRestriction{} - if err := _elem4.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Success = append(p.Success, _elem4) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getBaggageRestrictions_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := 
oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go deleted file mode 100644 index ed35ce9ab514..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go +++ /dev/null @@ -1,18 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package baggage - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -func init() { -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go deleted file mode 100644 index 7888892f633f..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go +++ /dev/null @@ -1,154 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package baggage - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var GoUnusedProtection__ int - -// Attributes: -// - BaggageKey -// - MaxValueLength -type BaggageRestriction struct { - BaggageKey string `thrift:"baggageKey,1,required" json:"baggageKey"` - MaxValueLength int32 `thrift:"maxValueLength,2,required" json:"maxValueLength"` -} - -func NewBaggageRestriction() *BaggageRestriction { - return &BaggageRestriction{} -} - -func (p *BaggageRestriction) GetBaggageKey() string { - return p.BaggageKey -} - -func (p *BaggageRestriction) GetMaxValueLength() int32 { - return p.MaxValueLength -} -func (p *BaggageRestriction) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetBaggageKey bool = false - var issetMaxValueLength bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetBaggageKey = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetMaxValueLength = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetBaggageKey { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set")) - } - if !issetMaxValueLength { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set")) - } - return nil -} - -func (p *BaggageRestriction) readField1(iprot thrift.TProtocol) error { - if v, err := 
iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.BaggageKey = v - } - return nil -} - -func (p *BaggageRestriction) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.MaxValueLength = v - } - return nil -} - -func (p *BaggageRestriction) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("BaggageRestriction"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BaggageRestriction) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("baggageKey", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) - } - if err := oprot.WriteString(string(p.BaggageKey)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err) - } - return err -} - -func (p *BaggageRestriction) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("maxValueLength", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) - } - if err := oprot.WriteI32(int32(p.MaxValueLength)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) - } - 
if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) - } - return err -} - -func (p *BaggageRestriction) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestriction(%+v)", *p) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go deleted file mode 100644 index b32c37dd2615..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go +++ /dev/null @@ -1,242 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type Agent interface { - // Parameters: - // - Batch - EmitBatch(batch *Batch) (err error) -} - -type AgentClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Parameters: -// - Batch -func (p *AgentClient) EmitBatch(batch *Batch) (err error) { - if err = p.sendEmitBatch(batch); err != nil { - return - } - return -} - -func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) { - oprot := p.OutputProtocol - if oprot == 
nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil { - return - } - args := AgentEmitBatchArgs{ - Batch: batch, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -type AgentProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Agent -} - -func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAgentProcessor(handler Agent) *AgentProcessor { - - self6 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self6.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} - return self6 -} - -func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x7 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x7.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x7 - -} - -type agentProcessorEmitBatch struct { - handler Agent -} - -func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitBatchArgs{} - if err = 
args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err - } - - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.EmitBatch(args.Batch); err2 != nil { - return true, err2 - } - return true, nil -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Batch -type AgentEmitBatchArgs struct { - Batch *Batch `thrift:"batch,1" json:"batch"` -} - -func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { - return &AgentEmitBatchArgs{} -} - -var AgentEmitBatchArgs_Batch_DEFAULT *Batch - -func (p *AgentEmitBatchArgs) GetBatch() *Batch { - if !p.IsSetBatch() { - return AgentEmitBatchArgs_Batch_DEFAULT - } - return p.Batch -} -func (p *AgentEmitBatchArgs) IsSetBatch() bool { - return p.Batch != nil -} - -func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error { - p.Batch = &Batch{} - if err := p.Batch.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("emitBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", 
p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) - } - if err := p.Batch.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) - } - return err -} - -func (p *AgentEmitBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go deleted file mode 100644 index 621b8b1c20f4..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go +++ /dev/null @@ -1,18 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -func init() { -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go deleted file mode 100644 index d23ed2fc2839..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go +++ /dev/null @@ -1,1838 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var GoUnusedProtection__ int - -type TagType int64 - -const ( - TagType_STRING TagType = 0 - TagType_DOUBLE TagType = 1 - TagType_BOOL TagType = 2 - TagType_LONG TagType = 3 - TagType_BINARY TagType = 4 -) - -func (p TagType) String() string { - switch p { - case TagType_STRING: - return "STRING" - case TagType_DOUBLE: - return "DOUBLE" - case TagType_BOOL: - return "BOOL" - case TagType_LONG: - return "LONG" - case TagType_BINARY: - return "BINARY" - } - return "" -} - -func TagTypeFromString(s string) (TagType, error) { - switch s { - case "STRING": - return TagType_STRING, nil - case "DOUBLE": - return TagType_DOUBLE, nil - case "BOOL": - return TagType_BOOL, nil - case "LONG": - return TagType_LONG, nil - case "BINARY": - return TagType_BINARY, nil - } - return TagType(0), fmt.Errorf("not a valid TagType string") -} - -func TagTypePtr(v TagType) *TagType { return &v } - -func (p TagType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *TagType) UnmarshalText(text []byte) error { - q, err := TagTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -type SpanRefType int64 - -const ( - SpanRefType_CHILD_OF SpanRefType = 0 - SpanRefType_FOLLOWS_FROM SpanRefType = 1 
-) - -func (p SpanRefType) String() string { - switch p { - case SpanRefType_CHILD_OF: - return "CHILD_OF" - case SpanRefType_FOLLOWS_FROM: - return "FOLLOWS_FROM" - } - return "" -} - -func SpanRefTypeFromString(s string) (SpanRefType, error) { - switch s { - case "CHILD_OF": - return SpanRefType_CHILD_OF, nil - case "FOLLOWS_FROM": - return SpanRefType_FOLLOWS_FROM, nil - } - return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string") -} - -func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v } - -func (p SpanRefType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *SpanRefType) UnmarshalText(text []byte) error { - q, err := SpanRefTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -// Attributes: -// - Key -// - VType -// - VStr -// - VDouble -// - VBool -// - VLong -// - VBinary -type Tag struct { - Key string `thrift:"key,1,required" json:"key"` - VType TagType `thrift:"vType,2,required" json:"vType"` - VStr *string `thrift:"vStr,3" json:"vStr,omitempty"` - VDouble *float64 `thrift:"vDouble,4" json:"vDouble,omitempty"` - VBool *bool `thrift:"vBool,5" json:"vBool,omitempty"` - VLong *int64 `thrift:"vLong,6" json:"vLong,omitempty"` - VBinary []byte `thrift:"vBinary,7" json:"vBinary,omitempty"` -} - -func NewTag() *Tag { - return &Tag{} -} - -func (p *Tag) GetKey() string { - return p.Key -} - -func (p *Tag) GetVType() TagType { - return p.VType -} - -var Tag_VStr_DEFAULT string - -func (p *Tag) GetVStr() string { - if !p.IsSetVStr() { - return Tag_VStr_DEFAULT - } - return *p.VStr -} - -var Tag_VDouble_DEFAULT float64 - -func (p *Tag) GetVDouble() float64 { - if !p.IsSetVDouble() { - return Tag_VDouble_DEFAULT - } - return *p.VDouble -} - -var Tag_VBool_DEFAULT bool - -func (p *Tag) GetVBool() bool { - if !p.IsSetVBool() { - return Tag_VBool_DEFAULT - } - return *p.VBool -} - -var Tag_VLong_DEFAULT int64 - -func (p *Tag) GetVLong() int64 { - if !p.IsSetVLong() { - return 
Tag_VLong_DEFAULT - } - return *p.VLong -} - -var Tag_VBinary_DEFAULT []byte - -func (p *Tag) GetVBinary() []byte { - return p.VBinary -} -func (p *Tag) IsSetVStr() bool { - return p.VStr != nil -} - -func (p *Tag) IsSetVDouble() bool { - return p.VDouble != nil -} - -func (p *Tag) IsSetVBool() bool { - return p.VBool != nil -} - -func (p *Tag) IsSetVLong() bool { - return p.VLong != nil -} - -func (p *Tag) IsSetVBinary() bool { - return p.VBinary != nil -} - -func (p *Tag) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetKey bool = false - var issetVType bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetKey = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetVType = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - case 7: - if err := p.readField7(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetKey { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set")) - } - if !issetVType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, 
fmt.Errorf("Required field VType is not set")) - } - return nil -} - -func (p *Tag) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Key = v - } - return nil -} - -func (p *Tag) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - temp := TagType(v) - p.VType = temp - } - return nil -} - -func (p *Tag) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.VStr = &v - } - return nil -} - -func (p *Tag) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.VDouble = &v - } - return nil -} - -func (p *Tag) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.VBool = &v - } - return nil -} - -func (p *Tag) readField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.VLong = &v - } - return nil -} - -func (p *Tag) readField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.VBinary = v - } - return nil -} - -func (p *Tag) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Tag"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := 
p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Tag) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := oprot.WriteString(string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *Tag) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("vType", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) - } - if err := oprot.WriteI32(int32(p.VType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) - } - return err -} - -func (p *Tag) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetVStr() { - if err := oprot.WriteFieldBegin("vStr", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) - } - if err := oprot.WriteString(string(*p.VStr)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); 
err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) - } - } - return err -} - -func (p *Tag) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetVDouble() { - if err := oprot.WriteFieldBegin("vDouble", thrift.DOUBLE, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) - } - if err := oprot.WriteDouble(float64(*p.VDouble)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) - } - } - return err -} - -func (p *Tag) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetVBool() { - if err := oprot.WriteFieldBegin("vBool", thrift.BOOL, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) - } - if err := oprot.WriteBool(bool(*p.VBool)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) - } - } - return err -} - -func (p *Tag) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetVLong() { - if err := oprot.WriteFieldBegin("vLong", thrift.I64, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) - } - if err := oprot.WriteI64(int64(*p.VLong)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) - } - } - return err -} - -func (p *Tag) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetVBinary() { - if err := oprot.WriteFieldBegin("vBinary", thrift.STRING, 7); err != nil 
{ - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) - } - if err := oprot.WriteBinary(p.VBinary); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) - } - } - return err -} - -func (p *Tag) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Tag(%+v)", *p) -} - -// Attributes: -// - Timestamp -// - Fields -type Log struct { - Timestamp int64 `thrift:"timestamp,1,required" json:"timestamp"` - Fields []*Tag `thrift:"fields,2,required" json:"fields"` -} - -func NewLog() *Log { - return &Log{} -} - -func (p *Log) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Log) GetFields() []*Tag { - return p.Fields -} -func (p *Log) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTimestamp bool = false - var issetFields bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTimestamp = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetFields = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTimestamp { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set")) - } - if !issetFields { 
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set")) - } - return nil -} - -func (p *Log) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Timestamp = v - } - return nil -} - -func (p *Log) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Fields = tSlice - for i := 0; i < size; i++ { - _elem0 := &Tag{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Fields = append(p.Fields, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Log) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Log"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Log) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) - } - return err -} - -func (p *Log) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("fields", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Fields)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Fields { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) - } - return err -} - -func (p *Log) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Log(%+v)", *p) -} - -// Attributes: -// - RefType -// - TraceIdLow -// - TraceIdHigh -// - SpanId -type SpanRef struct { - RefType SpanRefType `thrift:"refType,1,required" json:"refType"` - TraceIdLow int64 `thrift:"traceIdLow,2,required" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,3,required" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,4,required" json:"spanId"` -} - -func NewSpanRef() *SpanRef { - return &SpanRef{} -} - -func (p *SpanRef) GetRefType() SpanRefType { - return p.RefType -} - -func (p *SpanRef) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *SpanRef) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *SpanRef) GetSpanId() int64 { - return p.SpanId -} -func (p *SpanRef) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRefType bool = false - var issetTraceIdLow bool = false - var issetTraceIdHigh 
bool = false - var issetSpanId bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetRefType = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTraceIdLow = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - issetTraceIdHigh = true - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - issetSpanId = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRefType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set")) - } - if !issetTraceIdLow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) - } - if !issetTraceIdHigh { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) - } - if !issetSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) - } - return nil -} - -func (p *SpanRef) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := SpanRefType(v) - p.RefType = temp - } - return nil -} - -func (p *SpanRef) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TraceIdLow = v - } - 
return nil -} - -func (p *SpanRef) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.TraceIdHigh = v - } - return nil -} - -func (p *SpanRef) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.SpanId = v - } - return nil -} - -func (p *SpanRef) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("SpanRef"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SpanRef) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("refType", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) - } - if err := oprot.WriteI32(int32(p.RefType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) - } - return err -} - -func (p *SpanRef) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) - } - if err := 
oprot.WriteI64(int64(p.TraceIdLow)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) - } - return err -} - -func (p *SpanRef) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) - } - return err -} - -func (p *SpanRef) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) - } - if err := oprot.WriteI64(int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) - } - return err -} - -func (p *SpanRef) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SpanRef(%+v)", *p) -} - -// Attributes: -// - TraceIdLow -// - TraceIdHigh -// - SpanId -// - ParentSpanId -// - OperationName -// - References -// - Flags -// - StartTime -// - Duration -// - Tags -// - Logs -type Span struct { - TraceIdLow int64 `thrift:"traceIdLow,1,required" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,2,required" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,3,required" json:"spanId"` - ParentSpanId int64 `thrift:"parentSpanId,4,required" 
json:"parentSpanId"` - OperationName string `thrift:"operationName,5,required" json:"operationName"` - References []*SpanRef `thrift:"references,6" json:"references,omitempty"` - Flags int32 `thrift:"flags,7,required" json:"flags"` - StartTime int64 `thrift:"startTime,8,required" json:"startTime"` - Duration int64 `thrift:"duration,9,required" json:"duration"` - Tags []*Tag `thrift:"tags,10" json:"tags,omitempty"` - Logs []*Log `thrift:"logs,11" json:"logs,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - -func (p *Span) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *Span) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *Span) GetSpanId() int64 { - return p.SpanId -} - -func (p *Span) GetParentSpanId() int64 { - return p.ParentSpanId -} - -func (p *Span) GetOperationName() string { - return p.OperationName -} - -var Span_References_DEFAULT []*SpanRef - -func (p *Span) GetReferences() []*SpanRef { - return p.References -} - -func (p *Span) GetFlags() int32 { - return p.Flags -} - -func (p *Span) GetStartTime() int64 { - return p.StartTime -} - -func (p *Span) GetDuration() int64 { - return p.Duration -} - -var Span_Tags_DEFAULT []*Tag - -func (p *Span) GetTags() []*Tag { - return p.Tags -} - -var Span_Logs_DEFAULT []*Log - -func (p *Span) GetLogs() []*Log { - return p.Logs -} -func (p *Span) IsSetReferences() bool { - return p.References != nil -} - -func (p *Span) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Span) IsSetLogs() bool { - return p.Logs != nil -} - -func (p *Span) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTraceIdLow bool = false - var issetTraceIdHigh bool = false - var issetSpanId bool = false - var issetParentSpanId bool = false - var issetOperationName bool = false - var issetFlags bool = false - var issetStartTime bool = false - var issetDuration bool = false - - 
for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTraceIdLow = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTraceIdHigh = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - issetSpanId = true - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - issetParentSpanId = true - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - issetOperationName = true - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - case 7: - if err := p.readField7(iprot); err != nil { - return err - } - issetFlags = true - case 8: - if err := p.readField8(iprot); err != nil { - return err - } - issetStartTime = true - case 9: - if err := p.readField9(iprot); err != nil { - return err - } - issetDuration = true - case 10: - if err := p.readField10(iprot); err != nil { - return err - } - case 11: - if err := p.readField11(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTraceIdLow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) - } - if !issetTraceIdHigh { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) - } - if !issetSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) - } - if !issetParentSpanId { - return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set")) - } - if !issetOperationName { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set")) - } - if !issetFlags { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set")) - } - if !issetStartTime { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set")) - } - if !issetDuration { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set")) - } - return nil -} - -func (p *Span) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceIdLow = v - } - return nil -} - -func (p *Span) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TraceIdHigh = v - } - return nil -} - -func (p *Span) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.SpanId = v - } - return nil -} - -func (p *Span) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.ParentSpanId = v - } - return nil -} - -func (p *Span) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.OperationName = v - } - return nil -} - -func (p *Span) readField6(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := 
make([]*SpanRef, 0, size) - p.References = tSlice - for i := 0; i < size; i++ { - _elem1 := &SpanRef{} - if err := _elem1.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.References = append(p.References, _elem1) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) readField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.Flags = v - } - return nil -} - -func (p *Span) readField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 8: ", err) - } else { - p.StartTime = v - } - return nil -} - -func (p *Span) readField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.Duration = v - } - return nil -} - -func (p *Span) readField10(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i++ { - _elem2 := &Tag{} - if err := _elem2.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) - } - p.Tags = append(p.Tags, _elem2) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) readField11(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Log, 0, size) - p.Logs = tSlice - for i := 0; i < size; i++ { - _elem3 := &Log{} - if err := _elem3.Read(iprot); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) - } - p.Logs = append(p.Logs, _elem3) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := p.writeField9(oprot); err != nil { - return err - } - if err := p.writeField10(oprot); err != nil { - return err - } - if err := p.writeField11(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Span) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) - } - return err -} - -func (p *Span) writeField2(oprot 
thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) - } - return err -} - -func (p *Span) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spanId", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) - } - if err := oprot.WriteI64(int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) - } - return err -} - -func (p *Span) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("parentSpanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) - } - if err := oprot.WriteI64(int64(p.ParentSpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) - } - return err -} - -func (p *Span) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("operationName", thrift.STRING, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) - } - if err := oprot.WriteString(string(p.OperationName)); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) - } - return err -} - -func (p *Span) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetReferences() { - if err := oprot.WriteFieldBegin("references", thrift.LIST, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.References)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.References { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) - } - } - return err -} - -func (p *Span) writeField7(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("flags", thrift.I32, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) - } - if err := oprot.WriteI32(int32(p.Flags)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) - } - return err -} - -func (p *Span) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("startTime", thrift.I64, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) - } - if err := oprot.WriteI64(int64(p.StartTime)); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) - } - return err -} - -func (p *Span) writeField9(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("duration", thrift.I64, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) - } - if err := oprot.WriteI64(int64(p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) - } - return err -} - -func (p *Span) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin("tags", thrift.LIST, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) - } - } - return err -} - -func (p *Span) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetLogs() { - if err := oprot.WriteFieldBegin("logs", thrift.LIST, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Logs)); err != nil { - return thrift.PrependError("error 
writing list begin: ", err) - } - for _, v := range p.Logs { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) - } - } - return err -} - -func (p *Span) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - ServiceName -// - Tags -type Process struct { - ServiceName string `thrift:"serviceName,1,required" json:"serviceName"` - Tags []*Tag `thrift:"tags,2" json:"tags,omitempty"` -} - -func NewProcess() *Process { - return &Process{} -} - -func (p *Process) GetServiceName() string { - return p.ServiceName -} - -var Process_Tags_DEFAULT []*Tag - -func (p *Process) GetTags() []*Tag { - return p.Tags -} -func (p *Process) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Process) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetServiceName bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetServiceName = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetServiceName { 
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set")) - } - return nil -} - -func (p *Process) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *Process) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i++ { - _elem4 := &Tag{} - if err := _elem4.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Tags = append(p.Tags, _elem4) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Process) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Process"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Process) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) - } - if err := oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) - } - return err -} - -func (p *Process) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin("tags", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) - } - } - return err -} - -func (p *Process) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Process(%+v)", *p) -} - -// Attributes: -// - Process -// - Spans -type Batch struct { - Process *Process `thrift:"process,1,required" json:"process"` - Spans []*Span `thrift:"spans,2,required" json:"spans"` -} - -func NewBatch() *Batch { - return &Batch{} -} - -var Batch_Process_DEFAULT *Process - -func (p *Batch) GetProcess() *Process { - if !p.IsSetProcess() { - return Batch_Process_DEFAULT - } - return p.Process -} - -func (p *Batch) GetSpans() []*Span { - return p.Spans -} -func (p *Batch) IsSetProcess() bool { - return p.Process != nil -} - -func (p *Batch) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetProcess bool = false - var issetSpans bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), 
err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetProcess = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetSpans = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetProcess { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set")) - } - if !issetSpans { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set")) - } - return nil -} - -func (p *Batch) readField1(iprot thrift.TProtocol) error { - p.Process = &Process{} - if err := p.Process.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err) - } - return nil -} - -func (p *Batch) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem5 := &Span{} - if err := _elem5.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) - } - p.Spans = append(p.Spans, _elem5) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Batch) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Batch"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err 
- } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Batch) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("process", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) - } - if err := p.Process.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) - } - return err -} - -func (p *Batch) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spans", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) - } - return err -} - -func (p *Batch) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Batch(%+v)", *p) -} - -// Attributes: -// - Ok -type BatchSubmitResponse struct { - Ok bool `thrift:"ok,1,required" json:"ok"` -} - -func NewBatchSubmitResponse() *BatchSubmitResponse { - return &BatchSubmitResponse{} -} - -func (p *BatchSubmitResponse) GetOk() bool { - return p.Ok -} -func (p 
*BatchSubmitResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetOk = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) - } - return nil -} - -func (p *BatchSubmitResponse) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ok = v - } - return nil -} - -func (p *BatchSubmitResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("BatchSubmitResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BatchSubmitResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) - } - if err := 
oprot.WriteBool(bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) - } - return err -} - -func (p *BatchSubmitResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BatchSubmitResponse(%+v)", *p) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go deleted file mode 100644 index 0f6e3a884d95..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go +++ /dev/null @@ -1,18 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package sampling - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -func init() { -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go deleted file mode 100644 index 33179cfeb3b9..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go +++ /dev/null @@ -1,410 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package sampling - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type SamplingManager interface { - // Parameters: - // - ServiceName - GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) -} - -type SamplingManagerClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient { - return &SamplingManagerClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient { - return &SamplingManagerClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Parameters: -// - ServiceName -func (p *SamplingManagerClient) GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) { - if err = p.sendGetSamplingStrategy(serviceName); err != nil { - return - } - return p.recvGetSamplingStrategy() -} - -func (p *SamplingManagerClient) sendGetSamplingStrategy(serviceName string) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("getSamplingStrategy", thrift.CALL, p.SeqId); err != nil { - return - } - args := SamplingManagerGetSamplingStrategyArgs{ - ServiceName: serviceName, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *SamplingManagerClient) recvGetSamplingStrategy() (value *SamplingStrategyResponse, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = 
p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "getSamplingStrategy" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getSamplingStrategy failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getSamplingStrategy failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error1 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error2 error - error2, err = error1.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error2 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getSamplingStrategy failed: invalid message type") - return - } - result := SamplingManagerGetSamplingStrategyResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - -type SamplingManagerProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler SamplingManager -} - -func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor { - - self3 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - 
self3.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler} - return self3 -} - -func (p *SamplingManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x4.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x4 - -} - -type samplingManagerProcessorGetSamplingStrategy struct { - handler SamplingManager -} - -func (p *samplingManagerProcessorGetSamplingStrategy) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := SamplingManagerGetSamplingStrategyArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := SamplingManagerGetSamplingStrategyResult{} - var retval *SamplingStrategyResponse - var err2 error - if retval, err2 = p.handler.GetSamplingStrategy(args.ServiceName); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: "+err2.Error()) - oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getSamplingStrategy", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = 
result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - ServiceName -type SamplingManagerGetSamplingStrategyArgs struct { - ServiceName string `thrift:"serviceName,1" json:"serviceName"` -} - -func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs { - return &SamplingManagerGetSamplingStrategyArgs{} -} - -func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string { - return p.ServiceName -} -func (p *SamplingManagerGetSamplingStrategyArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getSamplingStrategy_args"); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) - } - if err := oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) - } - return err -} - -func (p *SamplingManagerGetSamplingStrategyArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p) -} - -// Attributes: -// - Success -type SamplingManagerGetSamplingStrategyResult struct { - Success *SamplingStrategyResponse `thrift:"success,0" json:"success,omitempty"` -} - -func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult { - return &SamplingManagerGetSamplingStrategyResult{} -} - -var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse - -func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse { - if !p.IsSetSuccess() { - return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT - } - return p.Success -} -func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) Read(iprot thrift.TProtocol) error { - 
if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) readField0(iprot thrift.TProtocol) error { - p.Success = &SamplingStrategyResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getSamplingStrategy_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error 
writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *SamplingManagerGetSamplingStrategyResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go deleted file mode 100644 index 9abaf0542d40..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go +++ /dev/null @@ -1,873 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package sampling - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var GoUnusedProtection__ int - -type SamplingStrategyType int64 - -const ( - SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0 - SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1 -) - -func (p SamplingStrategyType) String() string { - switch p { - case SamplingStrategyType_PROBABILISTIC: - return "PROBABILISTIC" - case SamplingStrategyType_RATE_LIMITING: - return "RATE_LIMITING" - } - return "" -} - -func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) { - switch s { - case "PROBABILISTIC": - return SamplingStrategyType_PROBABILISTIC, nil - case "RATE_LIMITING": - return SamplingStrategyType_RATE_LIMITING, nil - } - return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string") -} - -func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v } - -func (p SamplingStrategyType) MarshalText() ([]byte, error) { - return []byte(p.String()), 
nil -} - -func (p *SamplingStrategyType) UnmarshalText(text []byte) error { - q, err := SamplingStrategyTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -// Attributes: -// - SamplingRate -type ProbabilisticSamplingStrategy struct { - SamplingRate float64 `thrift:"samplingRate,1,required" json:"samplingRate"` -} - -func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy { - return &ProbabilisticSamplingStrategy{} -} - -func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 { - return p.SamplingRate -} -func (p *ProbabilisticSamplingStrategy) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetSamplingRate bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetSamplingRate = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetSamplingRate { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set")) - } - return nil -} - -func (p *ProbabilisticSamplingStrategy) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.SamplingRate = v - } - return nil -} - -func (p *ProbabilisticSamplingStrategy) Write(oprot thrift.TProtocol) error { - if err := 
oprot.WriteStructBegin("ProbabilisticSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ProbabilisticSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("samplingRate", thrift.DOUBLE, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err) - } - if err := oprot.WriteDouble(float64(p.SamplingRate)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err) - } - return err -} - -func (p *ProbabilisticSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - MaxTracesPerSecond -type RateLimitingSamplingStrategy struct { - MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" json:"maxTracesPerSecond"` -} - -func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy { - return &RateLimitingSamplingStrategy{} -} - -func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 { - return p.MaxTracesPerSecond -} -func (p *RateLimitingSamplingStrategy) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetMaxTracesPerSecond bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetMaxTracesPerSecond = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetMaxTracesPerSecond { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set")) - } - return nil -} - -func (p *RateLimitingSamplingStrategy) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.MaxTracesPerSecond = v - } - return nil -} - -func (p *RateLimitingSamplingStrategy) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("RateLimitingSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RateLimitingSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("maxTracesPerSecond", thrift.I16, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err) - } - if err := oprot.WriteI16(int16(p.MaxTracesPerSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err) - } - if err := 
oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err) - } - return err -} - -func (p *RateLimitingSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - Operation -// - ProbabilisticSampling -type OperationSamplingStrategy struct { - Operation string `thrift:"operation,1,required" json:"operation"` - ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" json:"probabilisticSampling"` -} - -func NewOperationSamplingStrategy() *OperationSamplingStrategy { - return &OperationSamplingStrategy{} -} - -func (p *OperationSamplingStrategy) GetOperation() string { - return p.Operation -} - -var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy - -func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { - if !p.IsSetProbabilisticSampling() { - return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT - } - return p.ProbabilisticSampling -} -func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool { - return p.ProbabilisticSampling != nil -} - -func (p *OperationSamplingStrategy) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOperation bool = false - var issetProbabilisticSampling bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetOperation = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetProbabilisticSampling = true - 
default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOperation { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set")) - } - if !issetProbabilisticSampling { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set")) - } - return nil -} - -func (p *OperationSamplingStrategy) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Operation = v - } - return nil -} - -func (p *OperationSamplingStrategy) readField2(iprot thrift.TProtocol) error { - p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} - if err := p.ProbabilisticSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) - } - return nil -} - -func (p *OperationSamplingStrategy) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("OperationSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *OperationSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("operation", thrift.STRING, 1); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err) - } - if err := oprot.WriteString(string(p.Operation)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err) - } - return err -} - -func (p *OperationSamplingStrategy) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) - } - if err := p.ProbabilisticSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) - } - return err -} - -func (p *OperationSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - DefaultSamplingProbability -// - DefaultLowerBoundTracesPerSecond -// - PerOperationStrategies -// - DefaultUpperBoundTracesPerSecond -type PerOperationSamplingStrategies struct { - DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" json:"defaultSamplingProbability"` - DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" json:"defaultLowerBoundTracesPerSecond"` - PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" json:"perOperationStrategies"` - DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" json:"defaultUpperBoundTracesPerSecond,omitempty"` -} - -func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies 
{ - return &PerOperationSamplingStrategies{} -} - -func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 { - return p.DefaultSamplingProbability -} - -func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 { - return p.DefaultLowerBoundTracesPerSecond -} - -func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy { - return p.PerOperationStrategies -} - -var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64 - -func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 { - if !p.IsSetDefaultUpperBoundTracesPerSecond() { - return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT - } - return *p.DefaultUpperBoundTracesPerSecond -} -func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool { - return p.DefaultUpperBoundTracesPerSecond != nil -} - -func (p *PerOperationSamplingStrategies) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetDefaultSamplingProbability bool = false - var issetDefaultLowerBoundTracesPerSecond bool = false - var issetPerOperationStrategies bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetDefaultSamplingProbability = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetDefaultLowerBoundTracesPerSecond = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - issetPerOperationStrategies = true - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - default: - if 
err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetDefaultSamplingProbability { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set")) - } - if !issetDefaultLowerBoundTracesPerSecond { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set")) - } - if !issetPerOperationStrategies { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set")) - } - return nil -} - -func (p *PerOperationSamplingStrategies) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.DefaultSamplingProbability = v - } - return nil -} - -func (p *PerOperationSamplingStrategies) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.DefaultLowerBoundTracesPerSecond = v - } - return nil -} - -func (p *PerOperationSamplingStrategies) readField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*OperationSamplingStrategy, 0, size) - p.PerOperationStrategies = tSlice - for i := 0; i < size; i++ { - _elem0 := &OperationSamplingStrategy{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return 
thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *PerOperationSamplingStrategies) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.DefaultUpperBoundTracesPerSecond = &v - } - return nil -} - -func (p *PerOperationSamplingStrategies) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("PerOperationSamplingStrategies"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *PerOperationSamplingStrategies) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("defaultSamplingProbability", thrift.DOUBLE, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err) - } - if err := oprot.WriteDouble(float64(p.DefaultSamplingProbability)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err) - } - return err -} - -func (p *PerOperationSamplingStrategies) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err) - } - if err := oprot.WriteDouble(float64(p.DefaultLowerBoundTracesPerSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err) - } - return err -} - -func (p *PerOperationSamplingStrategies) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("perOperationStrategies", thrift.LIST, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PerOperationStrategies)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.PerOperationStrategies { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err) - } - return err -} - -func (p *PerOperationSamplingStrategies) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDefaultUpperBoundTracesPerSecond() { - if err := oprot.WriteFieldBegin("defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err) - } - if err := oprot.WriteDouble(float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err) - } - if err 
:= oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err) - } - } - return err -} - -func (p *PerOperationSamplingStrategies) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p) -} - -// Attributes: -// - StrategyType -// - ProbabilisticSampling -// - RateLimitingSampling -// - OperationSampling -type SamplingStrategyResponse struct { - StrategyType SamplingStrategyType `thrift:"strategyType,1,required" json:"strategyType"` - ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" json:"probabilisticSampling,omitempty"` - RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" json:"rateLimitingSampling,omitempty"` - OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" json:"operationSampling,omitempty"` -} - -func NewSamplingStrategyResponse() *SamplingStrategyResponse { - return &SamplingStrategyResponse{} -} - -func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType { - return p.StrategyType -} - -var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy - -func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { - if !p.IsSetProbabilisticSampling() { - return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT - } - return p.ProbabilisticSampling -} - -var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy - -func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy { - if !p.IsSetRateLimitingSampling() { - return SamplingStrategyResponse_RateLimitingSampling_DEFAULT - } - return p.RateLimitingSampling -} - -var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies - -func (p *SamplingStrategyResponse) GetOperationSampling() 
*PerOperationSamplingStrategies { - if !p.IsSetOperationSampling() { - return SamplingStrategyResponse_OperationSampling_DEFAULT - } - return p.OperationSampling -} -func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool { - return p.ProbabilisticSampling != nil -} - -func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool { - return p.RateLimitingSampling != nil -} - -func (p *SamplingStrategyResponse) IsSetOperationSampling() bool { - return p.OperationSampling != nil -} - -func (p *SamplingStrategyResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetStrategyType bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetStrategyType = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetStrategyType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set")) - } - return nil -} - -func (p *SamplingStrategyResponse) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := SamplingStrategyType(v) - 
p.StrategyType = temp - } - return nil -} - -func (p *SamplingStrategyResponse) readField2(iprot thrift.TProtocol) error { - p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} - if err := p.ProbabilisticSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) readField3(iprot thrift.TProtocol) error { - p.RateLimitingSampling = &RateLimitingSamplingStrategy{} - if err := p.RateLimitingSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) readField4(iprot thrift.TProtocol) error { - p.OperationSampling = &PerOperationSamplingStrategies{} - if err := p.OperationSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("SamplingStrategyResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SamplingStrategyResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("strategyType", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", 
p), err) - } - if err := oprot.WriteI32(int32(p.StrategyType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err) - } - return err -} - -func (p *SamplingStrategyResponse) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetProbabilisticSampling() { - if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) - } - if err := p.ProbabilisticSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) - } - } - return err -} - -func (p *SamplingStrategyResponse) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetRateLimitingSampling() { - if err := oprot.WriteFieldBegin("rateLimitingSampling", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err) - } - if err := p.RateLimitingSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err) - } - } - return err -} - -func (p *SamplingStrategyResponse) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetOperationSampling() { - if err := oprot.WriteFieldBegin("operationSampling", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err) - } - if err := 
p.OperationSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err) - } - } - return err -} - -func (p *SamplingStrategyResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go deleted file mode 100644 index a53d46f0efe9..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go +++ /dev/null @@ -1,35 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package zipkincore - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -const CLIENT_SEND = "cs" -const CLIENT_RECV = "cr" -const SERVER_SEND = "ss" -const SERVER_RECV = "sr" -const MESSAGE_SEND = "ms" -const MESSAGE_RECV = "mr" -const WIRE_SEND = "ws" -const WIRE_RECV = "wr" -const CLIENT_SEND_FRAGMENT = "csf" -const CLIENT_RECV_FRAGMENT = "crf" -const SERVER_SEND_FRAGMENT = "ssf" -const SERVER_RECV_FRAGMENT = "srf" -const LOCAL_COMPONENT = "lc" -const CLIENT_ADDR = "ca" -const SERVER_ADDR = "sa" -const MESSAGE_ADDR = "ma" - -func init() { -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go deleted file mode 100644 index 2d49e1d5f216..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go +++ /dev/null @@ -1,1337 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package zipkincore - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var GoUnusedProtection__ int - -type AnnotationType int64 - -const ( - AnnotationType_BOOL AnnotationType = 0 - AnnotationType_BYTES AnnotationType = 1 - AnnotationType_I16 AnnotationType = 2 - AnnotationType_I32 AnnotationType = 3 - AnnotationType_I64 AnnotationType = 4 - AnnotationType_DOUBLE AnnotationType = 5 - AnnotationType_STRING AnnotationType = 6 -) - -func (p AnnotationType) String() string { - switch p { - case AnnotationType_BOOL: - return "BOOL" - case AnnotationType_BYTES: - return "BYTES" - case AnnotationType_I16: - return "I16" - case AnnotationType_I32: - return "I32" - case AnnotationType_I64: - return "I64" - case AnnotationType_DOUBLE: - return "DOUBLE" - case AnnotationType_STRING: - return "STRING" - } - return "" -} - -func AnnotationTypeFromString(s string) (AnnotationType, error) { - switch s { - case "BOOL": - return AnnotationType_BOOL, nil - case "BYTES": - return AnnotationType_BYTES, nil - case "I16": - return AnnotationType_I16, nil - case "I32": - return AnnotationType_I32, nil - case "I64": - return AnnotationType_I64, nil - case "DOUBLE": - return AnnotationType_DOUBLE, nil - case "STRING": - return AnnotationType_STRING, nil - } - return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") -} - -func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } - -func (p AnnotationType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *AnnotationType) UnmarshalText(text []byte) error { - q, err := AnnotationTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -// Indicates the network context of a service recording an annotation with two -// exceptions. -// -// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, -// the endpoint indicates the source or destination of an RPC. 
This exception -// allows zipkin to display network context of uninstrumented services, or -// clients such as web browsers. -// -// Attributes: -// - Ipv4: IPv4 host address packed into 4 bytes. -// -// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 -// - Port: IPv4 port -// -// Note: this is to be treated as an unsigned integer, so watch for negatives. -// -// Conventionally, when the port isn't known, port = 0. -// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web" -// -// Conventionally, when the service name isn't known, service_name = "unknown". -// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes() -type Endpoint struct { - Ipv4 int32 `thrift:"ipv4,1" json:"ipv4"` - Port int16 `thrift:"port,2" json:"port"` - ServiceName string `thrift:"service_name,3" json:"service_name"` - Ipv6 []byte `thrift:"ipv6,4" json:"ipv6,omitempty"` -} - -func NewEndpoint() *Endpoint { - return &Endpoint{} -} - -func (p *Endpoint) GetIpv4() int32 { - return p.Ipv4 -} - -func (p *Endpoint) GetPort() int16 { - return p.Port -} - -func (p *Endpoint) GetServiceName() string { - return p.ServiceName -} - -var Endpoint_Ipv6_DEFAULT []byte - -func (p *Endpoint) GetIpv6() []byte { - return p.Ipv6 -} -func (p *Endpoint) IsSetIpv6() bool { - return p.Ipv6 != nil -} - -func (p *Endpoint) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := 
p.readField4(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Endpoint) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ipv4 = v - } - return nil -} - -func (p *Endpoint) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Port = v - } - return nil -} - -func (p *Endpoint) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *Endpoint) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.Ipv6 = v - } - return nil -} - -func (p *Endpoint) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Endpoint"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err 
error) { - if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) - } - if err := oprot.WriteI32(int32(p.Ipv4)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) - } - return err -} - -func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) - } - if err := oprot.WriteI16(int16(p.Port)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) - } - return err -} - -func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) - } - if err := oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) - } - return err -} - -func (p *Endpoint) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetIpv6() { - if err := oprot.WriteFieldBegin("ipv6", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) - } - if err := oprot.WriteBinary(p.Ipv6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) - } - if 
err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) - } - } - return err -} - -func (p *Endpoint) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Endpoint(%+v)", *p) -} - -// An annotation is similar to a log statement. It includes a host field which -// allows these events to be attributed properly, and also aggregatable. -// -// Attributes: -// - Timestamp: Microseconds from epoch. -// -// This value should use the most precise value possible. For example, -// gettimeofday or syncing nanoTime against a tick of currentTimeMillis. -// - Value -// - Host: Always the host that recorded the event. By specifying the host you allow -// rollup of all events (such as client requests to a service) by IP address. -type Annotation struct { - Timestamp int64 `thrift:"timestamp,1" json:"timestamp"` - Value string `thrift:"value,2" json:"value"` - Host *Endpoint `thrift:"host,3" json:"host,omitempty"` -} - -func NewAnnotation() *Annotation { - return &Annotation{} -} - -func (p *Annotation) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Annotation) GetValue() string { - return p.Value -} - -var Annotation_Host_DEFAULT *Endpoint - -func (p *Annotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return Annotation_Host_DEFAULT - } - return p.Host -} -func (p *Annotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *Annotation) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - 
return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Annotation) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Timestamp = v - } - return nil -} - -func (p *Annotation) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *Annotation) readField3(iprot thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := p.Host.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *Annotation) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Annotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) - } - if err := 
oprot.WriteI64(int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) - } - return err -} - -func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) - } - if err := oprot.WriteString(string(p.Value)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) - } - return err -} - -func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) - } - if err := p.Host.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) - } - } - return err -} - -func (p *Annotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Annotation(%+v)", *p) -} - -// Binary annotations are tags applied to a Span to give it context. For -// example, a binary annotation of "http.uri" could the path to a resource in a -// RPC call. -// -// Binary annotations of type STRING are always queryable, though more a -// historical implementation detail than a structural concern. -// -// Binary annotations can repeat, and vary on the host. Similar to Annotation, -// the host indicates who logged the event. 
This allows you to tell the -// difference between the client and server side of the same key. For example, -// the key "http.uri" might be different on the client and server side due to -// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, -// you can see the different points of view, which often help in debugging. -// -// Attributes: -// - Key -// - Value -// - AnnotationType -// - Host: The host that recorded tag, which allows you to differentiate between -// multiple tags with the same key. There are two exceptions to this. -// -// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or -// destination of an RPC. This exception allows zipkin to display network -// context of uninstrumented services, or clients such as web browsers. -type BinaryAnnotation struct { - Key string `thrift:"key,1" json:"key"` - Value []byte `thrift:"value,2" json:"value"` - AnnotationType AnnotationType `thrift:"annotation_type,3" json:"annotation_type"` - Host *Endpoint `thrift:"host,4" json:"host,omitempty"` -} - -func NewBinaryAnnotation() *BinaryAnnotation { - return &BinaryAnnotation{} -} - -func (p *BinaryAnnotation) GetKey() string { - return p.Key -} - -func (p *BinaryAnnotation) GetValue() []byte { - return p.Value -} - -func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { - return p.AnnotationType -} - -var BinaryAnnotation_Host_DEFAULT *Endpoint - -func (p *BinaryAnnotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return BinaryAnnotation_Host_DEFAULT - } - return p.Host -} -func (p *BinaryAnnotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if 
fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BinaryAnnotation) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Key = v - } - return nil -} - -func (p *BinaryAnnotation) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *BinaryAnnotation) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - temp := AnnotationType(v) - p.AnnotationType = temp - } - return nil -} - -func (p *BinaryAnnotation) readField4(iprot thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := p.Host.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); 
err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := oprot.WriteString(string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) - } - if err := oprot.WriteBinary(p.Value); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) - } - if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 
3:annotation_type: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) - } - if err := p.Host.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) - } - } - return err -} - -func (p *BinaryAnnotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BinaryAnnotation(%+v)", *p) -} - -// A trace is a series of spans (often RPC calls) which form a latency tree. -// -// The root span is where trace_id = id and parent_id = Nil. The root span is -// usually the longest interval in the trace, starting with a SERVER_RECV -// annotation and ending with a SERVER_SEND. -// -// Attributes: -// - TraceID -// - Name: Span name in lowercase, rpc method for example -// -// Conventionally, when the span name isn't known, name = "unknown". -// - ID -// - ParentID -// - Annotations -// - BinaryAnnotations -// - Debug -// - Timestamp: Microseconds from epoch of the creation of this span. -// -// This value should be set directly by instrumentation, using the most -// precise value possible. For example, gettimeofday or syncing nanoTime -// against a tick of currentTimeMillis. -// -// For compatibilty with instrumentation that precede this field, collectors -// or span stores can derive this via Annotation.timestamp. -// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. -// -// This field is optional for compatibility with old data: first-party span -// stores are expected to support this at time of introduction. -// - Duration: Measurement of duration in microseconds, used to support queries. 
-// -// This value should be set directly, where possible. Doing so encourages -// precise measurement decoupled from problems of clocks, such as skew or NTP -// updates causing time to move backwards. -// -// For compatibilty with instrumentation that precede this field, collectors -// or span stores can derive this by subtracting Annotation.timestamp. -// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. -// -// If this field is persisted as unset, zipkin will continue to work, except -// duration query support will be implementation-specific. Similarly, setting -// this field non-atomically is implementation-specific. -// -// This field is i64 vs i32 to support spans longer than 35 minutes. -// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this -// means the trace uses 128 bit traceIds instead of 64 bit. -type Span struct { - TraceID int64 `thrift:"trace_id,1" json:"trace_id"` - // unused field # 2 - Name string `thrift:"name,3" json:"name"` - ID int64 `thrift:"id,4" json:"id"` - ParentID *int64 `thrift:"parent_id,5" json:"parent_id,omitempty"` - Annotations []*Annotation `thrift:"annotations,6" json:"annotations"` - // unused field # 7 - BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" json:"binary_annotations"` - Debug bool `thrift:"debug,9" json:"debug,omitempty"` - Timestamp *int64 `thrift:"timestamp,10" json:"timestamp,omitempty"` - Duration *int64 `thrift:"duration,11" json:"duration,omitempty"` - TraceIDHigh *int64 `thrift:"trace_id_high,12" json:"trace_id_high,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - -func (p *Span) GetTraceID() int64 { - return p.TraceID -} - -func (p *Span) GetName() string { - return p.Name -} - -func (p *Span) GetID() int64 { - return p.ID -} - -var Span_ParentID_DEFAULT int64 - -func (p *Span) GetParentID() int64 { - if !p.IsSetParentID() { - return Span_ParentID_DEFAULT - } - return *p.ParentID -} - -func (p *Span) GetAnnotations() 
[]*Annotation { - return p.Annotations -} - -func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { - return p.BinaryAnnotations -} - -var Span_Debug_DEFAULT bool = false - -func (p *Span) GetDebug() bool { - return p.Debug -} - -var Span_Timestamp_DEFAULT int64 - -func (p *Span) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return Span_Timestamp_DEFAULT - } - return *p.Timestamp -} - -var Span_Duration_DEFAULT int64 - -func (p *Span) GetDuration() int64 { - if !p.IsSetDuration() { - return Span_Duration_DEFAULT - } - return *p.Duration -} - -var Span_TraceIDHigh_DEFAULT int64 - -func (p *Span) GetTraceIDHigh() int64 { - if !p.IsSetTraceIDHigh() { - return Span_TraceIDHigh_DEFAULT - } - return *p.TraceIDHigh -} -func (p *Span) IsSetParentID() bool { - return p.ParentID != nil -} - -func (p *Span) IsSetDebug() bool { - return p.Debug != Span_Debug_DEFAULT -} - -func (p *Span) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *Span) IsSetDuration() bool { - return p.Duration != nil -} - -func (p *Span) IsSetTraceIDHigh() bool { - return p.TraceIDHigh != nil -} - -func (p *Span) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - case 8: - if err := p.readField8(iprot); err != nil { - return err - } - case 9: - if err := 
p.readField9(iprot); err != nil { - return err - } - case 10: - if err := p.readField10(iprot); err != nil { - return err - } - case 11: - if err := p.readField11(iprot); err != nil { - return err - } - case 12: - if err := p.readField12(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Span) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceID = v - } - return nil -} - -func (p *Span) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Name = v - } - return nil -} - -func (p *Span) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.ID = v - } - return nil -} - -func (p *Span) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.ParentID = &v - } - return nil -} - -func (p *Span) readField6(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Annotation, 0, size) - p.Annotations = tSlice - for i := 0; i < size; i++ { - _elem0 := &Annotation{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Annotations = append(p.Annotations, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return 
thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) readField8(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BinaryAnnotation, 0, size) - p.BinaryAnnotations = tSlice - for i := 0; i < size; i++ { - _elem1 := &BinaryAnnotation{} - if err := _elem1.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) readField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.Debug = v - } - return nil -} - -func (p *Span) readField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 10: ", err) - } else { - p.Timestamp = &v - } - return nil -} - -func (p *Span) readField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 11: ", err) - } else { - p.Duration = &v - } - return nil -} - -func (p *Span) readField12(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 12: ", err) - } else { - p.TraceIDHigh = &v - } - return nil -} - -func (p *Span) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := 
p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := p.writeField9(oprot); err != nil { - return err - } - if err := p.writeField10(oprot); err != nil { - return err - } - if err := p.writeField11(oprot); err != nil { - return err - } - if err := p.writeField12(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Span) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) - } - return err -} - -func (p *Span) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) - } - if err := oprot.WriteString(string(p.Name)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) - } - return err -} - -func (p *Span) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) 
- } - if err := oprot.WriteI64(int64(p.ID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) - } - return err -} - -func (p *Span) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetParentID() { - if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) - } - if err := oprot.WriteI64(int64(*p.ParentID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) - } - } - return err -} - -func (p *Span) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Annotations { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) - } - return err -} - -func (p *Span) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) - } - if err := 
oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.BinaryAnnotations { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) - } - return err -} - -func (p *Span) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetDebug() { - if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) - } - if err := oprot.WriteBool(bool(p.Debug)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) - } - } - return err -} - -func (p *Span) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) - } - } - return err -} - -func (p *Span) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetDuration() { - if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 
11:duration: ", p), err) - } - if err := oprot.WriteI64(int64(*p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) - } - } - return err -} - -func (p *Span) writeField12(oprot thrift.TProtocol) (err error) { - if p.IsSetTraceIDHigh() { - if err := oprot.WriteFieldBegin("trace_id_high", thrift.I64, 12); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) - } - if err := oprot.WriteI64(int64(*p.TraceIDHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) - } - } - return err -} - -func (p *Span) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - Ok -type Response struct { - Ok bool `thrift:"ok,1,required" json:"ok"` -} - -func NewResponse() *Response { - return &Response{} -} - -func (p *Response) GetOk() bool { - return p.Ok -} -func (p *Response) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetOk = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := 
iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) - } - return nil -} - -func (p *Response) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ok = v - } - return nil -} - -func (p *Response) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Response) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) - } - if err := oprot.WriteBool(bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) - } - return err -} - -func (p *Response) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Response(%+v)", *p) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go deleted file mode 100644 index 417e883d0e31..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go +++ /dev/null @@ -1,446 +0,0 @@ 
-// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package zipkincore - -import ( - "bytes" - "fmt" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type ZipkinCollector interface { - // Parameters: - // - Spans - SubmitZipkinBatch(spans []*Span) (r []*Response, err error) -} - -type ZipkinCollectorClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient { - return &ZipkinCollectorClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient { - return &ZipkinCollectorClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Parameters: -// - Spans -func (p *ZipkinCollectorClient) SubmitZipkinBatch(spans []*Span) (r []*Response, err error) { - if err = p.sendSubmitZipkinBatch(spans); err != nil { - return - } - return p.recvSubmitZipkinBatch() -} - -func (p *ZipkinCollectorClient) sendSubmitZipkinBatch(spans []*Span) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("submitZipkinBatch", thrift.CALL, p.SeqId); err != nil { - return - } - args := ZipkinCollectorSubmitZipkinBatchArgs{ - Spans: spans, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} 
- -func (p *ZipkinCollectorClient) recvSubmitZipkinBatch() (value []*Response, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "submitZipkinBatch" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "submitZipkinBatch failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "submitZipkinBatch failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error3 error - error3, err = error2.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error3 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "submitZipkinBatch failed: invalid message type") - return - } - result := ZipkinCollectorSubmitZipkinBatchResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - -type ZipkinCollectorProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler ZipkinCollector -} - -func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor { - - self4 := 
&ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self4.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler} - return self4 -} - -func (p *ZipkinCollectorProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x5.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x5 - -} - -type zipkinCollectorProcessorSubmitZipkinBatch struct { - handler ZipkinCollector -} - -func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := ZipkinCollectorSubmitZipkinBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := ZipkinCollectorSubmitZipkinBatchResult{} - var retval []*Response - var err2 error - if retval, err2 = p.handler.SubmitZipkinBatch(args.Spans); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error()) - oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("submitZipkinBatch", thrift.REPLY, seqId); 
err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Spans -type ZipkinCollectorSubmitZipkinBatchArgs struct { - Spans []*Span `thrift:"spans,1" json:"spans"` -} - -func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs { - return &ZipkinCollectorSubmitZipkinBatchArgs{} -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span { - return p.Spans -} -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem6 := &Span{} - if err := _elem6.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) - } 
- p.Spans = append(p.Spans, _elem6) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("submitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) - } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p) -} - -// Attributes: -// - Success -type ZipkinCollectorSubmitZipkinBatchResult struct { - Success []*Response `thrift:"success,0" json:"success,omitempty"` -} - -func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult { - return 
&ZipkinCollectorSubmitZipkinBatchResult{} -} - -var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response - -func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response { - return p.Success -} -func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) readField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Response, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem7 := &Response{} - if err := _elem7.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err) - } - p.Success = append(p.Success, _elem7) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("submitZipkinBatch_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/vendor/github.com/uber/jaeger-client-go/thrift/README.md deleted file mode 100644 index 1d8e642e028b..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Apache Thrift - -This is a partial copy of Apache Thrift v0.10 (https://github.com/apache/thrift/commit/b2a4d4ae21c789b689dd162deb819665567f481c). - -It is vendored code to avoid compatibility issues introduced in Thrift v0.11. - -See https://github.com/jaegertracing/jaeger-client-go/pull/303. 
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go deleted file mode 100644 index 6655cc5a9720..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -const ( - UNKNOWN_APPLICATION_EXCEPTION = 0 - UNKNOWN_METHOD = 1 - INVALID_MESSAGE_TYPE_EXCEPTION = 2 - WRONG_METHOD_NAME = 3 - BAD_SEQUENCE_ID = 4 - MISSING_RESULT = 5 - INTERNAL_ERROR = 6 - PROTOCOL_ERROR = 7 -) - -// Application level Thrift exception -type TApplicationException interface { - TException - TypeId() int32 - Read(iprot TProtocol) (TApplicationException, error) - Write(oprot TProtocol) error -} - -type tApplicationException struct { - message string - type_ int32 -} - -func (e tApplicationException) Error() string { - return e.message -} - -func NewTApplicationException(type_ int32, message string) TApplicationException { - return &tApplicationException{message, type_} -} - -func (p *tApplicationException) TypeId() int32 { - return p.type_ -} - -func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) { - _, err := iprot.ReadStructBegin() - if err != nil { - return nil, err - } - - message := "" - type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) - - for { - _, ttype, id, err := iprot.ReadFieldBegin() - if err != nil { - return nil, err - } - if ttype == STOP { - break - } - switch id { - case 1: - if ttype == STRING { - if message, err = iprot.ReadString(); err != nil { - return nil, err - } - } else { - if err = SkipDefaultDepth(iprot, ttype); err != nil { - return nil, err - } - } - case 2: - if ttype == I32 { - if type_, err = iprot.ReadI32(); err != nil { - return nil, err - } - } else { - if err = SkipDefaultDepth(iprot, ttype); err != nil { - return nil, err - } - } - default: - if err = SkipDefaultDepth(iprot, ttype); err != nil { - return nil, err - } - } - if err = iprot.ReadFieldEnd(); err != nil { - return nil, err - } - } - return NewTApplicationException(type_, message), iprot.ReadStructEnd() -} - -func (p *tApplicationException) Write(oprot TProtocol) (err error) { - err = oprot.WriteStructBegin("TApplicationException") - if len(p.Error()) > 0 { - err = oprot.WriteFieldBegin("message", 
STRING, 1) - if err != nil { - return - } - err = oprot.WriteString(p.Error()) - if err != nil { - return - } - err = oprot.WriteFieldEnd() - if err != nil { - return - } - } - err = oprot.WriteFieldBegin("type", I32, 2) - if err != nil { - return - } - err = oprot.WriteI32(p.type_) - if err != nil { - return - } - err = oprot.WriteFieldEnd() - if err != nil { - return - } - err = oprot.WriteFieldStop() - if err != nil { - return - } - err = oprot.WriteStructEnd() - return -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go deleted file mode 100644 index 690d341111b5..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go +++ /dev/null @@ -1,514 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -type TBinaryProtocol struct { - trans TRichTransport - origTransport TTransport - reader io.Reader - writer io.Writer - strictRead bool - strictWrite bool - buffer [64]byte -} - -type TBinaryProtocolFactory struct { - strictRead bool - strictWrite bool -} - -func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { - return NewTBinaryProtocol(t, false, true) -} - -func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { - p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite} - if et, ok := t.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(t) - } - p.reader = p.trans - p.writer = p.trans - return p -} - -func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { - return NewTBinaryProtocolFactory(false, true) -} - -func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { - return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite} -} - -func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { - return NewTBinaryProtocol(t, p.strictRead, p.strictWrite) -} - -/** - * Writing Methods - */ - -func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { - if p.strictWrite { - version := uint32(VERSION_1) | uint32(typeId) - e := p.WriteI32(int32(version)) - if e != nil { - return e - } - e = p.WriteString(name) - if e != nil { - return e - } - e = p.WriteI32(seqId) - return e - } else { - e := p.WriteString(name) - if e != nil { - return e - } - e = p.WriteByte(int8(typeId)) - if e != nil { - return e - } - e = p.WriteI32(seqId) - return e - } - return nil -} - -func (p *TBinaryProtocol) WriteMessageEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteStructBegin(name string) error { - return nil -} - -func (p *TBinaryProtocol) WriteStructEnd() 
error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - e := p.WriteByte(int8(typeId)) - if e != nil { - return e - } - e = p.WriteI16(id) - return e -} - -func (p *TBinaryProtocol) WriteFieldEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldStop() error { - e := p.WriteByte(STOP) - return e -} - -func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - e := p.WriteByte(int8(keyType)) - if e != nil { - return e - } - e = p.WriteByte(int8(valueType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteMapEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error { - e := p.WriteByte(int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteListEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error { - e := p.WriteByte(int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteSetEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteBool(value bool) error { - if value { - return p.WriteByte(1) - } - return p.WriteByte(0) -} - -func (p *TBinaryProtocol) WriteByte(value int8) error { - e := p.trans.WriteByte(byte(value)) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI16(value int16) error { - v := p.buffer[0:2] - binary.BigEndian.PutUint16(v, uint16(value)) - _, e := p.writer.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI32(value int32) error { - v := p.buffer[0:4] - binary.BigEndian.PutUint32(v, uint32(value)) - _, e := p.writer.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI64(value int64) error { - v := p.buffer[0:8] - binary.BigEndian.PutUint64(v, uint64(value)) - _, 
err := p.writer.Write(v) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteDouble(value float64) error { - return p.WriteI64(int64(math.Float64bits(value))) -} - -func (p *TBinaryProtocol) WriteString(value string) error { - e := p.WriteI32(int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.WriteString(value) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteBinary(value []byte) error { - e := p.WriteI32(int32(len(value))) - if e != nil { - return e - } - _, err := p.writer.Write(value) - return NewTProtocolException(err) -} - -/** - * Reading methods - */ - -func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { - size, e := p.ReadI32() - if e != nil { - return "", typeId, 0, NewTProtocolException(e) - } - if size < 0 { - typeId = TMessageType(size & 0x0ff) - version := int64(int64(size) & VERSION_MASK) - if version != VERSION_1 { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) - } - name, e = p.ReadString() - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - seqId, e = p.ReadI32() - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - return name, typeId, seqId, nil - } - if p.strictRead { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) - } - name, e2 := p.readStringBody(size) - if e2 != nil { - return name, typeId, seqId, e2 - } - b, e3 := p.ReadByte() - if e3 != nil { - return name, typeId, seqId, e3 - } - typeId = TMessageType(b) - seqId, e4 := p.ReadI32() - if e4 != nil { - return name, typeId, seqId, e4 - } - return name, typeId, seqId, nil -} - -func (p *TBinaryProtocol) ReadMessageEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) { - return -} - -func (p *TBinaryProtocol) ReadStructEnd() error { - 
return nil -} - -func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) { - t, err := p.ReadByte() - typeId = TType(t) - if err != nil { - return name, typeId, seqId, err - } - if t != STOP { - seqId, err = p.ReadI16() - } - return name, typeId, seqId, err -} - -func (p *TBinaryProtocol) ReadFieldEnd() error { - return nil -} - -var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) - -func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) { - k, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - kType = TType(k) - v, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - vType = TType(v) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return kType, vType, size, nil -} - -func (p *TBinaryProtocol) ReadMapEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) { - b, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - return -} - -func (p *TBinaryProtocol) ReadListEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) { - b, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return elemType, size, nil -} - -func (p *TBinaryProtocol) ReadSetEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadBool() (bool, error) { - b, e 
:= p.ReadByte() - v := true - if b != 1 { - v = false - } - return v, e -} - -func (p *TBinaryProtocol) ReadByte() (int8, error) { - v, err := p.trans.ReadByte() - return int8(v), err -} - -func (p *TBinaryProtocol) ReadI16() (value int16, err error) { - buf := p.buffer[0:2] - err = p.readAll(buf) - value = int16(binary.BigEndian.Uint16(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI32() (value int32, err error) { - buf := p.buffer[0:4] - err = p.readAll(buf) - value = int32(binary.BigEndian.Uint32(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI64() (value int64, err error) { - buf := p.buffer[0:8] - err = p.readAll(buf) - value = int64(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadDouble() (value float64, err error) { - buf := p.buffer[0:8] - err = p.readAll(buf) - value = math.Float64frombits(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadString() (value string, err error) { - size, e := p.ReadI32() - if e != nil { - return "", e - } - if size < 0 { - err = invalidDataLength - return - } - - return p.readStringBody(size) -} - -func (p *TBinaryProtocol) ReadBinary() ([]byte, error) { - size, e := p.ReadI32() - if e != nil { - return nil, e - } - if size < 0 { - return nil, invalidDataLength - } - if uint64(size) > p.trans.RemainingBytes() { - return nil, invalidDataLength - } - - isize := int(size) - buf := make([]byte, isize) - _, err := io.ReadFull(p.trans, buf) - return buf, NewTProtocolException(err) -} - -func (p *TBinaryProtocol) Flush() (err error) { - return NewTProtocolException(p.trans.Flush()) -} - -func (p *TBinaryProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *TBinaryProtocol) Transport() TTransport { - return p.origTransport -} - -func (p *TBinaryProtocol) readAll(buf []byte) error { - _, err := io.ReadFull(p.reader, buf) - return NewTProtocolException(err) -} - -const readLimit = 32768 - 
-func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { - if size < 0 { - return "", nil - } - if uint64(size) > p.trans.RemainingBytes() { - return "", invalidDataLength - } - - var ( - buf bytes.Buffer - e error - b []byte - ) - - switch { - case int(size) <= len(p.buffer): - b = p.buffer[:size] // avoids allocation for small reads - case int(size) < readLimit: - b = make([]byte, size) - default: - b = make([]byte, readLimit) - } - - for size > 0 { - _, e = io.ReadFull(p.trans, b) - buf.Write(b) - if e != nil { - break - } - size -= readLimit - if size < readLimit && size > 0 { - b = b[:size] - } - } - return buf.String(), NewTProtocolException(e) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go deleted file mode 100644 index b9299f2fa13c..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go +++ /dev/null @@ -1,815 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - COMPACT_PROTOCOL_ID = 0x082 - COMPACT_VERSION = 1 - COMPACT_VERSION_MASK = 0x1f - COMPACT_TYPE_MASK = 0x0E0 - COMPACT_TYPE_BITS = 0x07 - COMPACT_TYPE_SHIFT_AMOUNT = 5 -) - -type tCompactType byte - -const ( - COMPACT_BOOLEAN_TRUE = 0x01 - COMPACT_BOOLEAN_FALSE = 0x02 - COMPACT_BYTE = 0x03 - COMPACT_I16 = 0x04 - COMPACT_I32 = 0x05 - COMPACT_I64 = 0x06 - COMPACT_DOUBLE = 0x07 - COMPACT_BINARY = 0x08 - COMPACT_LIST = 0x09 - COMPACT_SET = 0x0A - COMPACT_MAP = 0x0B - COMPACT_STRUCT = 0x0C -) - -var ( - ttypeToCompactType map[TType]tCompactType -) - -func init() { - ttypeToCompactType = map[TType]tCompactType{ - STOP: STOP, - BOOL: COMPACT_BOOLEAN_TRUE, - BYTE: COMPACT_BYTE, - I16: COMPACT_I16, - I32: COMPACT_I32, - I64: COMPACT_I64, - DOUBLE: COMPACT_DOUBLE, - STRING: COMPACT_BINARY, - LIST: COMPACT_LIST, - SET: COMPACT_SET, - MAP: COMPACT_MAP, - STRUCT: COMPACT_STRUCT, - } -} - -type TCompactProtocolFactory struct{} - -func NewTCompactProtocolFactory() *TCompactProtocolFactory { - return &TCompactProtocolFactory{} -} - -func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTCompactProtocol(trans) -} - -type TCompactProtocol struct { - trans TRichTransport - origTransport TTransport - - // Used to keep track of the last field for the current and previous structs, - // so we can do the delta stuff. - lastField []int - lastFieldId int - - // If we encounter a boolean field begin, save the TField here so it can - // have the value incorporated. - booleanFieldName string - booleanFieldId int16 - booleanFieldPending bool - - // If we read a field header, and it's a boolean field, save the boolean - // value here so that readBool can use it. 
- boolValue bool - boolValueIsNotNull bool - buffer [64]byte -} - -// Create a TCompactProtocol given a TTransport -func NewTCompactProtocol(trans TTransport) *TCompactProtocol { - p := &TCompactProtocol{origTransport: trans, lastField: []int{}} - if et, ok := trans.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(trans) - } - - return p - -} - -// -// Public Writing methods. -// - -// Write a message header to the wire. Compact Protocol messages contain the -// protocol version so we can migrate forwards in the future if need be. -func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { - err := p.writeByteDirect(COMPACT_PROTOCOL_ID) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) - if err != nil { - return NewTProtocolException(err) - } - _, err = p.writeVarint32(seqid) - if err != nil { - return NewTProtocolException(err) - } - e := p.WriteString(name) - return e - -} - -func (p *TCompactProtocol) WriteMessageEnd() error { return nil } - -// Write a struct begin. This doesn't actually put anything on the wire. We -// use it as an opportunity to put special placeholder markers on the field -// stack so we can get the field id deltas correct. -func (p *TCompactProtocol) WriteStructBegin(name string) error { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return nil -} - -// Write a struct end. This doesn't actually put anything on the wire. We use -// this as an opportunity to pop the last field from the current struct off -// of the field stack. 
-func (p *TCompactProtocol) WriteStructEnd() error { - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - if typeId == BOOL { - // we want to possibly include the value, so we'll wait. - p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true - return nil - } - _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF) - return NewTProtocolException(err) -} - -// The workhorse of writeFieldBegin. It has the option of doing a -// 'type override' of the type header. This is used specifically in the -// boolean field case. -func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) { - // short lastField = lastField_.pop(); - - // if there's a type override, use that. - var typeToWrite byte - if typeOverride == 0xFF { - typeToWrite = byte(p.getCompactType(typeId)) - } else { - typeToWrite = typeOverride - } - // check if we can use delta encoding for the field id - fieldId := int(id) - written := 0 - if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { - // write them together - err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) - if err != nil { - return 0, err - } - } else { - // write them separate - err := p.writeByteDirect(typeToWrite) - if err != nil { - return 0, err - } - err = p.WriteI16(id) - written = 1 + 2 - if err != nil { - return 0, err - } - } - - p.lastFieldId = fieldId - // p.lastField.Push(field.id); - return written, nil -} - -func (p *TCompactProtocol) WriteFieldEnd() error { return nil } - -func (p *TCompactProtocol) WriteFieldStop() error { - err := p.writeByteDirect(STOP) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - if size == 0 { - err := p.writeByteDirect(0) - return 
NewTProtocolException(err) - } - _, err := p.writeVarint32(int32(size)) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapEnd() error { return nil } - -// Write a list header. -func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteListEnd() error { return nil } - -// Write a set header. -func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteSetEnd() error { return nil } - -func (p *TCompactProtocol) WriteBool(value bool) error { - v := byte(COMPACT_BOOLEAN_FALSE) - if value { - v = byte(COMPACT_BOOLEAN_TRUE) - } - if p.booleanFieldPending { - // we haven't written the field header yet - _, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v) - p.booleanFieldPending = false - return NewTProtocolException(err) - } - // we're not part of a field, so just write the value. - err := p.writeByteDirect(v) - return NewTProtocolException(err) -} - -// Write a byte. Nothing to see here! -func (p *TCompactProtocol) WriteByte(value int8) error { - err := p.writeByteDirect(byte(value)) - return NewTProtocolException(err) -} - -// Write an I16 as a zigzag varint. -func (p *TCompactProtocol) WriteI16(value int16) error { - _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) - return NewTProtocolException(err) -} - -// Write an i32 as a zigzag varint. -func (p *TCompactProtocol) WriteI32(value int32) error { - _, err := p.writeVarint32(p.int32ToZigzag(value)) - return NewTProtocolException(err) -} - -// Write an i64 as a zigzag varint. 
-func (p *TCompactProtocol) WriteI64(value int64) error { - _, err := p.writeVarint64(p.int64ToZigzag(value)) - return NewTProtocolException(err) -} - -// Write a double to the wire as 8 bytes. -func (p *TCompactProtocol) WriteDouble(value float64) error { - buf := p.buffer[0:8] - binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) - _, err := p.trans.Write(buf) - return NewTProtocolException(err) -} - -// Write a string to the wire with a varint size preceding. -func (p *TCompactProtocol) WriteString(value string) error { - _, e := p.writeVarint32(int32(len(value))) - if e != nil { - return NewTProtocolException(e) - } - if len(value) > 0 { - } - _, e = p.trans.WriteString(value) - return e -} - -// Write a byte array, using a varint for the size. -func (p *TCompactProtocol) WriteBinary(bin []byte) error { - _, e := p.writeVarint32(int32(len(bin))) - if e != nil { - return NewTProtocolException(e) - } - if len(bin) > 0 { - _, e = p.trans.Write(bin) - return NewTProtocolException(e) - } - return nil -} - -// -// Reading methods. -// - -// Read a message header. 
-func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { - - protocolId, err := p.readByteDirect() - if err != nil { - return - } - - if protocolId != COMPACT_PROTOCOL_ID { - e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) - return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) - } - - versionAndType, err := p.readByteDirect() - if err != nil { - return - } - - version := versionAndType & COMPACT_VERSION_MASK - typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) - if version != COMPACT_VERSION { - e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version) - err = NewTProtocolExceptionWithType(BAD_VERSION, e) - return - } - seqId, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - name, err = p.ReadString() - return -} - -func (p *TCompactProtocol) ReadMessageEnd() error { return nil } - -// Read a struct begin. There's nothing on the wire for this, but it is our -// opportunity to push a new struct begin marker onto the field stack. -func (p *TCompactProtocol) ReadStructBegin() (name string, err error) { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return -} - -// Doesn't actually consume any wire data, just removes the last field for -// this struct from the field stack. -func (p *TCompactProtocol) ReadStructEnd() error { - // consume the last field we read off the wire. - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -// Read a field header off the wire. -func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) { - t, err := p.readByteDirect() - if err != nil { - return - } - - // if it's a stop, then we can return immediately, as the struct is over. 
- if (t & 0x0f) == STOP { - return "", STOP, 0, nil - } - - // mask off the 4 MSB of the type header. it could contain a field id delta. - modifier := int16((t & 0xf0) >> 4) - if modifier == 0 { - // not a delta. look ahead for the zigzag varint field id. - id, err = p.ReadI16() - if err != nil { - return - } - } else { - // has a delta. add the delta to the last read field id. - id = int16(p.lastFieldId) + modifier - } - typeId, e := p.getTType(tCompactType(t & 0x0f)) - if e != nil { - err = NewTProtocolException(e) - return - } - - // if this happens to be a boolean field, the value is encoded in the type - if p.isBoolType(t) { - // save the boolean value in a special instance variable. - p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) - p.boolValueIsNotNull = true - } - - // push the new field onto the field stack so we can keep the deltas going. - p.lastFieldId = int(id) - return -} - -func (p *TCompactProtocol) ReadFieldEnd() error { return nil } - -// Read a map header off the wire. If the size is zero, skip reading the key -// and value type. This means that 0-length maps will yield TMaps without the -// "correct" types. -func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { - size32, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - keyAndValueType := byte(STOP) - if size != 0 { - keyAndValueType, err = p.readByteDirect() - if err != nil { - return - } - } - keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) - valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) - return -} - -func (p *TCompactProtocol) ReadMapEnd() error { return nil } - -// Read a list header off the wire. If the list size is 0-14, the size will -// be packed into the element type header. 
If it's a longer list, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) { - size_and_type, err := p.readByteDirect() - if err != nil { - return - } - size = int((size_and_type >> 4) & 0x0f) - if size == 15 { - size2, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size2 < 0 { - err = invalidDataLength - return - } - size = int(size2) - } - elemType, e := p.getTType(tCompactType(size_and_type)) - if e != nil { - err = NewTProtocolException(e) - return - } - return -} - -func (p *TCompactProtocol) ReadListEnd() error { return nil } - -// Read a set header off the wire. If the set size is 0-14, the size will -// be packed into the element type header. If it's a longer set, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) { - return p.ReadListBegin() -} - -func (p *TCompactProtocol) ReadSetEnd() error { return nil } - -// Read a boolean off the wire. If this is a boolean field, the value should -// already have been read during readFieldBegin, so we'll just consume the -// pre-stored value. Otherwise, read a byte. -func (p *TCompactProtocol) ReadBool() (value bool, err error) { - if p.boolValueIsNotNull { - p.boolValueIsNotNull = false - return p.boolValue, nil - } - v, err := p.readByteDirect() - return v == COMPACT_BOOLEAN_TRUE, err -} - -// Read a single byte off the wire. Nothing interesting here. -func (p *TCompactProtocol) ReadByte() (int8, error) { - v, err := p.readByteDirect() - if err != nil { - return 0, NewTProtocolException(err) - } - return int8(v), err -} - -// Read an i16 from the wire as a zigzag varint. 
-func (p *TCompactProtocol) ReadI16() (value int16, err error) { - v, err := p.ReadI32() - return int16(v), err -} - -// Read an i32 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI32() (value int32, err error) { - v, e := p.readVarint32() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt32(v) - return value, nil -} - -// Read an i64 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI64() (value int64, err error) { - v, e := p.readVarint64() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt64(v) - return value, nil -} - -// No magic here - just read a double off the wire. -func (p *TCompactProtocol) ReadDouble() (value float64, err error) { - longBits := p.buffer[0:8] - _, e := io.ReadFull(p.trans, longBits) - if e != nil { - return 0.0, NewTProtocolException(e) - } - return math.Float64frombits(p.bytesToUint64(longBits)), nil -} - -// Reads a []byte (via readBinary), and then UTF-8 decodes it. -func (p *TCompactProtocol) ReadString() (value string, err error) { - length, e := p.readVarint32() - if e != nil { - return "", NewTProtocolException(e) - } - if length < 0 { - return "", invalidDataLength - } - if uint64(length) > p.trans.RemainingBytes() { - return "", invalidDataLength - } - - if length == 0 { - return "", nil - } - var buf []byte - if length <= int32(len(p.buffer)) { - buf = p.buffer[0:length] - } else { - buf = make([]byte, length) - } - _, e = io.ReadFull(p.trans, buf) - return string(buf), NewTProtocolException(e) -} - -// Read a []byte from the wire. 
-func (p *TCompactProtocol) ReadBinary() (value []byte, err error) { - length, e := p.readVarint32() - if e != nil { - return nil, NewTProtocolException(e) - } - if length == 0 { - return []byte{}, nil - } - if length < 0 { - return nil, invalidDataLength - } - if uint64(length) > p.trans.RemainingBytes() { - return nil, invalidDataLength - } - - buf := make([]byte, length) - _, e = io.ReadFull(p.trans, buf) - return buf, NewTProtocolException(e) -} - -func (p *TCompactProtocol) Flush() (err error) { - return NewTProtocolException(p.trans.Flush()) -} - -func (p *TCompactProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *TCompactProtocol) Transport() TTransport { - return p.origTransport -} - -// -// Internal writing methods -// - -// Abstract method for writing the start of lists and sets. List and sets on -// the wire differ only by the type indicator. -func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { - if size <= 14 { - return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) - } - err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) - if err != nil { - return 0, err - } - m, err := p.writeVarint32(int32(size)) - return 1 + m, err -} - -// Write an i32 as a varint. Results in 1-5 bytes on the wire. -// TODO(pomack): make a permanent buffer like writeVarint64? -func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { - i32buf := p.buffer[0:5] - idx := 0 - for { - if (n & ^0x7F) == 0 { - i32buf[idx] = byte(n) - idx++ - // p.writeByteDirect(byte(n)); - break - // return; - } else { - i32buf[idx] = byte((n & 0x7F) | 0x80) - idx++ - // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); - u := uint32(n) - n = int32(u >> 7) - } - } - return p.trans.Write(i32buf[0:idx]) -} - -// Write an i64 as a varint. Results in 1-10 bytes on the wire. 
-func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { - varint64out := p.buffer[0:10] - idx := 0 - for { - if (n & ^0x7F) == 0 { - varint64out[idx] = byte(n) - idx++ - break - } else { - varint64out[idx] = byte((n & 0x7F) | 0x80) - idx++ - u := uint64(n) - n = int64(u >> 7) - } - } - return p.trans.Write(varint64out[0:idx]) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { - return (l << 1) ^ (l >> 63) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *TCompactProtocol) int32ToZigzag(n int32) int32 { - return (n << 1) ^ (n >> 31) -} - -func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) { - binary.LittleEndian.PutUint64(buf, n) -} - -func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) { - binary.LittleEndian.PutUint64(buf, uint64(n)) -} - -// Writes a byte without any possibility of all that field header nonsense. -// Used internally by other writing methods that know they need to write a byte. -func (p *TCompactProtocol) writeByteDirect(b byte) error { - return p.trans.WriteByte(b) -} - -// Writes a byte without any possibility of all that field header nonsense. -func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) { - return 1, p.writeByteDirect(byte(n)) -} - -// -// Internal reading methods -// - -// Read an i32 from the wire as a varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 5 bytes. -func (p *TCompactProtocol) readVarint32() (int32, error) { - // if the wire contains the right stuff, this will just truncate the i64 we - // read and get us the right sign. - v, err := p.readVarint64() - return int32(v), err -} - -// Read an i64 from the wire as a proper varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 10 bytes. 
-func (p *TCompactProtocol) readVarint64() (int64, error) { - shift := uint(0) - result := int64(0) - for { - b, err := p.readByteDirect() - if err != nil { - return 0, err - } - result |= int64(b&0x7f) << shift - if (b & 0x80) != 0x80 { - break - } - shift += 7 - } - return result, nil -} - -// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. -func (p *TCompactProtocol) readByteDirect() (byte, error) { - return p.trans.ReadByte() -} - -// -// encoding helpers -// - -// Convert from zigzag int to int. -func (p *TCompactProtocol) zigzagToInt32(n int32) int32 { - u := uint32(n) - return int32(u>>1) ^ -(n & 1) -} - -// Convert from zigzag long to long. -func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { - u := uint64(n) - return int64(u>>1) ^ -(n & 1) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. -func (p *TCompactProtocol) bytesToInt64(b []byte) int64 { - return int64(binary.LittleEndian.Uint64(b)) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. -func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { - return binary.LittleEndian.Uint64(b) -} - -// -// type testing and converting -// - -func (p *TCompactProtocol) isBoolType(b byte) bool { - return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE -} - -// Given a tCompactType constant, convert it to its corresponding -// TType value. 
-func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { - switch byte(t) & 0x0f { - case STOP: - return STOP, nil - case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: - return BOOL, nil - case COMPACT_BYTE: - return BYTE, nil - case COMPACT_I16: - return I16, nil - case COMPACT_I32: - return I32, nil - case COMPACT_I64: - return I64, nil - case COMPACT_DOUBLE: - return DOUBLE, nil - case COMPACT_BINARY: - return STRING, nil - case COMPACT_LIST: - return LIST, nil - case COMPACT_SET: - return SET, nil - case COMPACT_MAP: - return MAP, nil - case COMPACT_STRUCT: - return STRUCT, nil - } - return STOP, TException(fmt.Errorf("don't know what type: %d", t&0x0f)) -} - -// Given a TType value, find the appropriate TCompactProtocol.Types constant. -func (p *TCompactProtocol) getCompactType(t TType) tCompactType { - return ttypeToCompactType[t] -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go deleted file mode 100644 index ea8d6f66114c..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "errors" -) - -// Generic Thrift exception -type TException interface { - error -} - -// Prepends additional information to an error without losing the Thrift exception interface -func PrependError(prepend string, err error) error { - if t, ok := err.(TTransportException); ok { - return NewTTransportException(t.TypeId(), prepend+t.Error()) - } - if t, ok := err.(TProtocolException); ok { - return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error())) - } - if t, ok := err.(TApplicationException); ok { - return NewTApplicationException(t.TypeId(), prepend+t.Error()) - } - - return errors.New(prepend + err.Error()) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go deleted file mode 100644 index b62fd56f0634..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" -) - -// Memory buffer-based implementation of the TTransport interface. 
-type TMemoryBuffer struct { - *bytes.Buffer - size int -} - -type TMemoryBufferTransportFactory struct { - size int -} - -func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) TTransport { - if trans != nil { - t, ok := trans.(*TMemoryBuffer) - if ok && t.size > 0 { - return NewTMemoryBufferLen(t.size) - } - } - return NewTMemoryBufferLen(p.size) -} - -func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { - return &TMemoryBufferTransportFactory{size: size} -} - -func NewTMemoryBuffer() *TMemoryBuffer { - return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} -} - -func NewTMemoryBufferLen(size int) *TMemoryBuffer { - buf := make([]byte, 0, size) - return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} -} - -func (p *TMemoryBuffer) IsOpen() bool { - return true -} - -func (p *TMemoryBuffer) Open() error { - return nil -} - -func (p *TMemoryBuffer) Close() error { - p.Buffer.Reset() - return nil -} - -// Flushing a memory buffer is a no-op -func (p *TMemoryBuffer) Flush() error { - return nil -} - -func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) { - return uint64(p.Buffer.Len()) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go deleted file mode 100644 index 25ab2e98a256..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Message type constants in the Thrift protocol. -type TMessageType int32 - -const ( - INVALID_TMESSAGE_TYPE TMessageType = 0 - CALL TMessageType = 1 - REPLY TMessageType = 2 - EXCEPTION TMessageType = 3 - ONEWAY TMessageType = 4 -) diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go deleted file mode 100644 index aa8daa9b54f9..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "math" - "strconv" -) - -type Numeric interface { - Int64() int64 - Int32() int32 - Int16() int16 - Byte() byte - Int() int - Float64() float64 - Float32() float32 - String() string - isNull() bool -} - -type numeric struct { - iValue int64 - dValue float64 - sValue string - isNil bool -} - -var ( - INFINITY Numeric - NEGATIVE_INFINITY Numeric - NAN Numeric - ZERO Numeric - NUMERIC_NULL Numeric -) - -func NewNumericFromDouble(dValue float64) Numeric { - if math.IsInf(dValue, 1) { - return INFINITY - } - if math.IsInf(dValue, -1) { - return NEGATIVE_INFINITY - } - if math.IsNaN(dValue) { - return NAN - } - iValue := int64(dValue) - sValue := strconv.FormatFloat(dValue, 'g', 10, 64) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI64(iValue int64) Numeric { - dValue := float64(iValue) - sValue := string(iValue) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI32(iValue int32) Numeric { - dValue := float64(iValue) - sValue := string(iValue) - isNil := false - return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromString(sValue string) Numeric { - if sValue == INFINITY.String() { - return INFINITY - } - if sValue == NEGATIVE_INFINITY.String() { - return NEGATIVE_INFINITY - } - if sValue == NAN.String() { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - isNil := len(sValue) == 0 - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromJSONString(sValue string, isNull bool) Numeric { - if isNull { - return NewNullNumeric() - } - if sValue == JSON_INFINITY { - return INFINITY - } - if sValue == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY - } - if sValue == JSON_NAN { - return NAN - } - iValue, _ := 
strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} -} - -func NewNullNumeric() Numeric { - return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} -} - -func (p *numeric) Int64() int64 { - return p.iValue -} - -func (p *numeric) Int32() int32 { - return int32(p.iValue) -} - -func (p *numeric) Int16() int16 { - return int16(p.iValue) -} - -func (p *numeric) Byte() byte { - return byte(p.iValue) -} - -func (p *numeric) Int() int { - return int(p.iValue) -} - -func (p *numeric) Float64() float64 { - return p.dValue -} - -func (p *numeric) Float32() float32 { - return float32(p.dValue) -} - -func (p *numeric) String() string { - return p.sValue -} - -func (p *numeric) isNull() bool { - return p.isNil -} - -func init() { - INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} - NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} - NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} - ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} - NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go b/vendor/github.com/uber/jaeger-client-go/thrift/processor.go deleted file mode 100644 index ca0d3faf20ee..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/processor.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// A processor is a generic object which operates upon an input stream and -// writes to some output stream. -type TProcessor interface { - Process(in, out TProtocol) (bool, TException) -} - -type TProcessorFunction interface { - Process(seqId int32, in, out TProtocol) (bool, TException) -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go deleted file mode 100644 index 45fa202e741c..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "errors" -) - -const ( - VERSION_MASK = 0xffff0000 - VERSION_1 = 0x80010000 -) - -type TProtocol interface { - WriteMessageBegin(name string, typeId TMessageType, seqid int32) error - WriteMessageEnd() error - WriteStructBegin(name string) error - WriteStructEnd() error - WriteFieldBegin(name string, typeId TType, id int16) error - WriteFieldEnd() error - WriteFieldStop() error - WriteMapBegin(keyType TType, valueType TType, size int) error - WriteMapEnd() error - WriteListBegin(elemType TType, size int) error - WriteListEnd() error - WriteSetBegin(elemType TType, size int) error - WriteSetEnd() error - WriteBool(value bool) error - WriteByte(value int8) error - WriteI16(value int16) error - WriteI32(value int32) error - WriteI64(value int64) error - WriteDouble(value float64) error - WriteString(value string) error - WriteBinary(value []byte) error - - ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) - ReadMessageEnd() error - ReadStructBegin() (name string, err error) - ReadStructEnd() error - ReadFieldBegin() (name string, typeId TType, id int16, err error) - ReadFieldEnd() error - ReadMapBegin() (keyType TType, valueType TType, size int, err error) - ReadMapEnd() error - ReadListBegin() (elemType TType, size int, err error) - ReadListEnd() error - ReadSetBegin() (elemType TType, size int, err error) - ReadSetEnd() error - ReadBool() (value bool, err error) - ReadByte() (value int8, err error) - ReadI16() (value int16, err error) - ReadI32() (value int32, err error) - ReadI64() (value int64, err error) - ReadDouble() (value float64, err error) - ReadString() (value string, err error) - ReadBinary() (value []byte, err error) - - Skip(fieldType TType) (err error) - Flush() (err error) - - Transport() TTransport -} - -// The maximum recursive depth the skip() function will traverse -const DEFAULT_RECURSION_DEPTH = 64 - -// Skips over the next data element from the provided input TProtocol object. 
-func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) { - return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH) -} - -// Skips over the next data element from the provided input TProtocol object. -func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) { - - if maxDepth <= 0 { - return NewTProtocolExceptionWithType( DEPTH_LIMIT, errors.New("Depth limit exceeded")) - } - - switch fieldType { - case STOP: - return - case BOOL: - _, err = self.ReadBool() - return - case BYTE: - _, err = self.ReadByte() - return - case I16: - _, err = self.ReadI16() - return - case I32: - _, err = self.ReadI32() - return - case I64: - _, err = self.ReadI64() - return - case DOUBLE: - _, err = self.ReadDouble() - return - case STRING: - _, err = self.ReadString() - return - case STRUCT: - if _, err = self.ReadStructBegin(); err != nil { - return err - } - for { - _, typeId, _, _ := self.ReadFieldBegin() - if typeId == STOP { - break - } - err := Skip(self, typeId, maxDepth-1) - if err != nil { - return err - } - self.ReadFieldEnd() - } - return self.ReadStructEnd() - case MAP: - keyType, valueType, size, err := self.ReadMapBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, keyType, maxDepth-1) - if err != nil { - return err - } - self.Skip(valueType) - } - return self.ReadMapEnd() - case SET: - elemType, size, err := self.ReadSetBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadSetEnd() - case LIST: - elemType, size, err := self.ReadListBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadListEnd() - } - return nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go deleted file mode 
100644 index 6e357ee890df..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "encoding/base64" -) - -// Thrift Protocol exception -type TProtocolException interface { - TException - TypeId() int -} - -const ( - UNKNOWN_PROTOCOL_EXCEPTION = 0 - INVALID_DATA = 1 - NEGATIVE_SIZE = 2 - SIZE_LIMIT = 3 - BAD_VERSION = 4 - NOT_IMPLEMENTED = 5 - DEPTH_LIMIT = 6 -) - -type tProtocolException struct { - typeId int - message string -} - -func (p *tProtocolException) TypeId() int { - return p.typeId -} - -func (p *tProtocolException) String() string { - return p.message -} - -func (p *tProtocolException) Error() string { - return p.message -} - -func NewTProtocolException(err error) TProtocolException { - if err == nil { - return nil - } - if e,ok := err.(TProtocolException); ok { - return e - } - if _, ok := err.(base64.CorruptInputError); ok { - return &tProtocolException{INVALID_DATA, err.Error()} - } - return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()} -} - -func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { - if err == nil 
{ - return nil - } - return &tProtocolException{errType, err.Error()} -} - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go deleted file mode 100644 index c40f796d886a..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Factory interface for constructing protocol instances. -type TProtocolFactory interface { - GetProtocol(trans TTransport) TProtocol -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go deleted file mode 100644 index 8e296a99b5f9..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "io" - -type RichTransport struct { - TTransport -} - -// Wraps Transport to provide TRichTransport interface -func NewTRichTransport(trans TTransport) *RichTransport { - return &RichTransport{trans} -} - -func (r *RichTransport) ReadByte() (c byte, err error) { - return readByte(r.TTransport) -} - -func (r *RichTransport) WriteByte(c byte) error { - return writeByte(r.TTransport, c) -} - -func (r *RichTransport) WriteString(s string) (n int, err error) { - return r.Write([]byte(s)) -} - -func (r *RichTransport) RemainingBytes() (num_bytes uint64) { - return r.TTransport.RemainingBytes() -} - -func readByte(r io.Reader) (c byte, err error) { - v := [1]byte{0} - n, err := r.Read(v[0:1]) - if n > 0 && (err == nil || err == io.EOF) { - return v[0], nil - } - if n > 0 && err != nil { - return v[0], err - } - if err != nil { - return 0, err - } - return v[0], nil -} - -func writeByte(w io.Writer, c byte) error { - v := [1]byte{c} - _, err := w.Write(v[0:1]) - return err -} - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go deleted file mode 100644 index 771222999091..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor 
license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -type TSerializer struct { - Transport *TMemoryBuffer - Protocol TProtocol -} - -type TStruct interface { - Write(p TProtocol) error - Read(p TProtocol) error -} - -func NewTSerializer() *TSerializer { - transport := NewTMemoryBufferLen(1024) - protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) - - return &TSerializer{ - transport, - protocol} -} - -func (t *TSerializer) WriteString(msg TStruct) (s string, err error) { - t.Transport.Reset() - - if err = msg.Write(t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(); err != nil { - return - } - if err = t.Transport.Flush(); err != nil { - return - } - - return t.Transport.String(), nil -} - -func (t *TSerializer) Write(msg TStruct) (b []byte, err error) { - t.Transport.Reset() - - if err = msg.Write(t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(); err != nil { - return - } - - if err = t.Transport.Flush(); err != nil { - return - } - - b = append(b, t.Transport.Bytes()...) 
- return -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go deleted file mode 100644 index 412a482d055a..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go +++ /dev/null @@ -1,1337 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "math" - "strconv" -) - -type _ParseContext int - -const ( - _CONTEXT_IN_TOPLEVEL _ParseContext = 1 - _CONTEXT_IN_LIST_FIRST _ParseContext = 2 - _CONTEXT_IN_LIST _ParseContext = 3 - _CONTEXT_IN_OBJECT_FIRST _ParseContext = 4 - _CONTEXT_IN_OBJECT_NEXT_KEY _ParseContext = 5 - _CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6 -) - -func (p _ParseContext) String() string { - switch p { - case _CONTEXT_IN_TOPLEVEL: - return "TOPLEVEL" - case _CONTEXT_IN_LIST_FIRST: - return "LIST-FIRST" - case _CONTEXT_IN_LIST: - return "LIST" - case _CONTEXT_IN_OBJECT_FIRST: - return "OBJECT-FIRST" - case _CONTEXT_IN_OBJECT_NEXT_KEY: - return "OBJECT-NEXT-KEY" - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - return "OBJECT-NEXT-VALUE" - } - return "UNKNOWN-PARSE-CONTEXT" -} - -// JSON protocol implementation for thrift. -// -// This protocol produces/consumes a simple output format -// suitable for parsing by scripting languages. It should not be -// confused with the full-featured TJSONProtocol. 
-// -type TSimpleJSONProtocol struct { - trans TTransport - - parseContextStack []int - dumpContext []int - - writer *bufio.Writer - reader *bufio.Reader -} - -// Constructor -func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { - v := &TSimpleJSONProtocol{trans: t, - writer: bufio.NewWriter(t), - reader: bufio.NewReader(t), - } - v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) - v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) - return v -} - -// Factory -type TSimpleJSONProtocolFactory struct{} - -func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTSimpleJSONProtocol(trans) -} - -func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory { - return &TSimpleJSONProtocolFactory{} -} - -var ( - JSON_COMMA []byte - JSON_COLON []byte - JSON_LBRACE []byte - JSON_RBRACE []byte - JSON_LBRACKET []byte - JSON_RBRACKET []byte - JSON_QUOTE byte - JSON_QUOTE_BYTES []byte - JSON_NULL []byte - JSON_TRUE []byte - JSON_FALSE []byte - JSON_INFINITY string - JSON_NEGATIVE_INFINITY string - JSON_NAN string - JSON_INFINITY_BYTES []byte - JSON_NEGATIVE_INFINITY_BYTES []byte - JSON_NAN_BYTES []byte - json_nonbase_map_elem_bytes []byte -) - -func init() { - JSON_COMMA = []byte{','} - JSON_COLON = []byte{':'} - JSON_LBRACE = []byte{'{'} - JSON_RBRACE = []byte{'}'} - JSON_LBRACKET = []byte{'['} - JSON_RBRACKET = []byte{']'} - JSON_QUOTE = '"' - JSON_QUOTE_BYTES = []byte{'"'} - JSON_NULL = []byte{'n', 'u', 'l', 'l'} - JSON_TRUE = []byte{'t', 'r', 'u', 'e'} - JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'} - JSON_INFINITY = "Infinity" - JSON_NEGATIVE_INFINITY = "-Infinity" - JSON_NAN = "NaN" - JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NAN_BYTES = []byte{'N', 'a', 'N'} - json_nonbase_map_elem_bytes = []byte{']', ',', '['} -} - -func jsonQuote(s string) string { - b, _ := 
json.Marshal(s) - s1 := string(b) - return s1 -} - -func jsonUnquote(s string) (string, bool) { - s1 := new(string) - err := json.Unmarshal([]byte(s), s1) - return *s1, err == nil -} - -func mismatch(expected, actual string) error { - return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) -} - -func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteString(name); e != nil { - return e - } - if e := p.WriteByte(int8(typeId)); e != nil { - return e - } - if e := p.WriteI32(seqId); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteMessageEnd() error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteStructEnd() error { - return p.OutputObjectEnd() -} - -func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - if e := p.WriteString(name); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteFieldEnd() error { - //return p.OutputListEnd() - return nil -} - -func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil } - -func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteByte(int8(keyType)); e != nil { - return e - } - if e := p.WriteByte(int8(valueType)); e != nil { - return e - } - return p.WriteI32(int32(size)) -} - -func (p *TSimpleJSONProtocol) WriteMapEnd() error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TSimpleJSONProtocol) WriteListEnd() error { - return 
p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TSimpleJSONProtocol) WriteSetEnd() error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteBool(b bool) error { - return p.OutputBool(b) -} - -func (p *TSimpleJSONProtocol) WriteByte(b int8) error { - return p.WriteI32(int32(b)) -} - -func (p *TSimpleJSONProtocol) WriteI16(v int16) error { - return p.WriteI32(int32(v)) -} - -func (p *TSimpleJSONProtocol) WriteI32(v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *TSimpleJSONProtocol) WriteI64(v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *TSimpleJSONProtocol) WriteDouble(v float64) error { - return p.OutputF64(v) -} - -func (p *TSimpleJSONProtocol) WriteString(v string) error { - return p.OutputString(v) -} - -func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error { - // JSON library only takes in a string, - // not an arbitrary byte array, to ensure bytes are transmitted - // efficiently we must convert this into a valid JSON string - // therefore we use base64 encoding to avoid excessive escaping/quoting - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewTProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewTProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods. 
-func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - if name, err = p.ReadString(); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte() - typeId = TMessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *TSimpleJSONProtocol) ReadMessageEnd() error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *TSimpleJSONProtocol) ReadStructEnd() error { - return p.ParseObjectEnd() -} - -func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { - if err := p.ParsePreValue(); err != nil { - return "", STOP, 0, err - } - b, _ := p.reader.Peek(1) - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return "", STOP, 0, nil - case JSON_QUOTE: - p.reader.ReadByte() - name, err := p.ParseStringBody() - // simplejson is not meant to be read back into thrift - // - see http://wiki.apache.org/thrift/ThriftUsageJava - // - use JSON instead - if err != nil { - return name, STOP, 0, err - } - return name, STOP, -1, p.ParsePostValue() - /* - if err = p.ParsePostValue(); err != nil { - return name, STOP, 0, err - } - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, STOP, 0, err - } - bType, err := p.ReadByte() - thetype := TType(bType) - if err != nil { - return name, thetype, 0, err - } - id, err := p.ReadI16() - return name, thetype, id, err - */ - } - e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) - return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return "", STOP, 0, NewTProtocolException(io.EOF) 
-} - -func (p *TSimpleJSONProtocol) ReadFieldEnd() error { - return nil - //return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - bKeyType, e := p.ReadByte() - keyType = TType(bKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - bValueType, e := p.ReadByte() - valueType = TType(bValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, err := p.ReadI64() - size = int(iSize) - return keyType, valueType, size, err -} - -func (p *TSimpleJSONProtocol) ReadMapEnd() error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TSimpleJSONProtocol) ReadListEnd() error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TSimpleJSONProtocol) ReadSetEnd() error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadBool() (bool, error) { - var value bool - - if err := p.ParsePreValue(); err != nil { - return value, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 { - switch f[0] { - case JSON_TRUE[0]: - b := make([]byte, len(JSON_TRUE)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_TRUE) { - value = true - } else { - e := fmt.Errorf("Expected \"true\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - break - case JSON_FALSE[0]: - b := make([]byte, len(JSON_FALSE)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_FALSE) { - value = false - } else { - e := fmt.Errorf("Expected \"false\" but 
found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - break - case JSON_NULL[0]: - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_NULL) { - value = false - } else { - e := fmt.Errorf("Expected \"null\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - default: - e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - return value, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ReadByte() (int8, error) { - v, err := p.ReadI64() - return int8(v), err -} - -func (p *TSimpleJSONProtocol) ReadI16() (int16, error) { - v, err := p.ReadI64() - return int16(v), err -} - -func (p *TSimpleJSONProtocol) ReadI32() (int32, error) { - v, err := p.ReadI64() - return int32(v), err -} - -func (p *TSimpleJSONProtocol) ReadI64() (int64, error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *TSimpleJSONProtocol) ReadString() (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return 
v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) Flush() (err error) { - return NewTProtocolException(p.writer.Flush()) -} - -func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *TSimpleJSONProtocol) Transport() TTransport { - return p.trans -} - -func (p *TSimpleJSONProtocol) OutputPreValue() error { - cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) - switch cxt { - case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: - if _, e := p.write(JSON_COMMA); e != nil { - return NewTProtocolException(e) - } - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if _, e := p.write(JSON_COLON); e != nil { - return NewTProtocolException(e) - } - break - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputPostValue() error { - cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST)) - break 
- case _CONTEXT_IN_OBJECT_FIRST: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break - case _CONTEXT_IN_OBJECT_NEXT_KEY: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) - break - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputBool(value bool) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if value { - v = string(JSON_TRUE) - } else { - v = string(JSON_FALSE) - } - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - default: - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputNull() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_NULL); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputF64(value float64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if math.IsNaN(value) { - v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) - } else if math.IsInf(value, 1) { - v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) - } else if math.IsInf(value, -1) { - v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) - } else { - v = strconv.FormatFloat(value, 'g', -1, 64) - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = string(JSON_QUOTE) + v + string(JSON_QUOTE) - default: - } - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func 
(p *TSimpleJSONProtocol) OutputI64(value int64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - v := strconv.FormatInt(value, 10) - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - default: - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputString(s string) error { - if e := p.OutputPreValue(); e != nil { - return e - } - if e := p.OutputStringData(jsonQuote(s)); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputStringData(s string) error { - _, e := p.write([]byte(s)) - return NewTProtocolException(e) -} - -func (p *TSimpleJSONProtocol) OutputObjectBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACE); e != nil { - return NewTProtocolException(e) - } - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST)) - return nil -} - -func (p *TSimpleJSONProtocol) OutputObjectEnd() error { - if _, e := p.write(JSON_RBRACE); e != nil { - return NewTProtocolException(e) - } - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputListBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACKET); e != nil { - return NewTProtocolException(e) - } - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST)) - return nil -} - -func (p *TSimpleJSONProtocol) OutputListEnd() error { - if _, e := p.write(JSON_RBRACKET); e != nil { - return NewTProtocolException(e) - } - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - 
return e - } - if e := p.WriteByte(int8(elemType)); e != nil { - return e - } - if e := p.WriteI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) ParsePreValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - b, _ := p.reader.Peek(1) - switch cxt { - case _CONTEXT_IN_LIST: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACKET[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - break - case _CONTEXT_IN_OBJECT_NEXT_KEY: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if len(b) > 0 { - switch b[0] { - case JSON_COLON[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - break - } - return nil -} - -func (p *TSimpleJSONProtocol) ParsePostValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - p.parseContextStack = 
p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST)) - break - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) - break - } - return nil -} - -func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error { - for { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return nil - } - switch b[0] { - case ' ', '\r', '\n', '\t': - p.reader.ReadByte() - continue - default: - break - } - break - } - return nil -} - -func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { - v, ok := jsonUnquote(string(JSON_QUOTE) + line) - if !ok { - return "", NewTProtocolException(err) - } - return v, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - str := string(JSON_QUOTE) + line + s - v, ok := jsonUnquote(str) - if !ok { - e := fmt.Errorf("Unable to parse as JSON string %s", str) - return "", NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, nil -} - -func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if 
i&0x01 == 1 { - return line, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - v := line + s - return v, nil -} - -func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { - line, err := p.reader.ReadBytes(JSON_QUOTE) - if err != nil { - return line, NewTProtocolException(err) - } - line2 := line[0 : len(line)-1] - l := len(line2) - if (l % 4) != 0 { - pad := 4 - (l % 4) - fill := [...]byte{'=', '=', '='} - line2 = append(line2, fill[:pad]...) - l = len(line2) - } - output := make([]byte, base64.StdEncoding.DecodedLen(l)) - n, err := base64.StdEncoding.Decode(output, line2) - return output[0:n], NewTProtocolException(err) -} - -func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value int64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Int64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value float64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Float64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { - if err := p.ParsePreValue(); err != nil { - return false, err - } - var b []byte - b, err := p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) > 0 && b[0] == JSON_LBRACE[0] { - p.reader.ReadByte() - p.parseContextStack = 
append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST)) - return false, nil - } else if p.safePeekContains(JSON_NULL) { - return true, nil - } - e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b)) - return false, NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *TSimpleJSONProtocol) ParseObjectEnd() error { - if isNull, err := p.readIfNull(); isNull || err != nil { - return err - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { - e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACE[0]) - if err != nil { - return NewTProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', '}': - break - } - } - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - return p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { - if e := p.ParsePreValue(); e != nil { - return false, e - } - var b []byte - b, err = p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST)) - p.reader.ReadByte() - isNull = false - } else if p.safePeekContains(JSON_NULL) { - isNull = true - } else { - err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b) - } - return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err) -} - -func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - bElemType, err := p.ReadByte() - elemType = 
TType(bElemType) - if err != nil { - return elemType, size, err - } - nSize, err2 := p.ReadI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TSimpleJSONProtocol) ParseListEnd() error { - if isNull, err := p.readIfNull(); isNull || err != nil { - return err - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - if cxt != _CONTEXT_IN_LIST { - e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACKET[0]) - if err != nil { - return NewTProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of list \"]\", but found: \"%s\"", line) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): - break - } - } - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL { - return nil - } - return p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) { - e := p.readNonSignificantWhitespace() - if e != nil { - return nil, VOID, NewTProtocolException(e) - } - b, e := p.reader.Peek(1) - if len(b) > 0 { - c := b[0] - switch c { - case JSON_NULL[0]: - buf := make([]byte, len(JSON_NULL)) - _, e := p.reader.Read(buf) - if e != nil { - return nil, VOID, NewTProtocolException(e) - } - if string(JSON_NULL) != string(buf) { - e = mismatch(string(JSON_NULL), string(buf)) - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return nil, VOID, nil - case JSON_QUOTE: - p.reader.ReadByte() - v, e := p.ParseStringBody() - if e != nil { - return v, UTF8, NewTProtocolException(e) - } - if v == JSON_INFINITY { - return INFINITY, DOUBLE, nil - } else if v == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY, DOUBLE, nil - } else if v == 
JSON_NAN { - return NAN, DOUBLE, nil - } - return v, UTF8, nil - case JSON_TRUE[0]: - buf := make([]byte, len(JSON_TRUE)) - _, e := p.reader.Read(buf) - if e != nil { - return true, BOOL, NewTProtocolException(e) - } - if string(JSON_TRUE) != string(buf) { - e := mismatch(string(JSON_TRUE), string(buf)) - return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return true, BOOL, nil - case JSON_FALSE[0]: - buf := make([]byte, len(JSON_FALSE)) - _, e := p.reader.Read(buf) - if e != nil { - return false, BOOL, NewTProtocolException(e) - } - if string(JSON_FALSE) != string(buf) { - e := mismatch(string(JSON_FALSE), string(buf)) - return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return false, BOOL, nil - case JSON_LBRACKET[0]: - _, e := p.reader.ReadByte() - return make([]interface{}, 0), LIST, NewTProtocolException(e) - case JSON_LBRACE[0]: - _, e := p.reader.ReadByte() - return make(map[string]interface{}), STRUCT, NewTProtocolException(e) - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]: - // assume numeric - v, e := p.readNumeric() - return v, DOUBLE, e - default: - e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c)) - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - e = fmt.Errorf("Cannot read a single element while parsing JSON.") - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - -} - -func (p *TSimpleJSONProtocol) readIfNull() (bool, error) { - cont := true - for cont { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return false, nil - } - switch b[0] { - default: - return false, nil - case JSON_NULL[0]: - cont = false - break - case ' ', '\n', '\r', '\t': - p.reader.ReadByte() - break - } - } - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - return true, nil - } - return false, nil -} - -func (p *TSimpleJSONProtocol) readQuoteIfNext() { - b, _ 
:= p.reader.Peek(1) - if len(b) > 0 && b[0] == JSON_QUOTE { - p.reader.ReadByte() - } -} - -func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { - isNull, err := p.readIfNull() - if isNull || err != nil { - return NUMERIC_NULL, err - } - hasDecimalPoint := false - nextCanBeSign := true - hasE := false - MAX_LEN := 40 - buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) - continueFor := true - inQuotes := false - for continueFor { - c, err := p.reader.ReadByte() - if err != nil { - if err == io.EOF { - break - } - return NUMERIC_NULL, NewTProtocolException(err) - } - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - buf.WriteByte(c) - nextCanBeSign = false - case '.': - if hasDecimalPoint { - e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if hasE { - e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasDecimalPoint, nextCanBeSign = true, false - case 'e', 'E': - if hasE { - e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasE, nextCanBeSign = true, true - case '-', '+': - if !nextCanBeSign { - e := fmt.Errorf("Negative sign within number") - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - nextCanBeSign = false - case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: - p.reader.UnreadByte() - continueFor = false - case JSON_NAN[0]: - if buf.Len() == 0 { - buffer := make([]byte, len(JSON_NAN)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_NAN != string(buffer) { - e := 
mismatch(JSON_NAN, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NAN, nil - } else { - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_INFINITY[0]: - if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { - buffer := make([]byte, len(JSON_INFINITY)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_INFINITY != string(buffer) { - e := mismatch(JSON_INFINITY, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return INFINITY, nil - } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { - buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) - buffer[0] = JSON_NEGATIVE_INFINITY[0] - buffer[1] = c - _, e := p.reader.Read(buffer[2:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_NEGATIVE_INFINITY != string(buffer) { - e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NEGATIVE_INFINITY, nil - } else { - e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_QUOTE: - if !inQuotes { - inQuotes = true - } else { - break - } - default: - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - if buf.Len() == 0 { - e := fmt.Errorf("Unable to parse number from empty string ''") - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return 
NewNumericFromJSONString(buf.String(), false), nil -} - -// Safely peeks into the buffer, reading only what is necessary -func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { - for i := 0; i < len(b); i++ { - a, _ := p.reader.Peek(i + 1) - if len(a) == 0 || a[i] != b[i] { - return false - } - } - return true -} - -// Reset the context stack to its initial state. -func (p *TSimpleJSONProtocol) resetContextStack() { - p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)} - p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)} -} - -func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { - n, err := p.writer.Write(b) - if err != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - } - return n, err -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go deleted file mode 100644 index 453899651fc2..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "errors" - "io" -) - -var errTransportInterrupted = errors.New("Transport Interrupted") - -type Flusher interface { - Flush() (err error) -} - -type ReadSizeProvider interface { - RemainingBytes() (num_bytes uint64) -} - - -// Encapsulates the I/O layer -type TTransport interface { - io.ReadWriteCloser - Flusher - ReadSizeProvider - - // Opens the transport for communication - Open() error - - // Returns true if the transport is open - IsOpen() bool -} - -type stringWriter interface { - WriteString(s string) (n int, err error) -} - - -// This is "enchanced" transport with extra capabilities. You need to use one of these -// to construct protocol. -// Notably, TSocket does not implement this interface, and it is always a mistake to use -// TSocket directly in protocol. -type TRichTransport interface { - io.ReadWriter - io.ByteReader - io.ByteWriter - stringWriter - Flusher - ReadSizeProvider -} - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go deleted file mode 100644 index 9505b44612d0..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "io" -) - -type timeoutable interface { - Timeout() bool -} - -// Thrift Transport exception -type TTransportException interface { - TException - TypeId() int - Err() error -} - -const ( - UNKNOWN_TRANSPORT_EXCEPTION = 0 - NOT_OPEN = 1 - ALREADY_OPEN = 2 - TIMED_OUT = 3 - END_OF_FILE = 4 -) - -type tTransportException struct { - typeId int - err error -} - -func (p *tTransportException) TypeId() int { - return p.typeId -} - -func (p *tTransportException) Error() string { - return p.err.Error() -} - -func (p *tTransportException) Err() error { - return p.err -} - -func NewTTransportException(t int, e string) TTransportException { - return &tTransportException{typeId: t, err: errors.New(e)} -} - -func NewTTransportExceptionFromError(e error) TTransportException { - if e == nil { - return nil - } - - if t, ok := e.(TTransportException); ok { - return t - } - - switch v := e.(type) { - case TTransportException: - return v - case timeoutable: - if v.Timeout() { - return &tTransportException{typeId: TIMED_OUT, err: e} - } - } - - if e == io.EOF { - return &tTransportException{typeId: END_OF_FILE, err: e} - } - - return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e} -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go deleted file mode 100644 index 533d1b437533..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Factory class used to create wrapped instance of Transports. -// This is used primarily in servers, which get Transports from -// a ServerTransport and then may want to mutate them (i.e. create -// a BufferedTransport from the underlying base transport) -type TTransportFactory interface { - GetTransport(trans TTransport) TTransport -} - -type tTransportFactory struct{} - -// Return a wrapped instance of the base Transport. -func (p *tTransportFactory) GetTransport(trans TTransport) TTransport { - return trans -} - -func NewTTransportFactory() TTransportFactory { - return &tTransportFactory{} -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/type.go b/vendor/github.com/uber/jaeger-client-go/thrift/type.go deleted file mode 100644 index 4292ffcadb13..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/type.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Type constants in the Thrift protocol -type TType byte - -const ( - STOP = 0 - VOID = 1 - BOOL = 2 - BYTE = 3 - I08 = 3 - DOUBLE = 4 - I16 = 6 - I32 = 8 - I64 = 10 - STRING = 11 - UTF7 = 11 - STRUCT = 12 - MAP = 13 - SET = 14 - LIST = 15 - UTF8 = 16 - UTF16 = 17 - //BINARY = 18 wrong and unusued -) - -var typeNames = map[int]string{ - STOP: "STOP", - VOID: "VOID", - BOOL: "BOOL", - BYTE: "BYTE", - DOUBLE: "DOUBLE", - I16: "I16", - I32: "I32", - I64: "I64", - STRING: "STRING", - STRUCT: "STRUCT", - MAP: "MAP", - SET: "SET", - LIST: "LIST", - UTF8: "UTF8", - UTF16: "UTF16", -} - -func (p TType) String() string { - if s, ok := typeNames[int(p)]; ok { - return s - } - return "Unknown" -} diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go deleted file mode 100644 index a457a7418b22..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/tracer.go +++ /dev/null @@ -1,437 +0,0 @@ -// Copyright (c) 2017-2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "fmt" - "io" - "math/rand" - "os" - "reflect" - "strconv" - "sync" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - - "github.com/uber/jaeger-client-go/internal/baggage" - "github.com/uber/jaeger-client-go/internal/throttler" - "github.com/uber/jaeger-client-go/log" - "github.com/uber/jaeger-client-go/utils" -) - -// Tracer implements opentracing.Tracer. -type Tracer struct { - serviceName string - hostIPv4 uint32 // this is for zipkin endpoint conversion - - sampler Sampler - reporter Reporter - metrics Metrics - logger log.Logger - - timeNow func() time.Time - randomNumber func() uint64 - - options struct { - gen128Bit bool // whether to generate 128bit trace IDs - zipkinSharedRPCSpan bool - highTraceIDGenerator func() uint64 // custom high trace ID generator - maxTagValueLength int - // more options to come - } - // allocator of Span objects - spanAllocator SpanAllocator - - injectors map[interface{}]Injector - extractors map[interface{}]Extractor - - observer compositeObserver - - tags []Tag - process Process - - baggageRestrictionManager baggage.RestrictionManager - baggageSetter *baggageSetter - - debugThrottler throttler.Throttler -} - -// NewTracer creates Tracer implementation that reports tracing to Jaeger. -// The returned io.Closer can be used in shutdown hooks to ensure that the internal -// queue of the Reporter is drained and all buffered spans are submitted to collectors. 
-func NewTracer( - serviceName string, - sampler Sampler, - reporter Reporter, - options ...TracerOption, -) (opentracing.Tracer, io.Closer) { - t := &Tracer{ - serviceName: serviceName, - sampler: sampler, - reporter: reporter, - injectors: make(map[interface{}]Injector), - extractors: make(map[interface{}]Extractor), - metrics: *NewNullMetrics(), - spanAllocator: simpleSpanAllocator{}, - } - - for _, option := range options { - option(t) - } - - // register default injectors/extractors unless they are already provided via options - textPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics) - t.addCodec(opentracing.TextMap, textPropagator, textPropagator) - - httpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics) - t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator) - - binaryPropagator := NewBinaryPropagator(t) - t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator) - - // TODO remove after TChannel supports OpenTracing - interopPropagator := &jaegerTraceContextPropagator{tracer: t} - t.addCodec(SpanContextFormat, interopPropagator, interopPropagator) - - zipkinPropagator := &zipkinPropagator{tracer: t} - t.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator) - - if t.baggageRestrictionManager != nil { - t.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics) - } else { - t.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics) - } - if t.debugThrottler == nil { - t.debugThrottler = throttler.DefaultThrottler{} - } - - if t.randomNumber == nil { - seedGenerator := utils.NewRand(time.Now().UnixNano()) - pool := sync.Pool{ - New: func() interface{} { - return rand.NewSource(seedGenerator.Int63()) - }, - } - - t.randomNumber = func() uint64 { - generator := pool.Get().(rand.Source) - number := uint64(generator.Int63()) - pool.Put(generator) - return number - } - } - if t.timeNow == nil { - t.timeNow = time.Now - } - 
if t.logger == nil { - t.logger = log.NullLogger - } - // Set tracer-level tags - t.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion}) - if hostname, err := os.Hostname(); err == nil { - t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname}) - } - if ip, err := utils.HostIP(); err == nil { - t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()}) - t.hostIPv4 = utils.PackIPAsUint32(ip) - } else { - t.logger.Error("Unable to determine this host's IP address: " + err.Error()) - } - - if t.options.gen128Bit { - if t.options.highTraceIDGenerator == nil { - t.options.highTraceIDGenerator = t.randomNumber - } - } else if t.options.highTraceIDGenerator != nil { - t.logger.Error("Overriding high trace ID generator but not generating " + - "128 bit trace IDs, consider enabling the \"Gen128Bit\" option") - } - if t.options.maxTagValueLength == 0 { - t.options.maxTagValueLength = DefaultMaxTagValueLength - } - t.process = Process{ - Service: serviceName, - UUID: strconv.FormatUint(t.randomNumber(), 16), - Tags: t.tags, - } - if throttler, ok := t.debugThrottler.(ProcessSetter); ok { - throttler.SetProcess(t.process) - } - - return t, t -} - -// addCodec adds registers injector and extractor for given propagation format if not already defined. -func (t *Tracer) addCodec(format interface{}, injector Injector, extractor Extractor) { - if _, ok := t.injectors[format]; !ok { - t.injectors[format] = injector - } - if _, ok := t.extractors[format]; !ok { - t.extractors[format] = extractor - } -} - -// StartSpan implements StartSpan() method of opentracing.Tracer. 
-func (t *Tracer) StartSpan( - operationName string, - options ...opentracing.StartSpanOption, -) opentracing.Span { - sso := opentracing.StartSpanOptions{} - for _, o := range options { - o.Apply(&sso) - } - return t.startSpanWithOptions(operationName, sso) -} - -func (t *Tracer) startSpanWithOptions( - operationName string, - options opentracing.StartSpanOptions, -) opentracing.Span { - if options.StartTime.IsZero() { - options.StartTime = t.timeNow() - } - - // Predicate whether the given span context is a valid reference - // which may be used as parent / debug ID / baggage items source - isValidReference := func(ctx SpanContext) bool { - return ctx.IsValid() || ctx.isDebugIDContainerOnly() || len(ctx.baggage) != 0 - } - - var references []Reference - var parent SpanContext - var hasParent bool // need this because `parent` is a value, not reference - for _, ref := range options.References { - ctx, ok := ref.ReferencedContext.(SpanContext) - if !ok { - t.logger.Error(fmt.Sprintf( - "Reference contains invalid type of SpanReference: %s", - reflect.ValueOf(ref.ReferencedContext))) - continue - } - if !isValidReference(ctx) { - continue - } - references = append(references, Reference{Type: ref.Type, Context: ctx}) - if !hasParent { - parent = ctx - hasParent = ref.Type == opentracing.ChildOfRef - } - } - if !hasParent && isValidReference(parent) { - // If ChildOfRef wasn't found but a FollowFromRef exists, use the context from - // the FollowFromRef as the parent - hasParent = true - } - - rpcServer := false - if v, ok := options.Tags[ext.SpanKindRPCServer.Key]; ok { - rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum)) - } - - var samplerTags []Tag - var ctx SpanContext - newTrace := false - if !hasParent || !parent.IsValid() { - newTrace = true - ctx.traceID.Low = t.randomID() - if t.options.gen128Bit { - ctx.traceID.High = t.options.highTraceIDGenerator() - } - ctx.spanID = SpanID(ctx.traceID.Low) - ctx.parentID = 0 - 
ctx.flags = byte(0) - if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) { - ctx.flags |= (flagSampled | flagDebug) - samplerTags = []Tag{{key: JaegerDebugHeader, value: parent.debugID}} - } else if sampled, tags := t.sampler.IsSampled(ctx.traceID, operationName); sampled { - ctx.flags |= flagSampled - samplerTags = tags - } - } else { - ctx.traceID = parent.traceID - if rpcServer && t.options.zipkinSharedRPCSpan { - // Support Zipkin's one-span-per-RPC model - ctx.spanID = parent.spanID - ctx.parentID = parent.parentID - } else { - ctx.spanID = SpanID(t.randomID()) - ctx.parentID = parent.spanID - } - ctx.flags = parent.flags - } - if hasParent { - // copy baggage items - if l := len(parent.baggage); l > 0 { - ctx.baggage = make(map[string]string, len(parent.baggage)) - for k, v := range parent.baggage { - ctx.baggage[k] = v - } - } - } - - sp := t.newSpan() - sp.context = ctx - sp.observer = t.observer.OnStartSpan(sp, operationName, options) - return t.startSpanInternal( - sp, - operationName, - options.StartTime, - samplerTags, - options.Tags, - newTrace, - rpcServer, - references, - ) -} - -// Inject implements Inject() method of opentracing.Tracer -func (t *Tracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error { - c, ok := ctx.(SpanContext) - if !ok { - return opentracing.ErrInvalidSpanContext - } - if injector, ok := t.injectors[format]; ok { - return injector.Inject(c, carrier) - } - return opentracing.ErrUnsupportedFormat -} - -// Extract implements Extract() method of opentracing.Tracer -func (t *Tracer) Extract( - format interface{}, - carrier interface{}, -) (opentracing.SpanContext, error) { - if extractor, ok := t.extractors[format]; ok { - spanCtx, err := extractor.Extract(carrier) - if err != nil { - return nil, err // ensure returned spanCtx is nil - } - return spanCtx, nil - } - return nil, opentracing.ErrUnsupportedFormat -} - -// Close releases all resources used by the Tracer 
and flushes any remaining buffered spans. -func (t *Tracer) Close() error { - t.reporter.Close() - t.sampler.Close() - if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok { - mgr.Close() - } - if throttler, ok := t.debugThrottler.(io.Closer); ok { - throttler.Close() - } - return nil -} - -// Tags returns a slice of tracer-level tags. -func (t *Tracer) Tags() []opentracing.Tag { - tags := make([]opentracing.Tag, len(t.tags)) - for i, tag := range t.tags { - tags[i] = opentracing.Tag{Key: tag.key, Value: tag.value} - } - return tags -} - -// newSpan returns an instance of a clean Span object. -// If options.PoolSpans is true, the spans are retrieved from an object pool. -func (t *Tracer) newSpan() *Span { - return t.spanAllocator.Get() -} - -func (t *Tracer) startSpanInternal( - sp *Span, - operationName string, - startTime time.Time, - internalTags []Tag, - tags opentracing.Tags, - newTrace bool, - rpcServer bool, - references []Reference, -) *Span { - sp.tracer = t - sp.operationName = operationName - sp.startTime = startTime - sp.duration = 0 - sp.references = references - sp.firstInProcess = rpcServer || sp.context.parentID == 0 - if len(tags) > 0 || len(internalTags) > 0 { - sp.tags = make([]Tag, len(internalTags), len(tags)+len(internalTags)) - copy(sp.tags, internalTags) - for k, v := range tags { - sp.observer.OnSetTag(k, v) - if k == string(ext.SamplingPriority) && !setSamplingPriority(sp, v) { - continue - } - sp.setTagNoLocking(k, v) - } - } - // emit metrics - if sp.context.IsSampled() { - t.metrics.SpansStartedSampled.Inc(1) - if newTrace { - // We cannot simply check for parentID==0 because in Zipkin model the - // server-side RPC span has the exact same trace/span/parent IDs as the - // calling client-side span, but obviously the server side span is - // no longer a root span of the trace. 
- t.metrics.TracesStartedSampled.Inc(1) - } else if sp.firstInProcess { - t.metrics.TracesJoinedSampled.Inc(1) - } - } else { - t.metrics.SpansStartedNotSampled.Inc(1) - if newTrace { - t.metrics.TracesStartedNotSampled.Inc(1) - } else if sp.firstInProcess { - t.metrics.TracesJoinedNotSampled.Inc(1) - } - } - return sp -} - -func (t *Tracer) reportSpan(sp *Span) { - t.metrics.SpansFinished.Inc(1) - - // Note: if the reporter is processing Span asynchronously need to Retain() it - // otherwise, in the racing condition will be rewritten span data before it will be sent - // * To remove object use method span.Release() - if sp.context.IsSampled() { - t.reporter.Report(sp) - } - - sp.Release() -} - -// randomID generates a random trace/span ID, using tracer.random() generator. -// It never returns 0. -func (t *Tracer) randomID() uint64 { - val := t.randomNumber() - for val == 0 { - val = t.randomNumber() - } - return val -} - -// (NB) span must hold the lock before making this call -func (t *Tracer) setBaggage(sp *Span, key, value string) { - t.baggageSetter.setBaggage(sp, key, value) -} - -// (NB) span must hold the lock before making this call -func (t *Tracer) isDebugAllowed(operation string) bool { - return t.debugThrottler.IsAllowed(operation) -} diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go deleted file mode 100644 index ecb172762e25..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/tracer_options.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "time" - - "github.com/opentracing/opentracing-go" - - "github.com/uber/jaeger-client-go/internal/baggage" - "github.com/uber/jaeger-client-go/internal/throttler" -) - -// TracerOption is a function that sets some option on the tracer -type TracerOption func(tracer *Tracer) - -// TracerOptions is a factory for all available TracerOption's -var TracerOptions tracerOptions - -type tracerOptions struct{} - -// Metrics creates a TracerOption that initializes Metrics on the tracer, -// which is used to emit statistics. -func (tracerOptions) Metrics(m *Metrics) TracerOption { - return func(tracer *Tracer) { - tracer.metrics = *m - } -} - -// Logger creates a TracerOption that gives the tracer a Logger. -func (tracerOptions) Logger(logger Logger) TracerOption { - return func(tracer *Tracer) { - tracer.logger = logger - } -} - -func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption { - return func(tracer *Tracer) { - if headerKeys == nil { - return - } - textPropagator := NewTextMapPropagator(headerKeys.ApplyDefaults(), tracer.metrics) - tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator) - - httpHeaderPropagator := NewHTTPHeaderPropagator(headerKeys.ApplyDefaults(), tracer.metrics) - tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator) - } -} - -// TimeNow creates a TracerOption that gives the tracer a function -// used to generate timestamps for spans. 
-func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption { - return func(tracer *Tracer) { - tracer.timeNow = timeNow - } -} - -// RandomNumber creates a TracerOption that gives the tracer -// a thread-safe random number generator function for generating trace IDs. -func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption { - return func(tracer *Tracer) { - tracer.randomNumber = randomNumber - } -} - -// PoolSpans creates a TracerOption that tells the tracer whether it should use -// an object pool to minimize span allocations. -// This should be used with care, only if the service is not running any async tasks -// that can access parent spans after those spans have been finished. -func (tracerOptions) PoolSpans(poolSpans bool) TracerOption { - return func(tracer *Tracer) { - if poolSpans { - tracer.spanAllocator = newSyncPollSpanAllocator() - } else { - tracer.spanAllocator = simpleSpanAllocator{} - } - } -} - -// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process. -// If not set, the factory method will obtain the current IP address. -// The TracerOption is deprecated; the tracer will attempt to automatically detect the IP. 
-func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption { - return func(tracer *Tracer) { - tracer.hostIPv4 = hostIPv4 - } -} - -func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption { - return func(tracer *Tracer) { - tracer.injectors[format] = injector - } -} - -func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption { - return func(tracer *Tracer) { - tracer.extractors[format] = extractor - } -} - -func (t tracerOptions) Observer(observer Observer) TracerOption { - return t.ContribObserver(&oldObserver{obs: observer}) -} - -func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption { - return func(tracer *Tracer) { - tracer.observer.append(observer) - } -} - -func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption { - return func(tracer *Tracer) { - tracer.options.gen128Bit = gen128Bit - } -} - -func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption { - return func(tracer *Tracer) { - tracer.options.highTraceIDGenerator = highTraceIDGenerator - } -} - -func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption { - return func(tracer *Tracer) { - tracer.options.maxTagValueLength = maxTagValueLength - } -} - -func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption { - return func(tracer *Tracer) { - tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan - } -} - -func (tracerOptions) Tag(key string, value interface{}) TracerOption { - return func(tracer *Tracer) { - tracer.tags = append(tracer.tags, Tag{key: key, value: value}) - } -} - -func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption { - return func(tracer *Tracer) { - tracer.baggageRestrictionManager = mgr - } -} - -func (tracerOptions) DebugThrottler(throttler throttler.Throttler) TracerOption { - return func(tracer *Tracer) { - tracer.debugThrottler = throttler - } -} diff --git 
a/vendor/github.com/uber/jaeger-client-go/transport.go b/vendor/github.com/uber/jaeger-client-go/transport.go deleted file mode 100644 index c5f5b19551fe..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/transport.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "io" -) - -// Transport abstracts the method of sending spans out of process. -// Implementations are NOT required to be thread-safe; the RemoteReporter -// is expected to only call methods on the Transport from the same go-routine. -type Transport interface { - // Append converts the span to the wire representation and adds it - // to sender's internal buffer. If the buffer exceeds its designated - // size, the transport should call Flush() and return the number of spans - // flushed, otherwise return 0. If error is returned, the returned number - // of spans is treated as failed span, and reported to metrics accordingly. - Append(span *Span) (int, error) - - // Flush submits the internal buffer to the remote server. It returns the - // number of spans flushed. If error is returned, the returned number of - // spans is treated as failed span, and reported to metrics accordingly. 
- Flush() (int, error) - - io.Closer -} diff --git a/vendor/github.com/uber/jaeger-client-go/transport/doc.go b/vendor/github.com/uber/jaeger-client-go/transport/doc.go deleted file mode 100644 index 6b961fb637e5..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/transport/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package transport defines various transports that can be used with -// RemoteReporter to send spans out of process. Transport is responsible -// for serializing the spans into a specific format suitable for sending -// to the tracing backend. Examples may include Thrift over UDP, Thrift -// or JSON over HTTP, Thrift over Kafka, etc. -// -// Implementations are NOT required to be thread-safe; the RemoteReporter -// is expected to only call methods on the Transport from the same go-routine. -package transport diff --git a/vendor/github.com/uber/jaeger-client-go/transport/http.go b/vendor/github.com/uber/jaeger-client-go/transport/http.go deleted file mode 100644 index bc1b3e6b03cd..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/transport/http.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/uber/jaeger-client-go/thrift" - - "github.com/uber/jaeger-client-go" - j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" -) - -// Default timeout for http request in seconds -const defaultHTTPTimeout = time.Second * 5 - -// HTTPTransport implements Transport by forwarding spans to a http server. -type HTTPTransport struct { - url string - client *http.Client - batchSize int - spans []*j.Span - process *j.Process - httpCredentials *HTTPBasicAuthCredentials -} - -// HTTPBasicAuthCredentials stores credentials for HTTP basic auth. -type HTTPBasicAuthCredentials struct { - username string - password string -} - -// HTTPOption sets a parameter for the HttpCollector -type HTTPOption func(c *HTTPTransport) - -// HTTPTimeout sets maximum timeout for http request. -func HTTPTimeout(duration time.Duration) HTTPOption { - return func(c *HTTPTransport) { c.client.Timeout = duration } -} - -// HTTPBatchSize sets the maximum batch size, after which a collect will be -// triggered. The default batch size is 100 spans. 
-func HTTPBatchSize(n int) HTTPOption { - return func(c *HTTPTransport) { c.batchSize = n } -} - -// HTTPBasicAuth sets the credentials required to perform HTTP basic auth -func HTTPBasicAuth(username string, password string) HTTPOption { - return func(c *HTTPTransport) { - c.httpCredentials = &HTTPBasicAuthCredentials{username: username, password: password} - } -} - -// HTTPRoundTripper configures the underlying Transport on the *http.Client -// that is used -func HTTPRoundTripper(transport http.RoundTripper) HTTPOption { - return func(c *HTTPTransport) { - c.client.Transport = transport - } -} - -// NewHTTPTransport returns a new HTTP-backend transport. url should be an http -// url of the collector to handle POST request, typically something like: -// http://hostname:14268/api/traces?format=jaeger.thrift -func NewHTTPTransport(url string, options ...HTTPOption) *HTTPTransport { - c := &HTTPTransport{ - url: url, - client: &http.Client{Timeout: defaultHTTPTimeout}, - batchSize: 100, - spans: []*j.Span{}, - } - - for _, option := range options { - option(c) - } - return c -} - -// Append implements Transport. -func (c *HTTPTransport) Append(span *jaeger.Span) (int, error) { - if c.process == nil { - c.process = jaeger.BuildJaegerProcessThrift(span) - } - jSpan := jaeger.BuildJaegerThrift(span) - c.spans = append(c.spans, jSpan) - if len(c.spans) >= c.batchSize { - return c.Flush() - } - return 0, nil -} - -// Flush implements Transport. -func (c *HTTPTransport) Flush() (int, error) { - count := len(c.spans) - if count == 0 { - return 0, nil - } - err := c.send(c.spans) - c.spans = c.spans[:0] - return count, err -} - -// Close implements Transport. 
-func (c *HTTPTransport) Close() error { - return nil -} - -func (c *HTTPTransport) send(spans []*j.Span) error { - batch := &j.Batch{ - Spans: spans, - Process: c.process, - } - body, err := serializeThrift(batch) - if err != nil { - return err - } - req, err := http.NewRequest("POST", c.url, body) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/x-thrift") - - if c.httpCredentials != nil { - req.SetBasicAuth(c.httpCredentials.username, c.httpCredentials.password) - } - - resp, err := c.client.Do(req) - if err != nil { - return err - } - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - if resp.StatusCode >= http.StatusBadRequest { - return fmt.Errorf("error from collector: %d", resp.StatusCode) - } - return nil -} - -func serializeThrift(obj thrift.TStruct) (*bytes.Buffer, error) { - t := thrift.NewTMemoryBuffer() - p := thrift.NewTBinaryProtocolTransport(t) - if err := obj.Write(p); err != nil { - return nil, err - } - return t.Buffer, nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go deleted file mode 100644 index 7b9ccf937449..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/transport_udp.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "errors" - "fmt" - - "github.com/uber/jaeger-client-go/thrift" - - j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" - "github.com/uber/jaeger-client-go/utils" -) - -// Empirically obtained constant for how many bytes in the message are used for envelope. -// The total datagram size is: -// sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize -// There is a unit test `TestEmitBatchOverhead` that validates this number. -// Note that due to the use of Compact Thrift protocol, overhead grows with the number of spans -// in the batch, because the length of the list is encoded as varint32, as well as SeqId. -const emitBatchOverhead = 30 - -var errSpanTooLarge = errors.New("Span is too large") - -type udpSender struct { - client *utils.AgentClientUDP - maxPacketSize int // max size of datagram in bytes - maxSpanBytes int // max number of bytes to record spans (excluding envelope) in the datagram - byteBufferSize int // current number of span bytes accumulated in the buffer - spanBuffer []*j.Span // spans buffered before a flush - thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span - thriftProtocol thrift.TProtocol - process *j.Process - processByteSize int -} - -// NewUDPTransport creates a reporter that submits spans to jaeger-agent -func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) { - if len(hostPort) == 0 { - hostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort) - } - if maxPacketSize == 0 { - maxPacketSize = utils.UDPPacketMaxLength - } - - protocolFactory := thrift.NewTCompactProtocolFactory() - - // Each span is first written to thriftBuffer to determine its size in bytes. 
- thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) - thriftProtocol := protocolFactory.GetProtocol(thriftBuffer) - - client, err := utils.NewAgentClientUDP(hostPort, maxPacketSize) - if err != nil { - return nil, err - } - - sender := &udpSender{ - client: client, - maxSpanBytes: maxPacketSize - emitBatchOverhead, - thriftBuffer: thriftBuffer, - thriftProtocol: thriftProtocol} - return sender, nil -} - -func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int { - s.thriftBuffer.Reset() - thriftStruct.Write(s.thriftProtocol) - return s.thriftBuffer.Len() -} - -func (s *udpSender) Append(span *Span) (int, error) { - if s.process == nil { - s.process = BuildJaegerProcessThrift(span) - s.processByteSize = s.calcSizeOfSerializedThrift(s.process) - s.byteBufferSize += s.processByteSize - } - jSpan := BuildJaegerThrift(span) - spanSize := s.calcSizeOfSerializedThrift(jSpan) - if spanSize > s.maxSpanBytes { - return 1, errSpanTooLarge - } - - s.byteBufferSize += spanSize - if s.byteBufferSize <= s.maxSpanBytes { - s.spanBuffer = append(s.spanBuffer, jSpan) - if s.byteBufferSize < s.maxSpanBytes { - return 0, nil - } - return s.Flush() - } - // the latest span did not fit in the buffer - n, err := s.Flush() - s.spanBuffer = append(s.spanBuffer, jSpan) - s.byteBufferSize = spanSize + s.processByteSize - return n, err -} - -func (s *udpSender) Flush() (int, error) { - n := len(s.spanBuffer) - if n == 0 { - return 0, nil - } - err := s.client.EmitBatch(&j.Batch{Process: s.process, Spans: s.spanBuffer}) - s.resetBuffers() - - return n, err -} - -func (s *udpSender) Close() error { - return s.client.Close() -} - -func (s *udpSender) resetBuffers() { - for i := range s.spanBuffer { - s.spanBuffer[i] = nil - } - s.spanBuffer = s.spanBuffer[:0] - s.byteBufferSize = s.processByteSize -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go deleted file mode 100644 index 
237211f8224b..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" -) - -// GetJSON makes an HTTP call to the specified URL and parses the returned JSON into `out`. -func GetJSON(url string, out interface{}) error { - resp, err := http.Get(url) - if err != nil { - return err - } - return ReadJSON(resp, out) -} - -// ReadJSON reads JSON from http.Response and parses it into `out` -func ReadJSON(resp *http.Response, out interface{}) error { - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - return fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body) - } - - if out == nil { - io.Copy(ioutil.Discard, resp.Body) - return nil - } - - decoder := json.NewDecoder(resp.Body) - return decoder.Decode(out) -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/localip.go b/vendor/github.com/uber/jaeger-client-go/utils/localip.go deleted file mode 100644 index b51af7713f7c..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/localip.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "errors" - "net" -) - -// This code is borrowed from https://github.com/uber/tchannel-go/blob/dev/localip.go - -// scoreAddr scores how likely the given addr is to be a remote address and returns the -// IP to use when listening. Any address which receives a negative score should not be used. -// Scores are calculated as: -// -1 for any unknown IP addresses. -// +300 for IPv4 addresses -// +100 for non-local addresses, extra +100 for "up" interaces. -func scoreAddr(iface net.Interface, addr net.Addr) (int, net.IP) { - var ip net.IP - if netAddr, ok := addr.(*net.IPNet); ok { - ip = netAddr.IP - } else if netIP, ok := addr.(*net.IPAddr); ok { - ip = netIP.IP - } else { - return -1, nil - } - - var score int - if ip.To4() != nil { - score += 300 - } - if iface.Flags&net.FlagLoopback == 0 && !ip.IsLoopback() { - score += 100 - if iface.Flags&net.FlagUp != 0 { - score += 100 - } - } - return score, ip -} - -// HostIP tries to find an IP that can be used by other machines to reach this machine. -func HostIP() (net.IP, error) { - interfaces, err := net.Interfaces() - if err != nil { - return nil, err - } - - bestScore := -1 - var bestIP net.IP - // Select the highest scoring IP as the best IP. - for _, iface := range interfaces { - addrs, err := iface.Addrs() - if err != nil { - // Skip this interface if there is an error. 
- continue - } - - for _, addr := range addrs { - score, ip := scoreAddr(iface, addr) - if score > bestScore { - bestScore = score - bestIP = ip - } - } - } - - if bestScore == -1 { - return nil, errors.New("no addresses to listen on") - } - - return bestIP, nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rand.go b/vendor/github.com/uber/jaeger-client-go/utils/rand.go deleted file mode 100644 index 9875f7f55cbd..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/rand.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "math/rand" - "sync" -) - -// lockedSource allows a random number generator to be used by multiple goroutines concurrently. -// The code is very similar to math/rand.lockedSource, which is unfortunately not exposed. -type lockedSource struct { - mut sync.Mutex - src rand.Source -} - -// NewRand returns a rand.Rand that is threadsafe. 
-func NewRand(seed int64) *rand.Rand { - return rand.New(&lockedSource{src: rand.NewSource(seed)}) -} - -func (r *lockedSource) Int63() (n int64) { - r.mut.Lock() - n = r.src.Int63() - r.mut.Unlock() - return -} - -// Seed implements Seed() of Source -func (r *lockedSource) Seed(seed int64) { - r.mut.Lock() - r.src.Seed(seed) - r.mut.Unlock() -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go deleted file mode 100644 index 1b8db9758486..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "sync" - "time" -) - -// RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits. -type RateLimiter interface { - CheckCredit(itemCost float64) bool -} - -type rateLimiter struct { - sync.Mutex - - creditsPerSecond float64 - balance float64 - maxBalance float64 - lastTick time.Time - - timeNow func() time.Time -} - -// NewRateLimiter creates a new rate limiter based on leaky bucket algorithm, formulated in terms of a -// credits balance that is replenished every time CheckCredit() method is called (tick) by the amount proportional -// to the time elapsed since the last tick, up to max of creditsPerSecond. 
A call to CheckCredit() takes a cost -// of an item we want to pay with the balance. If the balance exceeds the cost of the item, the item is "purchased" -// and the balance reduced, indicated by returned value of true. Otherwise the balance is unchanged and return false. -// -// This can be used to limit a rate of messages emitted by a service by instantiating the Rate Limiter with the -// max number of messages a service is allowed to emit per second, and calling CheckCredit(1.0) for each message -// to determine if the message is within the rate limit. -// -// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput -// as bytes/second, and calling CheckCredit() with the actual message size. -func NewRateLimiter(creditsPerSecond, maxBalance float64) RateLimiter { - return &rateLimiter{ - creditsPerSecond: creditsPerSecond, - balance: maxBalance, - maxBalance: maxBalance, - lastTick: time.Now(), - timeNow: time.Now} -} - -func (b *rateLimiter) CheckCredit(itemCost float64) bool { - b.Lock() - defer b.Unlock() - // calculate how much time passed since the last tick, and update current tick - currentTime := b.timeNow() - elapsedTime := currentTime.Sub(b.lastTick) - b.lastTick = currentTime - // calculate how much credit have we accumulated since the last tick - b.balance += elapsedTime.Seconds() * b.creditsPerSecond - if b.balance > b.maxBalance { - b.balance = b.maxBalance - } - // if we have enough credits to pay for current item, then reduce balance and allow - if b.balance >= itemCost { - b.balance -= itemCost - return true - } - return false -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go deleted file mode 100644 index 6f042073d631..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "errors" - "fmt" - "io" - "net" - - "github.com/uber/jaeger-client-go/thrift" - - "github.com/uber/jaeger-client-go/thrift-gen/agent" - "github.com/uber/jaeger-client-go/thrift-gen/jaeger" - "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" -) - -// UDPPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent -const UDPPacketMaxLength = 65000 - -// AgentClientUDP is a UDP client to Jaeger agent that implements agent.Agent interface. -type AgentClientUDP struct { - agent.Agent - io.Closer - - connUDP *net.UDPConn - client *agent.AgentClient - maxPacketSize int // max size of datagram in bytes - thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span -} - -// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. 
-func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) { - if maxPacketSize == 0 { - maxPacketSize = UDPPacketMaxLength - } - - thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) - protocolFactory := thrift.NewTCompactProtocolFactory() - client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory) - - destAddr, err := net.ResolveUDPAddr("udp", hostPort) - if err != nil { - return nil, err - } - - connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr) - if err != nil { - return nil, err - } - if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil { - return nil, err - } - - clientUDP := &AgentClientUDP{ - connUDP: connUDP, - client: client, - maxPacketSize: maxPacketSize, - thriftBuffer: thriftBuffer} - return clientUDP, nil -} - -// EmitZipkinBatch implements EmitZipkinBatch() of Agent interface -func (a *AgentClientUDP) EmitZipkinBatch(spans []*zipkincore.Span) error { - return errors.New("Not implemented") -} - -// EmitBatch implements EmitBatch() of Agent interface -func (a *AgentClientUDP) EmitBatch(batch *jaeger.Batch) error { - a.thriftBuffer.Reset() - a.client.SeqId = 0 // we have no need for distinct SeqIds for our one-way UDP messages - if err := a.client.EmitBatch(batch); err != nil { - return err - } - if a.thriftBuffer.Len() > a.maxPacketSize { - return fmt.Errorf("Data does not fit within one UDP packet; size %d, max %d, spans %d", - a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans)) - } - _, err := a.connUDP.Write(a.thriftBuffer.Bytes()) - return err -} - -// Close implements Close() of io.Closer and closes the underlying UDP connection. 
-func (a *AgentClientUDP) Close() error { - return a.connUDP.Close() -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/utils.go b/vendor/github.com/uber/jaeger-client-go/utils/utils.go deleted file mode 100644 index ac3c325d1ede..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/utils.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "encoding/binary" - "errors" - "net" - "strconv" - "strings" - "time" -) - -var ( - // ErrEmptyIP an error for empty ip strings - ErrEmptyIP = errors.New("empty string given for ip") - - // ErrNotHostColonPort an error for invalid host port string - ErrNotHostColonPort = errors.New("expecting host:port") - - // ErrNotFourOctets an error for the wrong number of octets after splitting a string - ErrNotFourOctets = errors.New("Wrong number of octets") -) - -// ParseIPToUint32 converts a string ip (e.g. 
"x.y.z.w") to an uint32 -func ParseIPToUint32(ip string) (uint32, error) { - if ip == "" { - return 0, ErrEmptyIP - } - - if ip == "localhost" { - return 127<<24 | 1, nil - } - - octets := strings.Split(ip, ".") - if len(octets) != 4 { - return 0, ErrNotFourOctets - } - - var intIP uint32 - for i := 0; i < 4; i++ { - octet, err := strconv.Atoi(octets[i]) - if err != nil { - return 0, err - } - intIP = (intIP << 8) | uint32(octet) - } - - return intIP, nil -} - -// ParsePort converts port number from string to uin16 -func ParsePort(portString string) (uint16, error) { - port, err := strconv.ParseUint(portString, 10, 16) - return uint16(port), err -} - -// PackIPAsUint32 packs an IPv4 as uint32 -func PackIPAsUint32(ip net.IP) uint32 { - if ipv4 := ip.To4(); ipv4 != nil { - return binary.BigEndian.Uint32(ipv4) - } - return 0 -} - -// TimeToMicrosecondsSinceEpochInt64 converts Go time.Time to a long -// representing time since epoch in microseconds, which is used expected -// in the Jaeger spans encoded as Thrift. -func TimeToMicrosecondsSinceEpochInt64(t time.Time) int64 { - // ^^^ Passing time.Time by value is faster than passing a pointer! - // BenchmarkTimeByValue-8 2000000000 1.37 ns/op - // BenchmarkTimeByPtr-8 2000000000 1.98 ns/op - - return t.UnixNano() / 1000 -} diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go deleted file mode 100644 index 636952b7f1d0..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/zipkin.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "github.com/opentracing/opentracing-go" -) - -// ZipkinSpanFormat is an OpenTracing carrier format constant -const ZipkinSpanFormat = "zipkin-span-format" - -// ExtractableZipkinSpan is a type of Carrier used for integration with Zipkin-aware -// RPC frameworks (like TChannel). It does not support baggage, only trace IDs. -type ExtractableZipkinSpan interface { - TraceID() uint64 - SpanID() uint64 - ParentID() uint64 - Flags() byte -} - -// InjectableZipkinSpan is a type of Carrier used for integration with Zipkin-aware -// RPC frameworks (like TChannel). It does not support baggage, only trace IDs. 
-type InjectableZipkinSpan interface { - SetTraceID(traceID uint64) - SetSpanID(spanID uint64) - SetParentID(parentID uint64) - SetFlags(flags byte) -} - -type zipkinPropagator struct { - tracer *Tracer -} - -func (p *zipkinPropagator) Inject( - ctx SpanContext, - abstractCarrier interface{}, -) error { - carrier, ok := abstractCarrier.(InjectableZipkinSpan) - if !ok { - return opentracing.ErrInvalidCarrier - } - - carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs - carrier.SetSpanID(uint64(ctx.SpanID())) - carrier.SetParentID(uint64(ctx.ParentID())) - carrier.SetFlags(ctx.flags) - return nil -} - -func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { - carrier, ok := abstractCarrier.(ExtractableZipkinSpan) - if !ok { - return emptyContext, opentracing.ErrInvalidCarrier - } - if carrier.TraceID() == 0 { - return emptyContext, opentracing.ErrSpanContextNotFound - } - var ctx SpanContext - ctx.traceID.Low = carrier.TraceID() - ctx.spanID = SpanID(carrier.SpanID()) - ctx.parentID = SpanID(carrier.ParentID()) - ctx.flags = carrier.Flags() - return ctx, nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go deleted file mode 100644 index eb31c4369e42..000000000000 --- a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "encoding/binary" - "fmt" - "time" - - "github.com/opentracing/opentracing-go/ext" - - "github.com/uber/jaeger-client-go/internal/spanlog" - z "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" - "github.com/uber/jaeger-client-go/utils" -) - -const ( - // Zipkin UI does not work well with non-string tag values - allowPackedNumbers = false -) - -var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){ - string(ext.SpanKind): setSpanKind, - string(ext.PeerHostIPv4): setPeerIPv4, - string(ext.PeerPort): setPeerPort, - string(ext.PeerService): setPeerService, - TracerIPTagKey: removeTag, -} - -// BuildZipkinThrift builds thrift span based on internal span. -func BuildZipkinThrift(s *Span) *z.Span { - span := &zipkinSpan{Span: s} - span.handleSpecialTags() - parentID := int64(span.context.parentID) - var ptrParentID *int64 - if parentID != 0 { - ptrParentID = &parentID - } - traceIDHigh := int64(span.context.traceID.High) - var ptrTraceIDHigh *int64 - if traceIDHigh != 0 { - ptrTraceIDHigh = &traceIDHigh - } - timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime) - duration := span.duration.Nanoseconds() / int64(time.Microsecond) - endpoint := &z.Endpoint{ - ServiceName: span.tracer.serviceName, - Ipv4: int32(span.tracer.hostIPv4)} - thriftSpan := &z.Span{ - TraceID: int64(span.context.traceID.Low), - TraceIDHigh: ptrTraceIDHigh, - ID: int64(span.context.spanID), - ParentID: ptrParentID, - Name: span.operationName, - Timestamp: ×tamp, - Duration: &duration, - Debug: span.context.IsDebug(), - Annotations: buildAnnotations(span, endpoint), - BinaryAnnotations: buildBinaryAnnotations(span, endpoint)} - return thriftSpan -} - -func buildAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.Annotation { - // automatically adding 2 Zipkin CoreAnnotations - annotations := make([]*z.Annotation, 0, 
2+len(span.logs)) - var startLabel, endLabel string - if span.spanKind == string(ext.SpanKindRPCClientEnum) { - startLabel, endLabel = z.CLIENT_SEND, z.CLIENT_RECV - } else if span.spanKind == string(ext.SpanKindRPCServerEnum) { - startLabel, endLabel = z.SERVER_RECV, z.SERVER_SEND - } - if !span.startTime.IsZero() && startLabel != "" { - start := &z.Annotation{ - Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(span.startTime), - Value: startLabel, - Host: endpoint} - annotations = append(annotations, start) - if span.duration != 0 { - endTs := span.startTime.Add(span.duration) - end := &z.Annotation{ - Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(endTs), - Value: endLabel, - Host: endpoint} - annotations = append(annotations, end) - } - } - for _, log := range span.logs { - anno := &z.Annotation{ - Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp), - Host: endpoint} - if content, err := spanlog.MaterializeWithJSON(log.Fields); err == nil { - anno.Value = truncateString(string(content), span.tracer.options.maxTagValueLength) - } else { - anno.Value = err.Error() - } - annotations = append(annotations, anno) - } - return annotations -} - -func buildBinaryAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.BinaryAnnotation { - // automatically adding local component or server/client address tag, and client version - annotations := make([]*z.BinaryAnnotation, 0, 2+len(span.tags)) - - if span.peerDefined() && span.isRPC() { - peer := z.Endpoint{ - Ipv4: span.peer.Ipv4, - Port: span.peer.Port, - ServiceName: span.peer.ServiceName} - label := z.CLIENT_ADDR - if span.isRPCClient() { - label = z.SERVER_ADDR - } - anno := &z.BinaryAnnotation{ - Key: label, - Value: []byte{1}, - AnnotationType: z.AnnotationType_BOOL, - Host: &peer} - annotations = append(annotations, anno) - } - if !span.isRPC() { - componentName := endpoint.ServiceName - for _, tag := range span.tags { - if tag.key == string(ext.Component) { - componentName = 
stringify(tag.value) - break - } - } - local := &z.BinaryAnnotation{ - Key: z.LOCAL_COMPONENT, - Value: []byte(componentName), - AnnotationType: z.AnnotationType_STRING, - Host: endpoint} - annotations = append(annotations, local) - } - for _, tag := range span.tags { - // "Special tags" are already handled by this point, we'd be double reporting the - // tags if we don't skip here - if _, ok := specialTagHandlers[tag.key]; ok { - continue - } - if anno := buildBinaryAnnotation(tag.key, tag.value, span.tracer.options.maxTagValueLength, nil); anno != nil { - annotations = append(annotations, anno) - } - } - return annotations -} - -func buildBinaryAnnotation(key string, val interface{}, maxTagValueLength int, endpoint *z.Endpoint) *z.BinaryAnnotation { - bann := &z.BinaryAnnotation{Key: key, Host: endpoint} - if value, ok := val.(string); ok { - bann.Value = []byte(truncateString(value, maxTagValueLength)) - bann.AnnotationType = z.AnnotationType_STRING - } else if value, ok := val.([]byte); ok { - if len(value) > maxTagValueLength { - value = value[:maxTagValueLength] - } - bann.Value = value - bann.AnnotationType = z.AnnotationType_BYTES - } else if value, ok := val.(int32); ok && allowPackedNumbers { - bann.Value = int32ToBytes(value) - bann.AnnotationType = z.AnnotationType_I32 - } else if value, ok := val.(int64); ok && allowPackedNumbers { - bann.Value = int64ToBytes(value) - bann.AnnotationType = z.AnnotationType_I64 - } else if value, ok := val.(int); ok && allowPackedNumbers { - bann.Value = int64ToBytes(int64(value)) - bann.AnnotationType = z.AnnotationType_I64 - } else if value, ok := val.(bool); ok { - bann.Value = []byte{boolToByte(value)} - bann.AnnotationType = z.AnnotationType_BOOL - } else { - value := stringify(val) - bann.Value = []byte(truncateString(value, maxTagValueLength)) - bann.AnnotationType = z.AnnotationType_STRING - } - return bann -} - -func stringify(value interface{}) string { - if s, ok := value.(string); ok { - return s - } - 
return fmt.Sprintf("%+v", value) -} - -func truncateString(value string, maxLength int) string { - // we ignore the problem of utf8 runes possibly being sliced in the middle, - // as it is rather expensive to iterate through each tag just to find rune - // boundaries. - if len(value) > maxLength { - return value[:maxLength] - } - return value -} - -func boolToByte(b bool) byte { - if b { - return 1 - } - return 0 -} - -// int32ToBytes converts int32 to bytes. -func int32ToBytes(i int32) []byte { - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(i)) - return buf -} - -// int64ToBytes converts int64 to bytes. -func int64ToBytes(i int64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(i)) - return buf -} - -type zipkinSpan struct { - *Span - - // peer points to the peer service participating in this span, - // e.g. the Client if this span is a server span, - // or Server if this span is a client span - peer struct { - Ipv4 int32 - Port int16 - ServiceName string - } - - // used to distinguish local vs. RPC Server vs. RPC Client spans - spanKind string -} - -func (s *zipkinSpan) handleSpecialTags() { - s.Lock() - defer s.Unlock() - if s.firstInProcess { - // append the process tags - s.tags = append(s.tags, s.tracer.tags...) 
- } - filteredTags := make([]Tag, 0, len(s.tags)) - for _, tag := range s.tags { - if handler, ok := specialTagHandlers[tag.key]; ok { - handler(s, tag.value) - } else { - filteredTags = append(filteredTags, tag) - } - } - s.tags = filteredTags -} - -func setSpanKind(s *zipkinSpan, value interface{}) { - if val, ok := value.(string); ok { - s.spanKind = val - return - } - if val, ok := value.(ext.SpanKindEnum); ok { - s.spanKind = string(val) - } -} - -func setPeerIPv4(s *zipkinSpan, value interface{}) { - if val, ok := value.(string); ok { - if ip, err := utils.ParseIPToUint32(val); err == nil { - s.peer.Ipv4 = int32(ip) - return - } - } - if val, ok := value.(uint32); ok { - s.peer.Ipv4 = int32(val) - return - } - if val, ok := value.(int32); ok { - s.peer.Ipv4 = val - } -} - -func setPeerPort(s *zipkinSpan, value interface{}) { - if val, ok := value.(string); ok { - if port, err := utils.ParsePort(val); err == nil { - s.peer.Port = int16(port) - return - } - } - if val, ok := value.(uint16); ok { - s.peer.Port = int16(val) - return - } - if val, ok := value.(int); ok { - s.peer.Port = int16(val) - } -} - -func setPeerService(s *zipkinSpan, value interface{}) { - if val, ok := value.(string); ok { - s.peer.ServiceName = val - } -} - -func removeTag(s *zipkinSpan, value interface{}) {} - -func (s *zipkinSpan) peerDefined() bool { - return s.peer.ServiceName != "" || s.peer.Ipv4 != 0 || s.peer.Port != 0 -} - -func (s *zipkinSpan) isRPC() bool { - s.RLock() - defer s.RUnlock() - return s.spanKind == string(ext.SpanKindRPCClientEnum) || s.spanKind == string(ext.SpanKindRPCServerEnum) -} - -func (s *zipkinSpan) isRPCClient() bool { - s.RLock() - defer s.RUnlock() - return s.spanKind == string(ext.SpanKindRPCClientEnum) -} diff --git a/vendor/github.com/uber/jaeger-lib/LICENSE b/vendor/github.com/uber/jaeger-lib/LICENSE deleted file mode 100644 index 261eeb9e9f8b..000000000000 --- a/vendor/github.com/uber/jaeger-lib/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache 
License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/uber/jaeger-lib/metrics/counter.go b/vendor/github.com/uber/jaeger-lib/metrics/counter.go deleted file mode 100644 index 2a6a43efdb45..000000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/counter.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -// Counter tracks the number of times an event has occurred -type Counter interface { - // Inc adds the given value to the counter. - Inc(int64) -} - -// NullCounter counter that does nothing -var NullCounter Counter = nullCounter{} - -type nullCounter struct{} - -func (nullCounter) Inc(int64) {} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/factory.go deleted file mode 100644 index 0ead061ebd64..000000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/factory.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package metrics - -import ( - "time" -) - -// NSOptions defines the name and tags map associated with a factory namespace -type NSOptions struct { - Name string - Tags map[string]string -} - -// Options defines the information associated with a metric -type Options struct { - Name string - Tags map[string]string - Help string -} - -// TimerOptions defines the information associated with a metric -type TimerOptions struct { - Name string - Tags map[string]string - Help string - Buckets []time.Duration -} - -// HistogramOptions defines the information associated with a metric -type HistogramOptions struct { - Name string - Tags map[string]string - Help string - Buckets []float64 -} - -// Factory creates new metrics -type Factory interface { - Counter(metric Options) Counter - Timer(metric TimerOptions) Timer - Gauge(metric Options) Gauge - Histogram(metric HistogramOptions) Histogram - - // Namespace returns a nested metrics factory. - Namespace(scope NSOptions) Factory -} - -// NullFactory is a metrics factory that returns NullCounter, NullTimer, and NullGauge. -var NullFactory Factory = nullFactory{} - -type nullFactory struct{} - -func (nullFactory) Counter(options Options) Counter { - return NullCounter -} -func (nullFactory) Timer(options TimerOptions) Timer { - return NullTimer -} -func (nullFactory) Gauge(options Options) Gauge { - return NullGauge -} -func (nullFactory) Histogram(options HistogramOptions) Histogram { - return NullHistogram -} -func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory } diff --git a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go deleted file mode 100644 index 3c606391a095..000000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -// Gauge returns instantaneous measurements of something as an int64 value -type Gauge interface { - // Update the gauge to the value passed in. - Update(int64) -} - -// NullGauge gauge that does nothing -var NullGauge Gauge = nullGauge{} - -type nullGauge struct{} - -func (nullGauge) Update(int64) {} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go deleted file mode 100644 index d3bd6174fe84..000000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -// Histogram that keeps track of a distribution of values. -type Histogram interface { - // Records the value passed in. 
- Record(float64) -} - -// NullHistogram that does nothing -var NullHistogram Histogram = nullHistogram{} - -type nullHistogram struct{} - -func (nullHistogram) Record(float64) {} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/keys.go b/vendor/github.com/uber/jaeger-lib/metrics/keys.go deleted file mode 100644 index c24445a10690..000000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/keys.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "sort" -) - -// GetKey converts name+tags into a single string of the form -// "name|tag1=value1|...|tagN=valueN", where tag names are -// sorted alphabetically. -func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string { - keys := make([]string, 0, len(tags)) - for k := range tags { - keys = append(keys, k) - } - sort.Strings(keys) - key := name - for _, k := range keys { - key = key + tagsSep + k + tagKVSep + tags[k] - } - return key -} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go deleted file mode 100644 index 0c639688858e..000000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -// MustInit initializes the passed in metrics and initializes its fields using the passed in factory. -// -// It uses reflection to initialize a struct containing metrics fields -// by assigning new Counter/Gauge/Timer values with the metric name retrieved -// from the `metric` tag and stats tags retrieved from the `tags` tag. -// -// Note: all fields of the struct must be exported, have a `metric` tag, and be -// of type Counter or Gauge or Timer. -// -// Errors during Init lead to a panic. -func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) { - if err := Init(metrics, factory, globalTags); err != nil { - panic(err.Error()) - } -} - -// Init does the same as Init, but returns an error instead of -// panicking. -func Init(m interface{}, factory Factory, globalTags map[string]string) error { - // Allow user to opt out of reporting metrics by passing in nil. 
- if factory == nil { - factory = NullFactory - } - - counterPtrType := reflect.TypeOf((*Counter)(nil)).Elem() - gaugePtrType := reflect.TypeOf((*Gauge)(nil)).Elem() - timerPtrType := reflect.TypeOf((*Timer)(nil)).Elem() - histogramPtrType := reflect.TypeOf((*Histogram)(nil)).Elem() - - v := reflect.ValueOf(m).Elem() - t := v.Type() - for i := 0; i < t.NumField(); i++ { - tags := make(map[string]string) - for k, v := range globalTags { - tags[k] = v - } - var buckets []float64 - field := t.Field(i) - metric := field.Tag.Get("metric") - if metric == "" { - return fmt.Errorf("Field %s is missing a tag 'metric'", field.Name) - } - if tagString := field.Tag.Get("tags"); tagString != "" { - tagPairs := strings.Split(tagString, ",") - for _, tagPair := range tagPairs { - tag := strings.Split(tagPair, "=") - if len(tag) != 2 { - return fmt.Errorf( - "Field [%s]: Tag [%s] is not of the form key=value in 'tags' string [%s]", - field.Name, tagPair, tagString) - } - tags[tag[0]] = tag[1] - } - } - if bucketString := field.Tag.Get("buckets"); bucketString != "" { - if field.Type.AssignableTo(timerPtrType) { - // TODO: Parse timer duration buckets - return fmt.Errorf( - "Field [%s]: Buckets are not currently initialized for timer metrics", - field.Name) - } else if field.Type.AssignableTo(histogramPtrType) { - bucketValues := strings.Split(bucketString, ",") - for _, bucket := range bucketValues { - b, err := strconv.ParseFloat(bucket, 64) - if err != nil { - return fmt.Errorf( - "Field [%s]: Bucket [%s] could not be converted to float64 in 'buckets' string [%s]", - field.Name, bucket, bucketString) - } - buckets = append(buckets, b) - } - } else { - return fmt.Errorf( - "Field [%s]: Buckets should only be defined for Timer and Histogram metric types", - field.Name) - } - } - help := field.Tag.Get("help") - var obj interface{} - if field.Type.AssignableTo(counterPtrType) { - obj = factory.Counter(Options{ - Name: metric, - Tags: tags, - Help: help, - }) - } else if 
field.Type.AssignableTo(gaugePtrType) { - obj = factory.Gauge(Options{ - Name: metric, - Tags: tags, - Help: help, - }) - } else if field.Type.AssignableTo(timerPtrType) { - // TODO: Add buckets once parsed (see TODO above) - obj = factory.Timer(TimerOptions{ - Name: metric, - Tags: tags, - Help: help, - }) - } else if field.Type.AssignableTo(histogramPtrType) { - obj = factory.Histogram(HistogramOptions{ - Name: metric, - Tags: tags, - Help: help, - Buckets: buckets, - }) - } else { - return fmt.Errorf( - "Field %s is not a pointer to timer, gauge, or counter", - field.Name) - } - v.Field(i).Set(reflect.ValueOf(obj)) - } - return nil -} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go deleted file mode 100644 index 4a8abdb539f9..000000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "time" -) - -// StartStopwatch begins recording the executing time of an event, returning -// a Stopwatch that should be used to stop the recording the time for -// that event. 
Multiple events can be occurring simultaneously each -// represented by different active Stopwatches -func StartStopwatch(timer Timer) Stopwatch { - return Stopwatch{t: timer, start: time.Now()} -} - -// A Stopwatch tracks the execution time of a specific event -type Stopwatch struct { - t Timer - start time.Time -} - -// Stop stops executing of the stopwatch and records the amount of elapsed time -func (s Stopwatch) Stop() { - s.t.Record(s.ElapsedTime()) -} - -// ElapsedTime returns the amount of elapsed time (in time.Duration) -func (s Stopwatch) ElapsedTime() time.Duration { - return time.Since(s.start) -} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/timer.go b/vendor/github.com/uber/jaeger-lib/metrics/timer.go deleted file mode 100644 index e18d222abb4a..000000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/timer.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "time" -) - -// Timer accumulates observations about how long some operation took, -// and also maintains a historgam of percentiles. -type Timer interface { - // Records the time passed in. 
- Record(time.Duration) -} - -// NullTimer timer that does nothing -var NullTimer Timer = nullTimer{} - -type nullTimer struct{} - -func (nullTimer) Record(time.Duration) {} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go deleted file mode 100644 index 593f6530084f..000000000000 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -2898 / PKCS #5 v2.0. - -A key derivation function is useful when encrypting data based on a password -or any other not-fully-random data. It uses a pseudorandom function to derive -a secure encryption key based on the password. - -While v2.0 of the standard defines only one pseudorandom function to use, -HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -choose, you can pass the `New` functions from the different SHA packages to -pbkdf2.Key. -*/ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" - -import ( - "crypto/hmac" - "hash" -) - -// Key derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. The key is -// derived based on the method described as PBKDF2 with the HMAC variant using -// the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. 
-// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. -func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hashLen := prf.Size() - numBlocks := (keyLen + hashLen - 1) / hashLen - - var buf [4]byte - dk := make([]byte, 0, numBlocks*hashLen) - U := make([]byte, hashLen) - for block := 1; block <= numBlocks; block++ { - // N.B.: || means concatenation, ^ means XOR - // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter - // U_1 = PRF(password, salt || uint(i)) - prf.Reset() - prf.Write(salt) - buf[0] = byte(block >> 24) - buf[1] = byte(block >> 16) - buf[2] = byte(block >> 8) - buf[3] = byte(block) - prf.Write(buf[:4]) - dk = prf.Sum(dk) - T := dk[len(dk)-hashLen:] - copy(U, T) - - // U_n = PRF(password, U_(n-1)) - for n := 2; n <= iter; n++ { - prf.Reset() - prf.Write(U) - U = U[:0] - U = prf.Sum(U) - for x := range U { - T[x] ^= U[x] - } - } - } - return dk[:keyLen] -} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go deleted file mode 100644 index 2f81fe4148e9..000000000000 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package scrypt implements the scrypt key derivation function as defined in -// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard -// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). -package scrypt // import "golang.org/x/crypto/scrypt" - -import ( - "crypto/sha256" - "errors" - "math/bits" - - "golang.org/x/crypto/pbkdf2" -) - -const maxInt = int(^uint(0) >> 1) - -// blockCopy copies n numbers from src into dst. 
-func blockCopy(dst, src []uint32, n int) { - copy(dst, src[:n]) -} - -// blockXOR XORs numbers from dst with n numbers from src. -func blockXOR(dst, src []uint32, n int) { - for i, v := range src[:n] { - dst[i] ^= v - } -} - -// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, -// and puts the result into both tmp and out. -func salsaXOR(tmp *[16]uint32, in, out []uint32) { - w0 := tmp[0] ^ in[0] - w1 := tmp[1] ^ in[1] - w2 := tmp[2] ^ in[2] - w3 := tmp[3] ^ in[3] - w4 := tmp[4] ^ in[4] - w5 := tmp[5] ^ in[5] - w6 := tmp[6] ^ in[6] - w7 := tmp[7] ^ in[7] - w8 := tmp[8] ^ in[8] - w9 := tmp[9] ^ in[9] - w10 := tmp[10] ^ in[10] - w11 := tmp[11] ^ in[11] - w12 := tmp[12] ^ in[12] - w13 := tmp[13] ^ in[13] - w14 := tmp[14] ^ in[14] - w15 := tmp[15] ^ in[15] - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 - x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 - - for i := 0; i < 8; i += 2 { - x4 ^= bits.RotateLeft32(x0+x12, 7) - x8 ^= bits.RotateLeft32(x4+x0, 9) - x12 ^= bits.RotateLeft32(x8+x4, 13) - x0 ^= bits.RotateLeft32(x12+x8, 18) - - x9 ^= bits.RotateLeft32(x5+x1, 7) - x13 ^= bits.RotateLeft32(x9+x5, 9) - x1 ^= bits.RotateLeft32(x13+x9, 13) - x5 ^= bits.RotateLeft32(x1+x13, 18) - - x14 ^= bits.RotateLeft32(x10+x6, 7) - x2 ^= bits.RotateLeft32(x14+x10, 9) - x6 ^= bits.RotateLeft32(x2+x14, 13) - x10 ^= bits.RotateLeft32(x6+x2, 18) - - x3 ^= bits.RotateLeft32(x15+x11, 7) - x7 ^= bits.RotateLeft32(x3+x15, 9) - x11 ^= bits.RotateLeft32(x7+x3, 13) - x15 ^= bits.RotateLeft32(x11+x7, 18) - - x1 ^= bits.RotateLeft32(x0+x3, 7) - x2 ^= bits.RotateLeft32(x1+x0, 9) - x3 ^= bits.RotateLeft32(x2+x1, 13) - x0 ^= bits.RotateLeft32(x3+x2, 18) - - x6 ^= bits.RotateLeft32(x5+x4, 7) - x7 ^= bits.RotateLeft32(x6+x5, 9) - x4 ^= bits.RotateLeft32(x7+x6, 13) - x5 ^= bits.RotateLeft32(x4+x7, 18) - - x11 ^= bits.RotateLeft32(x10+x9, 7) - x8 ^= bits.RotateLeft32(x11+x10, 9) - x9 ^= bits.RotateLeft32(x8+x11, 13) - x10 ^= 
bits.RotateLeft32(x9+x8, 18) - - x12 ^= bits.RotateLeft32(x15+x14, 7) - x13 ^= bits.RotateLeft32(x12+x15, 9) - x14 ^= bits.RotateLeft32(x13+x12, 13) - x15 ^= bits.RotateLeft32(x14+x13, 18) - } - x0 += w0 - x1 += w1 - x2 += w2 - x3 += w3 - x4 += w4 - x5 += w5 - x6 += w6 - x7 += w7 - x8 += w8 - x9 += w9 - x10 += w10 - x11 += w11 - x12 += w12 - x13 += w13 - x14 += w14 - x15 += w15 - - out[0], tmp[0] = x0, x0 - out[1], tmp[1] = x1, x1 - out[2], tmp[2] = x2, x2 - out[3], tmp[3] = x3, x3 - out[4], tmp[4] = x4, x4 - out[5], tmp[5] = x5, x5 - out[6], tmp[6] = x6, x6 - out[7], tmp[7] = x7, x7 - out[8], tmp[8] = x8, x8 - out[9], tmp[9] = x9, x9 - out[10], tmp[10] = x10, x10 - out[11], tmp[11] = x11, x11 - out[12], tmp[12] = x12, x12 - out[13], tmp[13] = x13, x13 - out[14], tmp[14] = x14, x14 - out[15], tmp[15] = x15, x15 -} - -func blockMix(tmp *[16]uint32, in, out []uint32, r int) { - blockCopy(tmp[:], in[(2*r-1)*16:], 16) - for i := 0; i < 2*r; i += 2 { - salsaXOR(tmp, in[i*16:], out[i*8:]) - salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) - } -} - -func integer(b []uint32, r int) uint64 { - j := (2*r - 1) * 16 - return uint64(b[j]) | uint64(b[j+1])<<32 -} - -func smix(b []byte, r, N int, v, xy []uint32) { - var tmp [16]uint32 - x := xy - y := xy[32*r:] - - j := 0 - for i := 0; i < 32*r; i++ { - x[i] = uint32(b[j]) | uint32(b[j+1])<<8 | uint32(b[j+2])<<16 | uint32(b[j+3])<<24 - j += 4 - } - for i := 0; i < N; i += 2 { - blockCopy(v[i*(32*r):], x, 32*r) - blockMix(&tmp, x, y, r) - - blockCopy(v[(i+1)*(32*r):], y, 32*r) - blockMix(&tmp, y, x, r) - } - for i := 0; i < N; i += 2 { - j := int(integer(x, r) & uint64(N-1)) - blockXOR(x, v[j*(32*r):], 32*r) - blockMix(&tmp, x, y, r) - - j = int(integer(y, r) & uint64(N-1)) - blockXOR(y, v[j*(32*r):], 32*r) - blockMix(&tmp, y, x, r) - } - j = 0 - for _, v := range x[:32*r] { - b[j+0] = byte(v >> 0) - b[j+1] = byte(v >> 8) - b[j+2] = byte(v >> 16) - b[j+3] = byte(v >> 24) - j += 4 - } -} - -// Key derives a key from the password, salt, 
and cost parameters, returning -// a byte slice of length keyLen that can be used as cryptographic key. -// -// N is a CPU/memory cost parameter, which must be a power of two greater than 1. -// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the -// limits, the function returns a nil byte slice and an error. -// -// For example, you can get a derived key for e.g. AES-256 (which needs a -// 32-byte key) by doing: -// -// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) -// -// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 -// and p=1. The parameters N, r, and p should be increased as memory latency and -// CPU parallelism increases; consider setting N to the highest power of 2 you -// can derive within 100 milliseconds. Remember to get a good random salt. -func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { - if N <= 1 || N&(N-1) != 0 { - return nil, errors.New("scrypt: N must be > 1 and a power of 2") - } - if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { - return nil, errors.New("scrypt: parameters are too large") - } - - xy := make([]uint32, 64*r) - v := make([]uint32, 32*N*r) - b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) - - for i := 0; i < p; i++ { - smix(b[i*128*r:], r, N, v, xy) - } - - return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil -} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go deleted file mode 100644 index 11383d7402d3..000000000000 --- a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go +++ /dev/null @@ -1,198 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: helloworld.proto - -package helloworld - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The request message containing the user's name. -type HelloRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HelloRequest) Reset() { *m = HelloRequest{} } -func (m *HelloRequest) String() string { return proto.CompactTextString(m) } -func (*HelloRequest) ProtoMessage() {} -func (*HelloRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_helloworld_71e208cbdc16936b, []int{0} -} -func (m *HelloRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HelloRequest.Unmarshal(m, b) -} -func (m *HelloRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HelloRequest.Marshal(b, m, deterministic) -} -func (dst *HelloRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_HelloRequest.Merge(dst, src) -} -func (m *HelloRequest) XXX_Size() int { - return xxx_messageInfo_HelloRequest.Size(m) -} -func (m *HelloRequest) XXX_DiscardUnknown() { - xxx_messageInfo_HelloRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_HelloRequest proto.InternalMessageInfo - -func (m *HelloRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// The response 
message containing the greetings -type HelloReply struct { - Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HelloReply) Reset() { *m = HelloReply{} } -func (m *HelloReply) String() string { return proto.CompactTextString(m) } -func (*HelloReply) ProtoMessage() {} -func (*HelloReply) Descriptor() ([]byte, []int) { - return fileDescriptor_helloworld_71e208cbdc16936b, []int{1} -} -func (m *HelloReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HelloReply.Unmarshal(m, b) -} -func (m *HelloReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HelloReply.Marshal(b, m, deterministic) -} -func (dst *HelloReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_HelloReply.Merge(dst, src) -} -func (m *HelloReply) XXX_Size() int { - return xxx_messageInfo_HelloReply.Size(m) -} -func (m *HelloReply) XXX_DiscardUnknown() { - xxx_messageInfo_HelloReply.DiscardUnknown(m) -} - -var xxx_messageInfo_HelloReply proto.InternalMessageInfo - -func (m *HelloReply) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func init() { - proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest") - proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// GreeterClient is the client API for Greeter service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type GreeterClient interface { - // Sends a greeting - SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) -} - -type greeterClient struct { - cc *grpc.ClientConn -} - -func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { - return &greeterClient{cc} -} - -func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { - out := new(HelloReply) - err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// GreeterServer is the server API for Greeter service. -type GreeterServer interface { - // Sends a greeting - SayHello(context.Context, *HelloRequest) (*HelloReply, error) -} - -func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { - s.RegisterService(&_Greeter_serviceDesc, srv) -} - -func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HelloRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GreeterServer).SayHello(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/helloworld.Greeter/SayHello", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Greeter_serviceDesc = grpc.ServiceDesc{ - ServiceName: "helloworld.Greeter", - HandlerType: (*GreeterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SayHello", - Handler: _Greeter_SayHello_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "helloworld.proto", -} - -func init() { proto.RegisterFile("helloworld.proto", fileDescriptor_helloworld_71e208cbdc16936b) } - -var fileDescriptor_helloworld_71e208cbdc16936b = []byte{ - // 175 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9, - 0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, - 0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, - 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92, - 0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71, - 0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a, - 0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64, - 0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x14, 0xe4, 0x54, 0x2a, 0x31, 0x38, 0x19, 0x70, 0x49, 0x67, 0xe6, - 0xeb, 0xa5, 0x17, 0x15, 0x24, 0xeb, 0xa5, 0x56, 0x24, 0xe6, 0x16, 0xe4, 0xa4, 0x16, 0x23, 0xa9, - 0x75, 0xe2, 0x07, 0x2b, 0x0e, 0x07, 0xb1, 0x03, 0x40, 0x5e, 0x0a, 0x60, 0x4c, 0x62, 0x03, 0xfb, - 0xcd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto deleted file mode 100644 index d79a6a0d1f57..000000000000 --- a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -option java_multiple_files = true; -option java_package = "io.grpc.examples.helloworld"; -option java_outer_classname = "HelloWorldProto"; - -package helloworld; - -// The greeting service definition. -service Greeter { - // Sends a greeting - rpc SayHello (HelloRequest) returns (HelloReply) {} -} - -// The request message containing the user's name. -message HelloRequest { - string name = 1; -} - -// The response message containing the greetings -message HelloReply { - string message = 1; -} diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/CHANGELOG.md b/vendor/gopkg.in/Masterminds/sprig.v2/CHANGELOG.md deleted file mode 100644 index 445937138ac8..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/CHANGELOG.md +++ /dev/null @@ -1,153 +0,0 @@ -# Changelog - -## Release 2.15.0 (2018-04-02) - -### Added - -- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) -- #66: Add ternary function (thanks @binoculars) -- #67: Allow keys function to take multiple dicts (thanks @binoculars) -- #89: Added sha1sum to crypto function (thanks @benkeil) -- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) -- #92: Add travis testing for go 1.10 -- #93: Adding appveyor config for windows testing - -### Changed - -- #90: Updating to more recent dependencies -- #73: replace satori/go.uuid with google/uuid (thanks @petterw) - -### Fixed - -- #76: Fixed documentation typos (thanks @Thiht) -- Fixed rounding issue on the `ago` function. 
Note, the removes support for Go 1.8 and older - -## Release 2.14.1 (2017-12-01) - -### Fixed - -- #60: Fix typo in function name documentation (thanks @neil-ca-moore) -- #61: Removing line with {{ due to blocking github pages genertion -- #64: Update the list functions to handle int, string, and other slices for compatibility - -## Release 2.14.0 (2017-10-06) - -This new version of Sprig adds a set of functions for generating and working with SSL certificates. - -- `genCA` generates an SSL Certificate Authority -- `genSelfSignedCert` generates an SSL self-signed certificate -- `genSignedCert` generates an SSL certificate and key based on a given CA - -## Release 2.13.0 (2017-09-18) - -This release adds new functions, including: - -- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions -- `floor`, `ceil`, and `round` math functions -- `toDate` converts a string to a date -- `nindent` is just like `indent` but also prepends a new line -- `ago` returns the time from `time.Now` - -### Added - -- #40: Added basic regex functionality (thanks @alanquillin) -- #41: Added ceil floor and round functions (thanks @alanquillin) -- #48: Added toDate function (thanks @andreynering) -- #50: Added nindent function (thanks @binoculars) -- #46: Added ago function (thanks @slayer) - -### Changed - -- #51: Updated godocs to include new string functions (thanks @curtisallen) -- #49: Added ability to merge multiple dicts (thanks @binoculars) - -## Release 2.12.0 (2017-05-17) - -- `snakecase`, `camelcase`, and `shuffle` are three new string functions -- `fail` allows you to bail out of a template render when conditions are not met - -## Release 2.11.0 (2017-05-02) - -- Added `toJson` and `toPrettyJson` -- Added `merge` -- Refactored documentation - -## Release 2.10.0 (2017-03-15) - -- Added `semver` and `semverCompare` for Semantic Versions -- `list` replaces `tuple` -- Fixed issue with `join` -- Added 
`first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` - -## Release 2.9.0 (2017-02-23) - -- Added `splitList` to split a list -- Added crypto functions of `genPrivateKey` and `derivePassword` - -## Release 2.8.0 (2016-12-21) - -- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) -- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) - -## Release 2.7.0 (2016-12-01) - -- Added `sha256sum` to generate a hash of an input -- Added functions to convert a numeric or string to `int`, `int64`, `float64` - -## Release 2.6.0 (2016-10-03) - -- Added a `uuidv4` template function for generating UUIDs inside of a template. - -## Release 2.5.0 (2016-08-19) - -- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions -- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) -- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 - -## Release 2.4.0 (2016-08-16) - -- Adds two functions: `until` and `untilStep` - -## Release 2.3.0 (2016-06-21) - -- cat: Concatenate strings with whitespace separators. -- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" -- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" -- indent: Indent blocks of text in a way that is sensitive to "\n" characters. - -## Release 2.2.0 (2016-04-21) - -- Added a `genPrivateKey` function (Thanks @bacongobbler) - -## Release 2.1.0 (2016-03-30) - -- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. -- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
- -## Release 2.0.0 (2016-03-29) - -Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. - -- `min` complements `max` (formerly `biggest`) -- `empty` indicates that a value is the empty value for its type -- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` -- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` -- Date formatters have been added for HTML dates (as used in `date` input fields) -- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). - -## Release 1.2.0 (2016-02-01) - -- Added quote and squote -- Added b32enc and b32dec -- add now takes varargs -- biggest now takes varargs - -## Release 1.1.0 (2015-12-29) - -- Added #4: Added contains function. strings.Contains, but with the arguments - switched to simplify common pipelines. (thanks krancour) -- Added Travis-CI testing support - -## Release 1.0.0 (2015-12-23) - -- Initial release diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/LICENSE.txt b/vendor/gopkg.in/Masterminds/sprig.v2/LICENSE.txt deleted file mode 100644 index 5c95accc2e2d..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/LICENSE.txt +++ /dev/null @@ -1,20 +0,0 @@ -Sprig -Copyright (C) 2013 Masterminds - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/Makefile b/vendor/gopkg.in/Masterminds/sprig.v2/Makefile deleted file mode 100644 index 63a93fdf798a..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/Makefile +++ /dev/null @@ -1,13 +0,0 @@ - -HAS_GLIDE := $(shell command -v glide;) - -.PHONY: test -test: - go test -v . - -.PHONY: setup -setup: -ifndef HAS_GLIDE - go get -u github.com/Masterminds/glide -endif - glide install diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/README.md b/vendor/gopkg.in/Masterminds/sprig.v2/README.md deleted file mode 100644 index 25bf3d4f4b77..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# Sprig: Template functions for Go templates -[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) -[![Build Status](https://travis-ci.org/Masterminds/sprig.svg?branch=master)](https://travis-ci.org/Masterminds/sprig) - -The Go language comes with a [built-in template -language](http://golang.org/pkg/text/template/), but not -very many template functions. This library provides a group of commonly -used template functions. - -It is inspired by the template functions found in -[Twig](http://twig.sensiolabs.org/documentation) and also in various -JavaScript libraries, such as [underscore.js](http://underscorejs.org/). 
- -## Usage - -Template developers can read the [Sprig function documentation](http://masterminds.github.io/sprig/) to -learn about the >100 template functions available. - -For Go developers wishing to include Sprig as a library in their programs, -API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig), but -read on for standard usage. - -### Load the Sprig library - -To load the Sprig `FuncMap`: - -```go - -import ( - "github.com/Masterminds/sprig" - "html/template" -) - -// This example illustrates that the FuncMap *must* be set before the -// templates themselves are loaded. -tpl := template.Must( - template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") -) - - -``` - -### Call the functions inside of templates - -By convention, all functions are lowercase. This seems to follow the Go -idiom for template functions (as opposed to template methods, which are -TitleCase). - - -Example: - -``` -{{ "hello!" | upper | repeat 5 }} -``` - -Produces: - -``` -HELLO!HELLO!HELLO!HELLO!HELLO! -``` - -## Principles: - -The following principles were used in deciding on which functions to add, and -determining how to implement them. - -- Template functions should be used to build layout. Therefore, the following - types of operations are within the domain of template functions: - - Formatting - - Layout - - Simple type conversions - - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) -- Template functions should not return errors unless there is no way to print - a sensible value. For example, converting a string to an integer should not - produce an error if conversion fails. Instead, it should display a default - value that can be displayed. -- Simple math is necessary for grid layouts, pagers, and so on. Complex math - (anything other than arithmetic) should be done outside of templates. -- Template functions only deal with the data passed into them. They never retrieve - data from a source. 
-- Finally, do not override core Go template functions. diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/appveyor.yml b/vendor/gopkg.in/Masterminds/sprig.v2/appveyor.yml deleted file mode 100644 index d545a987a3b7..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/appveyor.yml +++ /dev/null @@ -1,26 +0,0 @@ - -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\sprig -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -install: - - go get -u github.com/Masterminds/glide - - set PATH=%GOPATH%\bin;%PATH% - - go version - - go env - -build_script: - - glide install - - go install ./... - -test_script: - - go test -v - -deploy: off diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/crypto.go b/vendor/gopkg.in/Masterminds/sprig.v2/crypto.go deleted file mode 100644 index 7427deb83888..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/crypto.go +++ /dev/null @@ -1,441 +0,0 @@ -package sprig - -import ( - "bytes" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/hmac" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "hash/adler32" - "math/big" - "net" - "time" - - "github.com/google/uuid" - "golang.org/x/crypto/scrypt" -) - -func sha256sum(input string) string { - hash := sha256.Sum256([]byte(input)) - return hex.EncodeToString(hash[:]) -} - -func sha1sum(input string) string { - hash := sha1.Sum([]byte(input)) - return hex.EncodeToString(hash[:]) -} - -func adler32sum(input string) string { - hash := adler32.Checksum([]byte(input)) - return fmt.Sprintf("%d", hash) -} - -// uuidv4 provides a safe and secure UUID v4 implementation -func uuidv4() string { - return fmt.Sprintf("%s", uuid.New()) -} - -var master_password_seed = "com.lyndir.masterpassword" - -var password_type_templates = map[string][][]byte{ - "maximum": 
{[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, - "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), - []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), - []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), - []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), - []byte("CvccCvcvCvccno")}, - "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, - "short": {[]byte("Cvcn")}, - "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, - "pin": {[]byte("nnnn")}, -} - -var template_characters = map[byte]string{ - 'V': "AEIOU", - 'C': "BCDFGHJKLMNPQRSTVWXYZ", - 'v': "aeiou", - 'c': "bcdfghjklmnpqrstvwxyz", - 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", - 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", - 'n': "0123456789", - 'o': "@&%?,=[]_:-+*$#!'^~;()/.", - 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", -} - -func derivePassword(counter uint32, password_type, password, user, site string) string { - var templates = password_type_templates[password_type] - if templates == nil { - return fmt.Sprintf("cannot find password template %s", password_type) - } - - var buffer bytes.Buffer - buffer.WriteString(master_password_seed) - binary.Write(&buffer, binary.BigEndian, uint32(len(user))) - buffer.WriteString(user) - - salt := buffer.Bytes() - key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) - if err != nil { - return fmt.Sprintf("failed to derive password: %s", err) - } - - buffer.Truncate(len(master_password_seed)) - binary.Write(&buffer, binary.BigEndian, uint32(len(site))) - buffer.WriteString(site) - binary.Write(&buffer, binary.BigEndian, counter) - - var hmacv = 
hmac.New(sha256.New, key) - hmacv.Write(buffer.Bytes()) - var seed = hmacv.Sum(nil) - var temp = templates[int(seed[0])%len(templates)] - - buffer.Truncate(0) - for i, element := range temp { - pass_chars := template_characters[element] - pass_char := pass_chars[int(seed[i+1])%len(pass_chars)] - buffer.WriteByte(pass_char) - } - - return buffer.String() -} - -func generatePrivateKey(typ string) string { - var priv interface{} - var err error - switch typ { - case "", "rsa": - // good enough for government work - priv, err = rsa.GenerateKey(rand.Reader, 4096) - case "dsa": - key := new(dsa.PrivateKey) - // again, good enough for government work - if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { - return fmt.Sprintf("failed to generate dsa params: %s", err) - } - err = dsa.GenerateKey(key, rand.Reader) - priv = key - case "ecdsa": - // again, good enough for government work - priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - default: - return "Unknown type " + typ - } - if err != nil { - return fmt.Sprintf("failed to generate private key: %s", err) - } - - return string(pem.EncodeToMemory(pemBlockForKey(priv))) -} - -type DSAKeyFormat struct { - Version int - P, Q, G, Y, X *big.Int -} - -func pemBlockForKey(priv interface{}) *pem.Block { - switch k := priv.(type) { - case *rsa.PrivateKey: - return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} - case *dsa.PrivateKey: - val := DSAKeyFormat{ - P: k.P, Q: k.Q, G: k.G, - Y: k.Y, X: k.X, - } - bytes, _ := asn1.Marshal(val) - return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} - case *ecdsa.PrivateKey: - b, _ := x509.MarshalECPrivateKey(k) - return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} - default: - return nil - } -} - -type certificate struct { - Cert string - Key string -} - -func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { - crt := certificate{} - - cert, err := 
base64.StdEncoding.DecodeString(b64cert) - if err != nil { - return crt, errors.New("unable to decode base64 certificate") - } - - key, err := base64.StdEncoding.DecodeString(b64key) - if err != nil { - return crt, errors.New("unable to decode base64 private key") - } - - decodedCert, _ := pem.Decode(cert) - if decodedCert == nil { - return crt, errors.New("unable to decode certificate") - } - _, err = x509.ParseCertificate(decodedCert.Bytes) - if err != nil { - return crt, fmt.Errorf( - "error parsing certificate: decodedCert.Bytes: %s", - err, - ) - } - - decodedKey, _ := pem.Decode(key) - if decodedKey == nil { - return crt, errors.New("unable to decode key") - } - _, err = x509.ParsePKCS1PrivateKey(decodedKey.Bytes) - if err != nil { - return crt, fmt.Errorf( - "error parsing prive key: decodedKey.Bytes: %s", - err, - ) - } - - crt.Cert = string(cert) - crt.Key = string(key) - - return crt, nil -} - -func generateCertificateAuthority( - cn string, - daysValid int, -) (certificate, error) { - ca := certificate{} - - template, err := getBaseCertTemplate(cn, nil, nil, daysValid) - if err != nil { - return ca, err - } - // Override KeyUsage and IsCA - template.KeyUsage = x509.KeyUsageKeyEncipherment | - x509.KeyUsageDigitalSignature | - x509.KeyUsageCertSign - template.IsCA = true - - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return ca, fmt.Errorf("error generating rsa key: %s", err) - } - - ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) - if err != nil { - return ca, err - } - - return ca, nil -} - -func generateSelfSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (certificate, error) { - cert := certificate{} - - template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err != nil { - return cert, err - } - - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return cert, fmt.Errorf("error generating rsa key: %s", err) - } - - 
cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) - if err != nil { - return cert, err - } - - return cert, nil -} - -func generateSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, -) (certificate, error) { - cert := certificate{} - - decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) - if decodedSignerCert == nil { - return cert, errors.New("unable to decode certificate") - } - signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) - if err != nil { - return cert, fmt.Errorf( - "error parsing certificate: decodedSignerCert.Bytes: %s", - err, - ) - } - decodedSignerKey, _ := pem.Decode([]byte(ca.Key)) - if decodedSignerKey == nil { - return cert, errors.New("unable to decode key") - } - signerKey, err := x509.ParsePKCS1PrivateKey(decodedSignerKey.Bytes) - if err != nil { - return cert, fmt.Errorf( - "error parsing prive key: decodedSignerKey.Bytes: %s", - err, - ) - } - - template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err != nil { - return cert, err - } - - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return cert, fmt.Errorf("error generating rsa key: %s", err) - } - - cert.Cert, cert.Key, err = getCertAndKey( - template, - priv, - signerCert, - signerKey, - ) - if err != nil { - return cert, err - } - - return cert, nil -} - -func getCertAndKey( - template *x509.Certificate, - signeeKey *rsa.PrivateKey, - parent *x509.Certificate, - signingKey *rsa.PrivateKey, -) (string, string, error) { - derBytes, err := x509.CreateCertificate( - rand.Reader, - template, - parent, - &signeeKey.PublicKey, - signingKey, - ) - if err != nil { - return "", "", fmt.Errorf("error creating certificate: %s", err) - } - - certBuffer := bytes.Buffer{} - if err := pem.Encode( - &certBuffer, - &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) - } - - 
keyBuffer := bytes.Buffer{} - if err := pem.Encode( - &keyBuffer, - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(signeeKey), - }, - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding key: %s", err) - } - - return string(certBuffer.Bytes()), string(keyBuffer.Bytes()), nil -} - -func getBaseCertTemplate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (*x509.Certificate, error) { - ipAddresses, err := getNetIPs(ips) - if err != nil { - return nil, err - } - dnsNames, err := getAlternateDNSStrs(alternateDNS) - if err != nil { - return nil, err - } - serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) - if err != nil { - return nil, err - } - return &x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: cn, - }, - IPAddresses: ipAddresses, - DNSNames: dnsNames, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }, - BasicConstraintsValid: true, - }, nil -} - -func getNetIPs(ips []interface{}) ([]net.IP, error) { - if ips == nil { - return []net.IP{}, nil - } - var ipStr string - var ok bool - var netIP net.IP - netIPs := make([]net.IP, len(ips)) - for i, ip := range ips { - ipStr, ok = ip.(string) - if !ok { - return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) - } - netIP = net.ParseIP(ipStr) - if netIP == nil { - return nil, fmt.Errorf("error parsing ip: %s", ipStr) - } - netIPs[i] = netIP - } - return netIPs, nil -} - -func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { - if alternateDNS == nil { - return []string{}, nil - } - var dnsStr string - var ok bool - alternateDNSStrs := make([]string, len(alternateDNS)) - for i, dns := 
range alternateDNS { - dnsStr, ok = dns.(string) - if !ok { - return nil, fmt.Errorf( - "error processing alternate dns name: %v is not a string", - dns, - ) - } - alternateDNSStrs[i] = dnsStr - } - return alternateDNSStrs, nil -} diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/date.go b/vendor/gopkg.in/Masterminds/sprig.v2/date.go deleted file mode 100644 index 1c2c3653c89a..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/date.go +++ /dev/null @@ -1,76 +0,0 @@ -package sprig - -import ( - "time" -) - -// Given a format and a date, format the date string. -// -// Date can be a `time.Time` or an `int, int32, int64`. -// In the later case, it is treated as seconds since UNIX -// epoch. -func date(fmt string, date interface{}) string { - return dateInZone(fmt, date, "Local") -} - -func htmlDate(date interface{}) string { - return dateInZone("2006-01-02", date, "Local") -} - -func htmlDateInZone(date interface{}, zone string) string { - return dateInZone("2006-01-02", date, zone) -} - -func dateInZone(fmt string, date interface{}, zone string) string { - var t time.Time - switch date := date.(type) { - default: - t = time.Now() - case time.Time: - t = date - case int64: - t = time.Unix(date, 0) - case int: - t = time.Unix(int64(date), 0) - case int32: - t = time.Unix(int64(date), 0) - } - - loc, err := time.LoadLocation(zone) - if err != nil { - loc, _ = time.LoadLocation("UTC") - } - - return t.In(loc).Format(fmt) -} - -func dateModify(fmt string, date time.Time) time.Time { - d, err := time.ParseDuration(fmt) - if err != nil { - return date - } - return date.Add(d) -} - -func dateAgo(date interface{}) string { - var t time.Time - - switch date := date.(type) { - default: - t = time.Now() - case time.Time: - t = date - case int64: - t = time.Unix(date, 0) - case int: - t = time.Unix(int64(date), 0) - } - // Drop resolution to seconds - duration := time.Since(t).Round(time.Second) - return duration.String() -} - -func toDate(fmt, str string) time.Time { - t, 
_ := time.ParseInLocation(fmt, str, time.Local) - return t -} diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/defaults.go b/vendor/gopkg.in/Masterminds/sprig.v2/defaults.go deleted file mode 100644 index ed6a8ab291c4..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/defaults.go +++ /dev/null @@ -1,83 +0,0 @@ -package sprig - -import ( - "encoding/json" - "reflect" -) - -// dfault checks whether `given` is set, and returns default if not set. -// -// This returns `d` if `given` appears not to be set, and `given` otherwise. -// -// For numeric types 0 is unset. -// For strings, maps, arrays, and slices, len() = 0 is considered unset. -// For bool, false is unset. -// Structs are never considered unset. -// -// For everything else, including pointers, a nil value is unset. -func dfault(d interface{}, given ...interface{}) interface{} { - - if empty(given) || empty(given[0]) { - return d - } - return given[0] -} - -// empty returns true if the given value has the zero value for its type. -func empty(given interface{}) bool { - g := reflect.ValueOf(given) - if !g.IsValid() { - return true - } - - // Basically adapted from text/template.isTrue - switch g.Kind() { - default: - return g.IsNil() - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return g.Len() == 0 - case reflect.Bool: - return g.Bool() == false - case reflect.Complex64, reflect.Complex128: - return g.Complex() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return g.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return g.Uint() == 0 - case reflect.Float32, reflect.Float64: - return g.Float() == 0 - case reflect.Struct: - return false - } -} - -// coalesce returns the first non-empty value. 
-func coalesce(v ...interface{}) interface{} { - for _, val := range v { - if !empty(val) { - return val - } - } - return nil -} - -// toJson encodes an item into a JSON string -func toJson(v interface{}) string { - output, _ := json.Marshal(v) - return string(output) -} - -// toPrettyJson encodes an item into a pretty (indented) JSON string -func toPrettyJson(v interface{}) string { - output, _ := json.MarshalIndent(v, "", " ") - return string(output) -} - -// ternary returns the first value if the last value is true, otherwise returns the second value. -func ternary(vt interface{}, vf interface{}, v bool) interface{} { - if v { - return vt - } - - return vf -} diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/dict.go b/vendor/gopkg.in/Masterminds/sprig.v2/dict.go deleted file mode 100644 index 3713e58a46c6..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/dict.go +++ /dev/null @@ -1,97 +0,0 @@ -package sprig - -import "github.com/imdario/mergo" - -func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { - d[key] = value - return d -} - -func unset(d map[string]interface{}, key string) map[string]interface{} { - delete(d, key) - return d -} - -func hasKey(d map[string]interface{}, key string) bool { - _, ok := d[key] - return ok -} - -func pluck(key string, d ...map[string]interface{}) []interface{} { - res := []interface{}{} - for _, dict := range d { - if val, ok := dict[key]; ok { - res = append(res, val) - } - } - return res -} - -func keys(dicts ...map[string]interface{}) []string { - k := []string{} - for _, dict := range dicts { - for key := range dict { - k = append(k, key) - } - } - return k -} - -func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { - res := map[string]interface{}{} - for _, k := range keys { - if v, ok := dict[k]; ok { - res[k] = v - } - } - return res -} - -func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { - res := map[string]interface{}{} 
- - omit := make(map[string]bool, len(keys)) - for _, k := range keys { - omit[k] = true - } - - for k, v := range dict { - if _, ok := omit[k]; !ok { - res[k] = v - } - } - return res -} - -func dict(v ...interface{}) map[string]interface{} { - dict := map[string]interface{}{} - lenv := len(v) - for i := 0; i < lenv; i += 2 { - key := strval(v[i]) - if i+1 >= lenv { - dict[key] = "" - continue - } - dict[key] = v[i+1] - } - return dict -} - -func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { - if err := mergo.Merge(&dst, src); err != nil { - // Swallow errors inside of a template. - return "" - } - } - return dst -} - -func values(dict map[string]interface{}) []interface{} { - values := []interface{}{} - for _, value := range dict { - values = append(values, value) - } - - return values -} diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/doc.go b/vendor/gopkg.in/Masterminds/sprig.v2/doc.go deleted file mode 100644 index 8f8f1d73703b..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Sprig: Template functions for Go. - -This package contains a number of utility functions for working with data -inside of Go `html/template` and `text/template` files. - -To add these functions, use the `template.Funcs()` method: - - t := templates.New("foo").Funcs(sprig.FuncMap()) - -Note that you should add the function map before you parse any template files. - - In several cases, Sprig reverses the order of arguments from the way they - appear in the standard library. This is to make it easier to pipe - arguments into functions. - -See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. 
-*/ -package sprig diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/functions.go b/vendor/gopkg.in/Masterminds/sprig.v2/functions.go deleted file mode 100644 index cf0f71c93f51..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/functions.go +++ /dev/null @@ -1,287 +0,0 @@ -package sprig - -import ( - "errors" - "html/template" - "os" - "path" - "strconv" - "strings" - ttemplate "text/template" - "time" - - util "github.com/aokoli/goutils" - "github.com/huandu/xstrings" -) - -// Produce the function map. -// -// Use this to pass the functions into the template engine: -// -// tpl := template.New("foo").Funcs(sprig.FuncMap())) -// -func FuncMap() template.FuncMap { - return HtmlFuncMap() -} - -// HermeticTextFuncMap returns a 'text/template'.FuncMap with only repeatable functions. -func HermeticTxtFuncMap() ttemplate.FuncMap { - r := TxtFuncMap() - for _, name := range nonhermeticFunctions { - delete(r, name) - } - return r -} - -// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. -func HermeticHtmlFuncMap() template.FuncMap { - r := HtmlFuncMap() - for _, name := range nonhermeticFunctions { - delete(r, name) - } - return r -} - -// TextFuncMap returns a 'text/template'.FuncMap -func TxtFuncMap() ttemplate.FuncMap { - return ttemplate.FuncMap(GenericFuncMap()) -} - -// HtmlFuncMap returns an 'html/template'.Funcmap -func HtmlFuncMap() template.FuncMap { - return template.FuncMap(GenericFuncMap()) -} - -// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. -func GenericFuncMap() map[string]interface{} { - gfm := make(map[string]interface{}, len(genericMap)) - for k, v := range genericMap { - gfm[k] = v - } - return gfm -} - -// These functions are not guaranteed to evaluate to the same result for given input, because they -// refer to the environemnt or global state. 
-var nonhermeticFunctions = []string{ - // Date functions - "date", - "date_in_zone", - "date_modify", - "now", - "htmlDate", - "htmlDateInZone", - "dateInZone", - "dateModify", - - // Strings - "randAlphaNum", - "randAlpha", - "randAscii", - "randNumeric", - "uuidv4", - - // OS - "env", - "expandenv", -} - -var genericMap = map[string]interface{}{ - "hello": func() string { return "Hello!" }, - - // Date functions - "date": date, - "date_in_zone": dateInZone, - "date_modify": dateModify, - "now": func() time.Time { return time.Now() }, - "htmlDate": htmlDate, - "htmlDateInZone": htmlDateInZone, - "dateInZone": dateInZone, - "dateModify": dateModify, - "ago": dateAgo, - "toDate": toDate, - - // Strings - "abbrev": abbrev, - "abbrevboth": abbrevboth, - "trunc": trunc, - "trim": strings.TrimSpace, - "upper": strings.ToUpper, - "lower": strings.ToLower, - "title": strings.Title, - "untitle": untitle, - "substr": substring, - // Switch order so that "foo" | repeat 5 - "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, - // Deprecated: Use trimAll. 
- "trimall": func(a, b string) string { return strings.Trim(b, a) }, - // Switch order so that "$foo" | trimall "$" - "trimAll": func(a, b string) string { return strings.Trim(b, a) }, - "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, - "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, - "nospace": util.DeleteWhiteSpace, - "initials": initials, - "randAlphaNum": randAlphaNumeric, - "randAlpha": randAlpha, - "randAscii": randAscii, - "randNumeric": randNumeric, - "swapcase": util.SwapCase, - "shuffle": xstrings.Shuffle, - "snakecase": xstrings.ToSnakeCase, - "camelcase": xstrings.ToCamelCase, - "kebabcase": xstrings.ToKebabCase, - "wrap": func(l int, s string) string { return util.Wrap(s, l) }, - "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, - // Switch order so that "foobar" | contains "foo" - "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, - "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, - "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, - "quote": quote, - "squote": squote, - "cat": cat, - "indent": indent, - "nindent": nindent, - "replace": replace, - "plural": plural, - "sha1sum": sha1sum, - "sha256sum": sha256sum, - "adler32sum": adler32sum, - "toString": strval, - - // Wrap Atoi to stop errors. 
- "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, - "int64": toInt64, - "int": toInt, - "float64": toFloat64, - - //"gt": func(a, b int) bool {return a > b}, - //"gte": func(a, b int) bool {return a >= b}, - //"lt": func(a, b int) bool {return a < b}, - //"lte": func(a, b int) bool {return a <= b}, - - // split "/" foo/bar returns map[int]string{0: foo, 1: bar} - "split": split, - "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, - // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} - "splitn": splitn, - "toStrings": strslice, - - "until": until, - "untilStep": untilStep, - - // VERY basic arithmetic. - "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, - "add": func(i ...interface{}) int64 { - var a int64 = 0 - for _, b := range i { - a += toInt64(b) - } - return a - }, - "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, - "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, - "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, - "mul": func(a interface{}, v ...interface{}) int64 { - val := toInt64(a) - for _, b := range v { - val = val * toInt64(b) - } - return val - }, - "biggest": max, - "max": max, - "min": min, - "ceil": ceil, - "floor": floor, - "round": round, - - // string slices. Note that we reverse the order b/c that's better - // for template processing. 
- "join": join, - "sortAlpha": sortAlpha, - - // Defaults - "default": dfault, - "empty": empty, - "coalesce": coalesce, - "compact": compact, - "toJson": toJson, - "toPrettyJson": toPrettyJson, - "ternary": ternary, - - // Reflection - "typeOf": typeOf, - "typeIs": typeIs, - "typeIsLike": typeIsLike, - "kindOf": kindOf, - "kindIs": kindIs, - - // OS: - "env": func(s string) string { return os.Getenv(s) }, - "expandenv": func(s string) string { return os.ExpandEnv(s) }, - - // File Paths: - "base": path.Base, - "dir": path.Dir, - "clean": path.Clean, - "ext": path.Ext, - "isAbs": path.IsAbs, - - // Encoding: - "b64enc": base64encode, - "b64dec": base64decode, - "b32enc": base32encode, - "b32dec": base32decode, - - // Data Structures: - "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. - "list": list, - "dict": dict, - "set": set, - "unset": unset, - "hasKey": hasKey, - "pluck": pluck, - "keys": keys, - "pick": pick, - "omit": omit, - "merge": merge, - "values": values, - - "append": push, "push": push, - "prepend": prepend, - "first": first, - "rest": rest, - "last": last, - "initial": initial, - "reverse": reverse, - "uniq": uniq, - "without": without, - "has": has, - "slice": slice, - - // Crypto: - "genPrivateKey": generatePrivateKey, - "derivePassword": derivePassword, - "buildCustomCert": buildCustomCertificate, - "genCA": generateCertificateAuthority, - "genSelfSignedCert": generateSelfSignedCertificate, - "genSignedCert": generateSignedCertificate, - - // UUIDs: - "uuidv4": uuidv4, - - // SemVer: - "semver": semver, - "semverCompare": semverCompare, - - // Flow Control: - "fail": func(msg string) (string, error) { return "", errors.New(msg) }, - - // Regex - "regexMatch": regexMatch, - "regexFindAll": regexFindAll, - "regexFind": regexFind, - "regexReplaceAll": regexReplaceAll, - "regexReplaceAllLiteral": regexReplaceAllLiteral, - "regexSplit": regexSplit, -} diff --git 
a/vendor/gopkg.in/Masterminds/sprig.v2/glide.lock b/vendor/gopkg.in/Masterminds/sprig.v2/glide.lock deleted file mode 100644 index e6e675a087c8..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/glide.lock +++ /dev/null @@ -1,33 +0,0 @@ -hash: f9e13000d2d99ee559a37e9b35d310bab6687b64141e98bd122e772c7e1da63a -updated: 2019-01-03T16:14:49.53019-07:00 -imports: -- name: github.com/aokoli/goutils - version: 9c37978a95bd5c709a15883b6242714ea6709e64 -- name: github.com/google/uuid - version: 064e2069ce9c359c118179501254f67d7d37ba24 -- name: github.com/huandu/xstrings - version: f02667b379e2fb5916c3cda2cf31e0eb885d79f8 -- name: github.com/imdario/mergo - version: 7fe0c75c13abdee74b09fcacef5ea1c6bba6a874 -- name: github.com/Masterminds/goutils - version: 41ac8693c5c10a92ea1ff5ac3a7f95646f6123b0 -- name: github.com/Masterminds/semver - version: 59c29afe1a994eacb71c833025ca7acf874bb1da -- name: github.com/stretchr/testify - version: c679ae2cc0cb27ec3293fea7e254e47386f05d69 - subpackages: - - assert -- name: golang.org/x/crypto - version: de0752318171da717af4ce24d0a2e8626afaeb11 - subpackages: - - pbkdf2 - - scrypt -testImports: -- name: github.com/davecgh/go-spew - version: 782f4967f2dc4564575ca782fe2d04090b5faca8 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/glide.yaml b/vendor/gopkg.in/Masterminds/sprig.v2/glide.yaml deleted file mode 100644 index 83189ac4b278..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/glide.yaml +++ /dev/null @@ -1,16 +0,0 @@ -package: github.com/Masterminds/sprig -import: -- package: github.com/Masterminds/goutils - version: ^1.0.0 -- package: github.com/google/uuid - version: ^0.2 -- package: golang.org/x/crypto - subpackages: - - scrypt -- package: github.com/Masterminds/semver - version: v1.2.2 -- package: github.com/stretchr/testify -- package: github.com/imdario/mergo - version: ~0.2.2 
-- package: github.com/huandu/xstrings - version: ^1.2 diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/list.go b/vendor/gopkg.in/Masterminds/sprig.v2/list.go deleted file mode 100644 index 184c1ca13da2..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/list.go +++ /dev/null @@ -1,291 +0,0 @@ -package sprig - -import ( - "fmt" - "reflect" - "sort" -) - -// Reflection is used in these functions so that slices and arrays of strings, -// ints, and other types not implementing []interface{} can be worked with. -// For example, this is useful if you need to work on the output of regexs. - -func list(v ...interface{}) []interface{} { - return v -} - -func push(list interface{}, v interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[i] = l2.Index(i).Interface() - } - - return append(nl, v) - - default: - panic(fmt.Sprintf("Cannot push on type %s", tp)) - } -} - -func prepend(list interface{}, v interface{}) []interface{} { - //return append([]interface{}{v}, list...) - - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[i] = l2.Index(i).Interface() - } - - return append([]interface{}{v}, nl...) 
- - default: - panic(fmt.Sprintf("Cannot prepend on type %s", tp)) - } -} - -func last(list interface{}) interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil - } - - return l2.Index(l - 1).Interface() - default: - panic(fmt.Sprintf("Cannot find last on type %s", tp)) - } -} - -func first(list interface{}) interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil - } - - return l2.Index(0).Interface() - default: - panic(fmt.Sprintf("Cannot find first on type %s", tp)) - } -} - -func rest(list interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil - } - - nl := make([]interface{}, l-1) - for i := 1; i < l; i++ { - nl[i-1] = l2.Index(i).Interface() - } - - return nl - default: - panic(fmt.Sprintf("Cannot find rest on type %s", tp)) - } -} - -func initial(list interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil - } - - nl := make([]interface{}, l-1) - for i := 0; i < l-1; i++ { - nl[i] = l2.Index(i).Interface() - } - - return nl - default: - panic(fmt.Sprintf("Cannot find initial on type %s", tp)) - } -} - -func sortAlpha(list interface{}) []string { - k := reflect.Indirect(reflect.ValueOf(list)).Kind() - switch k { - case reflect.Slice, reflect.Array: - a := strslice(list) - s := sort.StringSlice(a) - s.Sort() - return s - } - return []string{strval(list)} -} - -func reverse(v interface{}) []interface{} { - tp := reflect.TypeOf(v).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(v) - - l := l2.Len() - // We do not sort in place 
because the incoming array should not be altered. - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[l-i-1] = l2.Index(i).Interface() - } - - return nl - default: - panic(fmt.Sprintf("Cannot find reverse on type %s", tp)) - } -} - -func compact(list interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !empty(item) { - nl = append(nl, item) - } - } - - return nl - default: - panic(fmt.Sprintf("Cannot compact on type %s", tp)) - } -} - -func uniq(list interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - dest := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !inList(dest, item) { - dest = append(dest, item) - } - } - - return dest - default: - panic(fmt.Sprintf("Cannot find uniq on type %s", tp)) - } -} - -func inList(haystack []interface{}, needle interface{}) bool { - for _, h := range haystack { - if reflect.DeepEqual(needle, h) { - return true - } - } - return false -} - -func without(list interface{}, omit ...interface{}) []interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - res := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !inList(omit, item) { - res = append(res, item) - } - } - - return res - default: - panic(fmt.Sprintf("Cannot find without on type %s", tp)) - } -} - -func has(needle interface{}, haystack interface{}) bool { - tp := reflect.TypeOf(haystack).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(haystack) - var item interface{} - l := l2.Len() - for i := 0; i < 
l; i++ { - item = l2.Index(i).Interface() - if reflect.DeepEqual(needle, item) { - return true - } - } - - return false - default: - panic(fmt.Sprintf("Cannot find has on type %s", tp)) - } -} - -// $list := [1, 2, 3, 4, 5] -// slice $list -> list[0:5] = list[:] -// slice $list 0 3 -> list[0:3] = list[:3] -// slice $list 3 5 -> list[3:5] -// slice $list 3 -> list[3:5] = list[3:] -func slice(list interface{}, indices ...interface{}) interface{} { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil - } - - var start, end int - if len(indices) > 0 { - start = toInt(indices[0]) - } - if len(indices) < 2 { - end = l - } else { - end = toInt(indices[1]) - } - - return l2.Slice(start, end).Interface() - default: - panic(fmt.Sprintf("list should be type of slice or array but %s", tp)) - } -} diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/numeric.go b/vendor/gopkg.in/Masterminds/sprig.v2/numeric.go deleted file mode 100644 index 4bd89bf7f80d..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/numeric.go +++ /dev/null @@ -1,159 +0,0 @@ -package sprig - -import ( - "math" - "reflect" - "strconv" -) - -// toFloat64 converts 64-bit floats -func toFloat64(v interface{}) float64 { - if str, ok := v.(string); ok { - iv, err := strconv.ParseFloat(str, 64) - if err != nil { - return 0 - } - return iv - } - - val := reflect.Indirect(reflect.ValueOf(v)) - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return float64(val.Int()) - case reflect.Uint8, reflect.Uint16, reflect.Uint32: - return float64(val.Uint()) - case reflect.Uint, reflect.Uint64: - return float64(val.Uint()) - case reflect.Float32, reflect.Float64: - return val.Float() - case reflect.Bool: - if val.Bool() == true { - return 1 - } - return 0 - default: - return 0 - } -} - -func toInt(v interface{}) int { - //It's not optimal. 
Bud I don't want duplicate toInt64 code. - return int(toInt64(v)) -} - -// toInt64 converts integer types to 64-bit integers -func toInt64(v interface{}) int64 { - if str, ok := v.(string); ok { - iv, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return 0 - } - return iv - } - - val := reflect.Indirect(reflect.ValueOf(v)) - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return val.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32: - return int64(val.Uint()) - case reflect.Uint, reflect.Uint64: - tv := val.Uint() - if tv <= math.MaxInt64 { - return int64(tv) - } - // TODO: What is the sensible thing to do here? - return math.MaxInt64 - case reflect.Float32, reflect.Float64: - return int64(val.Float()) - case reflect.Bool: - if val.Bool() == true { - return 1 - } - return 0 - default: - return 0 - } -} - -func max(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb > aa { - aa = bb - } - } - return aa -} - -func min(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb < aa { - aa = bb - } - } - return aa -} - -func until(count int) []int { - step := 1 - if count < 0 { - step = -1 - } - return untilStep(0, count, step) -} - -func untilStep(start, stop, step int) []int { - v := []int{} - - if stop < start { - if step >= 0 { - return v - } - for i := start; i > stop; i += step { - v = append(v, i) - } - return v - } - - if step <= 0 { - return v - } - for i := start; i < stop; i += step { - v = append(v, i) - } - return v -} - -func floor(a interface{}) float64 { - aa := toFloat64(a) - return math.Floor(aa) -} - -func ceil(a interface{}) float64 { - aa := toFloat64(a) - return math.Ceil(aa) -} - -func round(a interface{}, p int, r_opt ...float64) float64 { - roundOn := .5 - if len(r_opt) > 0 { - roundOn = r_opt[0] - } - val := toFloat64(a) - places := toFloat64(p) - - var round 
float64 - pow := math.Pow(10, places) - digit := pow * val - _, div := math.Modf(digit) - if div >= roundOn { - round = math.Ceil(digit) - } else { - round = math.Floor(digit) - } - return round / pow -} diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/reflect.go b/vendor/gopkg.in/Masterminds/sprig.v2/reflect.go deleted file mode 100644 index 8a65c132f08f..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/reflect.go +++ /dev/null @@ -1,28 +0,0 @@ -package sprig - -import ( - "fmt" - "reflect" -) - -// typeIs returns true if the src is the type named in target. -func typeIs(target string, src interface{}) bool { - return target == typeOf(src) -} - -func typeIsLike(target string, src interface{}) bool { - t := typeOf(src) - return target == t || "*"+target == t -} - -func typeOf(src interface{}) string { - return fmt.Sprintf("%T", src) -} - -func kindIs(target string, src interface{}) bool { - return target == kindOf(src) -} - -func kindOf(src interface{}) string { - return reflect.ValueOf(src).Kind().String() -} diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/regex.go b/vendor/gopkg.in/Masterminds/sprig.v2/regex.go deleted file mode 100644 index 2016f66336f4..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/regex.go +++ /dev/null @@ -1,35 +0,0 @@ -package sprig - -import ( - "regexp" -) - -func regexMatch(regex string, s string) bool { - match, _ := regexp.MatchString(regex, s) - return match -} - -func regexFindAll(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.FindAllString(s, n) -} - -func regexFind(regex string, s string) string { - r := regexp.MustCompile(regex) - return r.FindString(s) -} - -func regexReplaceAll(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllString(s, repl) -} - -func regexReplaceAllLiteral(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllLiteralString(s, repl) -} - -func 
regexSplit(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.Split(s, n) -} diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/semver.go b/vendor/gopkg.in/Masterminds/sprig.v2/semver.go deleted file mode 100644 index c2bf8a1fdf3e..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/semver.go +++ /dev/null @@ -1,23 +0,0 @@ -package sprig - -import ( - sv2 "github.com/Masterminds/semver" -) - -func semverCompare(constraint, version string) (bool, error) { - c, err := sv2.NewConstraint(constraint) - if err != nil { - return false, err - } - - v, err := sv2.NewVersion(version) - if err != nil { - return false, err - } - - return c.Check(v), nil -} - -func semver(version string) (*sv2.Version, error) { - return sv2.NewVersion(version) -} diff --git a/vendor/gopkg.in/Masterminds/sprig.v2/strings.go b/vendor/gopkg.in/Masterminds/sprig.v2/strings.go deleted file mode 100644 index 3a6967cfc4f4..000000000000 --- a/vendor/gopkg.in/Masterminds/sprig.v2/strings.go +++ /dev/null @@ -1,210 +0,0 @@ -package sprig - -import ( - "encoding/base32" - "encoding/base64" - "fmt" - "reflect" - "strconv" - "strings" - - util "github.com/aokoli/goutils" -) - -func base64encode(v string) string { - return base64.StdEncoding.EncodeToString([]byte(v)) -} - -func base64decode(v string) string { - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return err.Error() - } - return string(data) -} - -func base32encode(v string) string { - return base32.StdEncoding.EncodeToString([]byte(v)) -} - -func base32decode(v string) string { - data, err := base32.StdEncoding.DecodeString(v) - if err != nil { - return err.Error() - } - return string(data) -} - -func abbrev(width int, s string) string { - if width < 4 { - return s - } - r, _ := util.Abbreviate(s, width) - return r -} - -func abbrevboth(left, right int, s string) string { - if right < 4 || left > 0 && right < 7 { - return s - } - r, _ := util.AbbreviateFull(s, left, right) - return r -} 
-func initials(s string) string { - // Wrap this just to eliminate the var args, which templates don't do well. - return util.Initials(s) -} - -func randAlphaNumeric(count int) string { - // It is not possible, it appears, to actually generate an error here. - r, _ := util.RandomAlphaNumeric(count) - return r -} - -func randAlpha(count int) string { - r, _ := util.RandomAlphabetic(count) - return r -} - -func randAscii(count int) string { - r, _ := util.RandomAscii(count) - return r -} - -func randNumeric(count int) string { - r, _ := util.RandomNumeric(count) - return r -} - -func untitle(str string) string { - return util.Uncapitalize(str) -} - -func quote(str ...interface{}) string { - out := make([]string, len(str)) - for i, s := range str { - out[i] = fmt.Sprintf("%q", strval(s)) - } - return strings.Join(out, " ") -} - -func squote(str ...interface{}) string { - out := make([]string, len(str)) - for i, s := range str { - out[i] = fmt.Sprintf("'%v'", s) - } - return strings.Join(out, " ") -} - -func cat(v ...interface{}) string { - r := strings.TrimSpace(strings.Repeat("%v ", len(v))) - return fmt.Sprintf(r, v...) 
-} - -func indent(spaces int, v string) string { - pad := strings.Repeat(" ", spaces) - return pad + strings.Replace(v, "\n", "\n"+pad, -1) -} - -func nindent(spaces int, v string) string { - return "\n" + indent(spaces, v) -} - -func replace(old, new, src string) string { - return strings.Replace(src, old, new, -1) -} - -func plural(one, many string, count int) string { - if count == 1 { - return one - } - return many -} - -func strslice(v interface{}) []string { - switch v := v.(type) { - case []string: - return v - case []interface{}: - l := len(v) - b := make([]string, l) - for i := 0; i < l; i++ { - b[i] = strval(v[i]) - } - return b - default: - val := reflect.ValueOf(v) - switch val.Kind() { - case reflect.Array, reflect.Slice: - l := val.Len() - b := make([]string, l) - for i := 0; i < l; i++ { - b[i] = strval(val.Index(i).Interface()) - } - return b - default: - return []string{strval(v)} - } - } -} - -func strval(v interface{}) string { - switch v := v.(type) { - case string: - return v - case []byte: - return string(v) - case error: - return v.Error() - case fmt.Stringer: - return v.String() - default: - return fmt.Sprintf("%v", v) - } -} - -func trunc(c int, s string) string { - if len(s) <= c { - return s - } - return s[0:c] -} - -func join(sep string, v interface{}) string { - return strings.Join(strslice(v), sep) -} - -func split(sep, orig string) map[string]string { - parts := strings.Split(orig, sep) - res := make(map[string]string, len(parts)) - for i, v := range parts { - res["_"+strconv.Itoa(i)] = v - } - return res -} - -func splitn(sep string, n int, orig string) map[string]string { - parts := strings.SplitN(orig, sep, n) - res := make(map[string]string, len(parts)) - for i, v := range parts { - res["_"+strconv.Itoa(i)] = v - } - return res -} - -// substring creates a substring of the given string. -// -// If start is < 0, this calls string[:length]. 
-// -// If start is >= 0 and length < 0, this calls string[start:] -// -// Otherwise, this calls string[start, length]. -func substring(start, length int, s string) string { - if start < 0 { - return s[:length] - } - if length < 0 { - return s[start:] - } - return s[start:length] -} diff --git a/vendor/vendor.json b/vendor/vendor.json index 9a999deb6fc6..e30401170fec 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -10,12 +10,6 @@ "version": "1.0", "versionExact": "1.0" }, - { - "checksumSHA1": "4Ox59zHnNrsNy7v+I0hC0lDzwv8=", - "path": "github.com/Masterminds/semver", - "revision": "059deebd1619b9ae33232c797f7ab0e8d6c6fd69", - "revisionTime": "2019-04-29T19:09:51Z" - }, { "checksumSHA1": "NRonvQTe073YVead6//CQ5oVgK4=", "path": "github.com/Sirupsen/logrus", @@ -60,12 +54,6 @@ "revision": "dcb3e4bb79906c5695321440deca10592f4eeb2d", "revisionTime": "2017-02-22T15:40:38Z" }, - { - "checksumSHA1": "pk+Fj0KfcNdwugEfI9657E7flgI=", - "path": "github.com/TykTechnologies/gorpc", - "revision": "2fd6ca5242c4dbee5ab151010ee844f3fb5507a8", - "revisionTime": "2018-09-28T16:00:09Z" - }, { "checksumSHA1": "FEq3KG6Kgarh8P5XIUx3bH13zDM=", "path": "github.com/TykTechnologies/goverify", @@ -132,12 +120,6 @@ "revision": "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a", "revisionTime": "2015-10-22T06:55:26Z" }, - { - "checksumSHA1": "XqRLTmOEU7eSYOd9bij8J48kpq8=", - "path": "github.com/aokoli/goutils", - "revision": "41ac8693c5c10a92ea1ff5ac3a7f95646f6123b0", - "revisionTime": "2018-12-03T09:12:26Z" - }, { "checksumSHA1": "9dh0q0rB99j2KWdfon8u6GtLf6U=", "path": "github.com/bshuster-repo/logrus-logstash-hook", @@ -245,10 +227,10 @@ "revisionTime": "2016-07-11T12:47:28Z" }, { - "checksumSHA1": "Y2MOwzNZfl4NRNDbLCZa6sgx7O0=", + "checksumSHA1": "Pyou8mceOASSFxc7GeXZuVdSMi0=", "path": "github.com/golang/protobuf/proto", - "revision": "1d3f30b51784bec5aad268e59fd3c2fc1c2fe73f", - "revisionTime": "2018-11-28T19:23:52Z" + "revision": "b4deda0973fb4c70b50d226b1af49f3da59f5265", + 
"revisionTime": "2018-04-30T18:52:41Z" }, { "checksumSHA1": "tkJPssYejSjuAwE2tdEnoEIj93Q=", @@ -280,12 +262,6 @@ "revision": "b47395aa17662ff86cb5c52f3bf669bf842ec27a", "revisionTime": "2019-02-26T17:44:33Z" }, - { - "checksumSHA1": "s5tdcjh84+tiKS3oSq77mzjB970=", - "path": "github.com/google/uuid", - "revision": "c2e93f3ae59f2904160ceaab466009f965df46d6", - "revisionTime": "2019-04-16T17:24:45Z" - }, { "checksumSHA1": "g/V4qrXjUGG9B+e3hB+4NAYJ5Gs=", "path": "github.com/gorilla/context", @@ -306,48 +282,6 @@ "revision": "b89020ee79b89a7f932c5617d218fc06db382f53", "revisionTime": "2017-12-01T01:43:01Z" }, - { - "checksumSHA1": "cJZUWi8GxC1KItxFayZLgZOzaHU=", - "path": "github.com/hashicorp/hil", - "revision": "59d7c1fee952b29ee4c9ceba65c9d02ee28eb281", - "revisionTime": "2019-01-29T15:56:52Z" - }, - { - "checksumSHA1": "0S0KeBcfqVFYBPeZkuJ4fhQ5mCA=", - "path": "github.com/hashicorp/hil/ast", - "revision": "59d7c1fee952b29ee4c9ceba65c9d02ee28eb281", - "revisionTime": "2019-01-29T15:56:52Z" - }, - { - "checksumSHA1": "P5PZ3k7SmqWmxgJ8Q0gLzeNpGhE=", - "path": "github.com/hashicorp/hil/parser", - "revision": "59d7c1fee952b29ee4c9ceba65c9d02ee28eb281", - "revisionTime": "2019-01-29T15:56:52Z" - }, - { - "checksumSHA1": "ekmuVa77ebGDPXI+4FaskrID8lQ=", - "path": "github.com/hashicorp/hil/scanner", - "revision": "59d7c1fee952b29ee4c9ceba65c9d02ee28eb281", - "revisionTime": "2019-01-29T15:56:52Z" - }, - { - "checksumSHA1": "P8gNPDuOzmiK4Lz9xG7OBy4Rlm8=", - "path": "github.com/hashicorp/terraform/flatmap", - "revision": "4596c44c05ca0be0b34ff30e26abc20b887c34e1", - "revisionTime": "2018-12-17T20:43:59Z" - }, - { - "checksumSHA1": "GgSlQYD/MBe2lqMqF6BP51d+67o=", - "path": "github.com/huandu/xstrings", - "revision": "8bbcf2f9ccb55755e748b7644164cd4bdce94c1d", - "revisionTime": "2018-09-06T15:17:51Z" - }, - { - "checksumSHA1": "/G+e65c6drXxJkkOhFqbtSL8vDk=", - "path": "github.com/imdario/mergo", - "revision": "f757d8626a734c8a74b2b1f173d4eb4f2abbef32", - "revisionTime": 
"2019-05-31T06:39:13Z" - }, { "checksumSHA1": "tP/Ohi+jei+mhMoXH5iEGav3ZTE=", "path": "github.com/jeffail/tunny", @@ -374,6 +308,12 @@ "revision": "ed3ca8a15a931b141440a7e98e4f716eec255f7d", "revisionTime": "2014-12-02T16:54:02Z" }, + { + "checksumSHA1": "OQ6h/9ehSr5avWJD3CbaZdZ8pwU=", + "path": "github.com/lonelycode/gorpc", + "revision": "5108a99af7137601fdc00c899caa4a3f0a8b31ef", + "revisionTime": "2015-06-11T14:24:37Z" + }, { "checksumSHA1": "5OpChAvGZcoxGkZAp3qf5ZxKR84=", "path": "github.com/lonelycode/osin", @@ -490,66 +430,6 @@ "version": "v1.9.0", "versionExact": "v1.9.0" }, - { - "checksumSHA1": "6EIQaeaWECn3zlechdGkqmIKld4=", - "path": "github.com/opentracing/opentracing-go", - "revision": "659c90643e714681897ec2521c60567dd21da733", - "revisionTime": "2019-03-23T20:25:03Z" - }, - { - "checksumSHA1": "hZnCURJIhg56jbKk0UFRfZkcQ+c=", - "path": "github.com/opentracing/opentracing-go/ext", - "revision": "659c90643e714681897ec2521c60567dd21da733", - "revisionTime": "2019-03-23T20:25:03Z" - }, - { - "checksumSHA1": "tnkdNJbJxNKuPZMWapP1xhKIIGw=", - "path": "github.com/opentracing/opentracing-go/log", - "revision": "659c90643e714681897ec2521c60567dd21da733", - "revisionTime": "2019-03-23T20:25:03Z" - }, - { - "checksumSHA1": "dFMmX9FSHumkkEI2OaZLX97lB6o=", - "path": "github.com/openzipkin/zipkin-go", - "revision": "1277a5f30075b9c13d37775aed4f0f3b44d1a710", - "revisionTime": "2019-07-09T15:00:55Z" - }, - { - "checksumSHA1": "3JQxW7T5X72fM7FCPp+Aj+Wdbpc=", - "path": "github.com/openzipkin/zipkin-go/idgenerator", - "revision": "1277a5f30075b9c13d37775aed4f0f3b44d1a710", - "revisionTime": "2019-07-09T15:00:55Z" - }, - { - "checksumSHA1": "xDKODRXa9xpJ5dU9G5wYD4DYyNM=", - "path": "github.com/openzipkin/zipkin-go/model", - "revision": "1277a5f30075b9c13d37775aed4f0f3b44d1a710", - "revisionTime": "2019-07-09T15:00:55Z" - }, - { - "checksumSHA1": "wiE2o/B3MDs8Ma0N22Q9gdeve9g=", - "path": "github.com/openzipkin/zipkin-go/propagation", - "revision": 
"1277a5f30075b9c13d37775aed4f0f3b44d1a710", - "revisionTime": "2019-07-09T15:00:55Z" - }, - { - "checksumSHA1": "eGW3FcVBCPA+oL5vbZb92u43W7Q=", - "path": "github.com/openzipkin/zipkin-go/propagation/b3", - "revision": "1277a5f30075b9c13d37775aed4f0f3b44d1a710", - "revisionTime": "2019-07-09T15:00:55Z" - }, - { - "checksumSHA1": "UqHH+7beZuR/f0ovM5pJKoHbuwY=", - "path": "github.com/openzipkin/zipkin-go/reporter", - "revision": "1277a5f30075b9c13d37775aed4f0f3b44d1a710", - "revisionTime": "2019-07-09T15:00:55Z" - }, - { - "checksumSHA1": "QFmesSCNJXbC+yhdKzrNIMt3r/4=", - "path": "github.com/openzipkin/zipkin-go/reporter/http", - "revision": "1277a5f30075b9c13d37775aed4f0f3b44d1a710", - "revisionTime": "2019-07-09T15:00:55Z" - }, { "checksumSHA1": "xmWd9nj+yZQ5TiHznuoyj4fkknk=", "path": "github.com/oschwald/maxminddb-golang", @@ -664,114 +544,6 @@ "revision": "63d7cfa0284d0bc9bf41d58f802037559c45ce8f", "revisionTime": "2016-10-27T01:03:14Z" }, - { - "checksumSHA1": "9W312a36vZ/J33+kGZb4SsHYNEQ=", - "path": "github.com/uber/jaeger-client-go", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "/PA9bYu1glNCL5ucsKj8s+NkkHc=", - "path": "github.com/uber/jaeger-client-go/config", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "KM5UXTWkHULmw0dDRNuk8ogWyGs=", - "path": "github.com/uber/jaeger-client-go/internal/baggage", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "tZqlcHV1XoLdZp9jfnydzsZAvYo=", - "path": "github.com/uber/jaeger-client-go/internal/baggage/remote", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "QB0L0GrzyMGQp6ivkkxp7a1DPsE=", - "path": "github.com/uber/jaeger-client-go/internal/spanlog", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - 
"revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "79HRO/+ekkpwqDB/OMiW+AHJtlE=", - "path": "github.com/uber/jaeger-client-go/internal/throttler", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "OVQDWFtFMs+NODe0F/S5kYViQco=", - "path": "github.com/uber/jaeger-client-go/internal/throttler/remote", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "tMP/vxbHwNAbOEaUhic5/meKfac=", - "path": "github.com/uber/jaeger-client-go/log", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "empqDwPkKUkGNeGHCu/EWoGI21o=", - "path": "github.com/uber/jaeger-client-go/rpcmetrics", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "+ffspyTBQLql2UiU6muvfWR/m1o=", - "path": "github.com/uber/jaeger-client-go/thrift", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "fMIQ4sJFCkqFYhXvvLKIlofqxvY=", - "path": "github.com/uber/jaeger-client-go/thrift-gen/agent", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "fRR2p+JAp7paApf32YuQuWU7yzY=", - "path": "github.com/uber/jaeger-client-go/thrift-gen/baggage", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "JZkMEOmiOFFEuGCsDOVLK5RzvMM=", - "path": "github.com/uber/jaeger-client-go/thrift-gen/jaeger", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "0teQUhTqTE1fLs+vbnTTzWOqdEQ=", - "path": "github.com/uber/jaeger-client-go/thrift-gen/sampling", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": 
"2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "jB+fvt3/iJYRDDp6+twGm5gGIXQ=", - "path": "github.com/uber/jaeger-client-go/thrift-gen/zipkincore", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "UlW+AcyeItWM0x1W4vT9hbUiOJs=", - "path": "github.com/uber/jaeger-client-go/transport", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "DKwwIk9vq53IKO7RKccat9cnqeo=", - "path": "github.com/uber/jaeger-client-go/utils", - "revision": "896f2abd37e099bae3eae942250d1a37e4bdce0b", - "revisionTime": "2019-04-23T03:57:12Z" - }, - { - "checksumSHA1": "gF1WPb3/R8RoZw/wqEQmkwntrQc=", - "path": "github.com/uber/jaeger-lib/metrics", - "revision": "d036253de8f5b698150d81b922486f1e8e7628ec", - "revisionTime": "2019-01-22T22:26:57Z" - }, { "checksumSHA1": "ih4CCYD19rjjF9fjid+l7w/+cIg=", "path": "github.com/wadey/gocovmerge", @@ -836,18 +608,6 @@ "revision": "22ddb68eccda408bbf17759ac18d3120ce0d4f3f", "revisionTime": "2017-02-07T22:51:51Z" }, - { - "checksumSHA1": "1MGpGDQqnUoRpv7VEcQrXOBydXE=", - "path": "golang.org/x/crypto/pbkdf2", - "revision": "20be4c3c3ed52bfccdb2d59a412ee1a936d175a7", - "revisionTime": "2019-04-16T00:04:41Z" - }, - { - "checksumSHA1": "o8ysWPosGVxkSVMZHfp2tYHBTu8=", - "path": "golang.org/x/crypto/scrypt", - "revision": "20be4c3c3ed52bfccdb2d59a412ee1a936d175a7", - "revisionTime": "2019-04-16T00:04:41Z" - }, { "checksumSHA1": "uX2McdP4VcQ6zkAF0Q4oyd0rFtU=", "origin": "github.com/miekg/dns/vendor/golang.org/x/net/bpf", @@ -858,8 +618,8 @@ { "checksumSHA1": "GtamqiJoL7PGHsN454AoffBFMa8=", "path": "golang.org/x/net/context", - "revision": "3a22650c66bd7f4fb6d1e8072ffd7b75c8a27898", - "revisionTime": "2019-02-11T12:35:32Z" + "revision": "e147a9138326bc0e9d4e179541ffd8af41cff8a9", + "revisionTime": "2018-12-15T14:29:05Z" }, { "checksumSHA1": "vqc3a+oTUGX8PmD0TS+qQ7gmN8I=", @@ -1177,12 +937,6 @@ "revision": 
"18957c5fcde0c3037144153d3db03756542007e5", "revisionTime": "2018-05-04T17:55:21Z" }, - { - "checksumSHA1": "qQvHoPEz6V+beQsDzZSMsNkNFHk=", - "path": "google.golang.org/grpc/examples/helloworld/helloworld", - "revision": "29c406a5bd0ed61c5752d00c6aabd9fa036ed328", - "revisionTime": "2019-02-15T00:58:00Z" - }, { "checksumSHA1": "n+8rAQxWcf9LPJat2UHq2uVzH20=", "path": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", @@ -1283,12 +1037,6 @@ "version": "v1.0.5", "versionExact": "v1.0.5" }, - { - "checksumSHA1": "EkdMN0N+5HzwrdFYID39Ufi5TVM=", - "path": "gopkg.in/Masterminds/sprig.v2", - "revision": "544a9b1d90f323f6509491b389714fbbd126bee3", - "revisionTime": "2019-01-03T23:15:13Z" - }, { "checksumSHA1": "3SZTatHIy9OTKc95YlVfXKnoySg=", "path": "gopkg.in/alecthomas/kingpin.v2", diff --git a/version.go b/version.go new file mode 100644 index 000000000000..5743424b4aa6 --- /dev/null +++ b/version.go @@ -0,0 +1,3 @@ +package main + +const VERSION = "v2.8.3"