diff --git a/changelog.d/13400.feature b/changelog.d/13400.feature new file mode 100644 index 000000000000..b8b4a9670f76 --- /dev/null +++ b/changelog.d/13400.feature @@ -0,0 +1 @@ +Migrate from OpenTracing to OpenTelemetry (config changes necessary). diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8d68719958d6..e398895b36fa 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -88,7 +88,7 @@ - [Git Usage](development/git.md) - [Testing]() - [Demo scripts](development/demo.md) - - [OpenTracing](opentracing.md) + - [Tracing](tracing.md) - [Database Schemas](development/database_schema.md) - [Experimental features](development/experimental_features.md) - [Dependency management](development/dependencies.md) diff --git a/docs/opentracing.md b/docs/opentracing.md index abb94b565f21..976605a4c762 100644 --- a/docs/opentracing.md +++ b/docs/opentracing.md @@ -1,94 +1,3 @@ # OpenTracing -## Background - -OpenTracing is a semi-standard being adopted by a number of distributed -tracing platforms. It is a common api for facilitating vendor-agnostic -tracing instrumentation. That is, we can use the OpenTracing api and -select one of a number of tracer implementations to do the heavy lifting -in the background. Our current selected implementation is Jaeger. - -OpenTracing is a tool which gives an insight into the causal -relationship of work done in and between servers. The servers each track -events and report them to a centralised server - in Synapse's case: -Jaeger. The basic unit used to represent events is the span. The span -roughly represents a single piece of work that was done and the time at -which it occurred. A span can have child spans, meaning that the work of -the child had to be completed for the parent span to complete, or it can -have follow-on spans which represent work that is undertaken as a result -of the parent but is not depended on by the parent to in order to -finish. 
- -Since this is undertaken in a distributed environment a request to -another server, such as an RPC or a simple GET, can be considered a span -(a unit or work) for the local server. This causal link is what -OpenTracing aims to capture and visualise. In order to do this metadata -about the local server's span, i.e the 'span context', needs to be -included with the request to the remote. - -It is up to the remote server to decide what it does with the spans it -creates. This is called the sampling policy and it can be configured -through Jaeger's settings. - -For OpenTracing concepts see -. - -For more information about Jaeger's implementation see - - -## Setting up OpenTracing - -To receive OpenTracing spans, start up a Jaeger server. This can be done -using docker like so: - -```sh -docker run -d --name jaeger \ - -p 6831:6831/udp \ - -p 6832:6832/udp \ - -p 5778:5778 \ - -p 16686:16686 \ - -p 14268:14268 \ - jaegertracing/all-in-one:1 -``` - -Latest documentation is probably at -https://www.jaegertracing.io/docs/latest/getting-started. - -## Enable OpenTracing in Synapse - -OpenTracing is not enabled by default. It must be enabled in the -homeserver config by adding the `opentracing` option to your config file. You can find -documentation about how to do this in the [config manual under the header 'Opentracing'](usage/configuration/config_documentation.md#opentracing). -See below for an example Opentracing configuration: - -```yaml -opentracing: - enabled: true - homeserver_whitelist: - - "mytrustedhomeserver.org" - - "*.myotherhomeservers.com" -``` - -## Homeserver whitelisting - -The homeserver whitelist is configured using regular expressions. A list -of regular expressions can be given and their union will be compared -when propagating any spans contexts to another homeserver. 
- -Though it's mostly safe to send and receive span contexts to and from -untrusted users since span contexts are usually opaque ids it can lead -to two problems, namely: - -- If the span context is marked as sampled by the sending homeserver - the receiver will sample it. Therefore two homeservers with wildly - different sampling policies could incur higher sampling counts than - intended. -- Sending servers can attach arbitrary data to spans, known as - 'baggage'. For safety this has been disabled in Synapse but that - doesn't prevent another server sending you baggage which will be - logged to OpenTracing's logs. - -## Configuring Jaeger - -Sampling strategies can be set as in this document: -. +Synapse now uses OpenTelemetry and the [documentation for tracing has moved](./tracing.md). diff --git a/docs/tracing.md b/docs/tracing.md new file mode 100644 index 000000000000..37f07fc28e51 --- /dev/null +++ b/docs/tracing.md @@ -0,0 +1,90 @@ +# Tracing + +## Background + +OpenTelemetry is a semi-standard being adopted by a number of distributed +tracing platforms. It is a common API for facilitating vendor-agnostic +tracing instrumentation. + +Tracing is a tool which gives an insight into the causal +relationship of work done in and between servers. The servers each track +events and report them to a centralised server - in Synapse's case: +Jaeger. The basic unit used to represent events is the span. The span +roughly represents a single piece of work that was done and the time at +which it occurred. A span can have child spans, meaning that the work of +the child had to be completed for the parent span to complete, or it can +have follow-on spans which represent work that is undertaken as a result +of the parent but is not depended on by the parent to in order to +finish. + +Since this is undertaken in a distributed environment a request to +another server, such as an RPC or a simple GET, can be considered a span +(a unit or work) for the local server. 
This causal link is what +tracing aims to capture and visualise. In order to do this metadata +about the local server's span, i.e the 'span context', needs to be +included with the request to the remote. + +It is up to the remote server to decide what it does with the spans it +creates. This is called the sampling policy and it can be configured +through Jaeger's settings. + +For OpenTelemetry concepts, see +. + +For more information about the Python implementation of OpenTelemetry we're using, see + + +For more information about Jaeger, see + + +## Setting up tracing + +To receive tracing spans, start up a Jaeger server. This can be done +using docker like so: + +```sh +docker run -d --name jaeger \ + -p 6831:6831/udp \ + -p 6832:6832/udp \ + -p 5778:5778 \ + -p 16686:16686 \ + -p 14268:14268 \ + jaegertracing/all-in-one:1 +``` + +Latest documentation is probably at +https://www.jaegertracing.io/docs/latest/getting-started. + +## Enable tracing in Synapse + +Tracing is not enabled by default. It must be enabled in the +homeserver config by adding the `tracing` option to your config file. You can find +documentation about how to do this in the [config manual under the header 'Tracing'](usage/configuration/config_documentation.md#tracing). +See below for an example tracing configuration: + +```yaml +tracing: + enabled: true + homeserver_whitelist: + - "mytrustedhomeserver.org" + - "*.myotherhomeservers.com" +``` + +## Homeserver whitelisting + +The homeserver whitelist is configured using regular expressions. A list +of regular expressions can be given and their union will be compared +when propagating any spans contexts to another homeserver. + +Though it's mostly safe to send and receive span contexts to and from +untrusted users since span contexts are usually opaque ids it can lead +to two problems, namely: + +- If the span context is marked as sampled by the sending homeserver + the receiver will sample it. 
Therefore two homeservers with wildly + different sampling policies could incur higher sampling counts than + intended. +- Sending servers can attach arbitrary data to spans, known as + 'baggage'. For safety this has been disabled in Synapse but that + doesn't prevent another server sending you baggage which will be + logged in the trace. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index f5937dd902ff..ed07e2180aac 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -72,6 +72,7 @@ apply if you want your config file to be read properly. A few helpful things to In addition, each setting has an example of its usage, with the proper indentation shown. + ## Modules Server admins can expand Synapse's functionality with external modules. @@ -3643,47 +3644,50 @@ default_power_level_content_override: ``` --- -## Opentracing -Configuration options related to Opentracing support. +## Tracing +Configuration options related to tracing support. --- -### `opentracing` +### `tracing` -These settings enable and configure opentracing, which implements distributed tracing. -This allows you to observe the causal chains of events across servers -including requests, key lookups etc., across any server running -synapse or any other services which support opentracing -(specifically those implemented with Jaeger). +These settings enable and configure tracing. This allows you to observe the +causal chains of events across servers including requests, key lookups etc., +across any server running synapse or any other services which support +OpenTelemetry. Sub-options include: * `enabled`: whether tracing is enabled. Set to true to enable. Disabled by default. * `homeserver_whitelist`: The list of homeservers we wish to send and receive span contexts and span baggage. - See [here](../../opentracing.md) for more. 
+ See [here](../../tracing.md#homeserver-whitelisting) for more. This is a list of regexes which are matched against the `server_name` of the homeserver. By default, it is empty, so no servers are matched. -* `force_tracing_for_users`: # A list of the matrix IDs of users whose requests will always be traced, +* `sample_rate`: The probability that a given span and subsequent child spans in the trace will be + recorded. This controls the amount of spans that record and are exported from Synapse. +* `force_tracing_for_users`: A list of the matrix IDs of users whose requests will always be traced, even if the tracing system would otherwise drop the traces due to probabilistic sampling. By default, the list is empty. -* `jaeger_config`: Jaeger can be configured to sample traces at different rates. - All configuration options provided by Jaeger can be set here. Jaeger's configuration is - mostly related to trace sampling which is documented [here](https://www.jaegertracing.io/docs/latest/sampling/). +* `jaeger_exporter_config`: Configure authentication and where you Jaeger instance is located. + Full options available in the [`JaegerExporter` API docs](https://opentelemetry-python.readthedocs.io/en/latest/exporter/jaeger/jaeger.html#opentelemetry.exporter.jaeger.thrift.JaegerExporter). 
Example configuration: ```yaml -opentracing: +tracing: enabled: true homeserver_whitelist: - ".*" + + sample_rate: 1 force_tracing_for_users: - "@user1:server_name" - "@user2:server_name" - jaeger_config: - sampler: - type: const - param: 1 - logging: - false + jaeger_exporter_config: + agent_host_name: localhost + agent_port: 6831 + # Split UDP packets so they fit within the limit (UDP_PACKET_MAX_LENGTH is set to 65k in OpenTelemetry) + udp_split_oversized_batches: true + # If you define a collector, it will communicate directly to the collector, bypassing the agent + #collector_endpoint: "http://localhost:14268/api/traces?format=jaeger.thrift" ``` --- ## Coordinating workers diff --git a/mypy.ini b/mypy.ini index 4cd61e048457..72051fc5a06f 100644 --- a/mypy.ini +++ b/mypy.ini @@ -174,9 +174,6 @@ ignore_missing_imports = True [mypy-pympler.*] ignore_missing_imports = True -[mypy-rust_python_jaeger_reporter.*] -ignore_missing_imports = True - [mypy-saml2.*] ignore_missing_imports = True diff --git a/poetry.lock b/poetry.lock index d9e4803a5f56..04bf11ccdd49 100644 --- a/poetry.lock +++ b/poetry.lock @@ -132,7 +132,7 @@ optional = false python-versions = ">=3.5.0" [package.extras] -unicode-backport = ["unicodedata2"] +unicode_backport = ["unicodedata2"] [[package]] name = "click" @@ -215,7 +215,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" name = "deprecated" version = "1.2.13" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
-category = "dev" +category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" @@ -316,6 +316,34 @@ python-versions = ">=3.7" gitdb = ">=4.0.1,<5" typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} +[[package]] +name = "googleapis-common-protos" +version = "1.56.2" +description = "Common protobufs used in Google APIs" +category = "main" +optional = true +python-versions = ">=3.6" + +[package.dependencies] +protobuf = ">=3.15.0,<4.0.0dev" + +[package.extras] +grpc = ["grpcio (>=1.0.0,<2.0.0dev)"] + +[[package]] +name = "grpcio" +version = "1.50.0" +description = "HTTP/2-based RPC framework" +category = "main" +optional = true +python-versions = ">=3.7" + +[package.dependencies] +six = ">=1.5.2" + +[package.extras] +protobuf = ["grpcio-tools (>=1.50.0)"] + [[package]] name = "hiredis" version = "2.0.0" @@ -403,26 +431,9 @@ python-versions = ">=3.6.1,<4.0" [package.extras] colors = ["colorama (>=0.4.3,<0.5.0)"] -pipfile-deprecated-finder = ["pipreqs", "requirementslib"] +pipfile_deprecated_finder = ["pipreqs", "requirementslib"] plugins = ["setuptools"] -requirements-deprecated-finder = ["pip-api", "pipreqs"] - -[[package]] -name = "jaeger-client" -version = "4.8.0" -description = "Jaeger Python OpenTracing Tracer implementation" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -opentracing = ">=2.1,<3.0" -threadloop = ">=1,<2" -thrift = "*" -tornado = ">=4.3" - -[package.extras] -tests = ["codecov", "coverage", "flake8", "flake8-quotes", "flake8-typing-imports", "mock", "mypy", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "pycurl", "pytest", "pytest-benchmark[histogram]", "pytest-cov", "pytest-localserver", "pytest-timeout", "pytest-tornado", "tchannel (==2.1.0)"] +requirements_deprecated_finder = ["pip-api", "pipreqs"] [[package]] name = "jeepney" @@ -621,15 +632,77 @@ optional = false python-versions = "*" [[package]] -name = 
"opentracing" -version = "2.4.0" -description = "OpenTracing API for Python. See documentation at http://opentracing.io" +name = "opentelemetry-api" +version = "1.13.0" +description = "OpenTelemetry Python API" category = "main" optional = true -python-versions = "*" +python-versions = ">=3.7" -[package.extras] -tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pytest", "pytest-cov", "pytest-mock", "six (>=1.10.0,<2.0)", "sphinx_rtd_theme", "tornado"] +[package.dependencies] +deprecated = ">=1.2.6" +setuptools = ">=16.0" + +[[package]] +name = "opentelemetry-exporter-jaeger" +version = "1.13.0" +description = "Jaeger Exporters for OpenTelemetry" +category = "main" +optional = true +python-versions = ">=3.7" + +[package.dependencies] +opentelemetry-exporter-jaeger-proto-grpc = "1.13.0" +opentelemetry-exporter-jaeger-thrift = "1.13.0" + +[[package]] +name = "opentelemetry-exporter-jaeger-proto-grpc" +version = "1.13.0" +description = "Jaeger Protobuf Exporter for OpenTelemetry" +category = "main" +optional = true +python-versions = ">=3.7" + +[package.dependencies] +googleapis-common-protos = ">=1.52,<1.56.3" +grpcio = ">=1.0.0,<2.0.0" +opentelemetry-api = ">=1.3,<2.0" +opentelemetry-sdk = ">=1.11,<2.0" + +[[package]] +name = "opentelemetry-exporter-jaeger-thrift" +version = "1.13.0" +description = "Jaeger Thrift Exporter for OpenTelemetry" +category = "main" +optional = true +python-versions = ">=3.7" + +[package.dependencies] +opentelemetry-api = ">=1.3,<2.0" +opentelemetry-sdk = ">=1.11,<2.0" +thrift = ">=0.10.0" + +[[package]] +name = "opentelemetry-sdk" +version = "1.13.0" +description = "OpenTelemetry Python SDK" +category = "main" +optional = true +python-versions = ">=3.7" + +[package.dependencies] +opentelemetry-api = "1.13.0" +opentelemetry-semantic-conventions = "0.34b0" +setuptools = ">=16.0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.34b0" +description = 
"OpenTelemetry Semantic Conventions" +category = "main" +optional = true +python-versions = ">=3.7" [[package]] name = "packaging" @@ -723,6 +796,14 @@ python-versions = ">=3.6" [package.extras] twisted = ["twisted"] +[[package]] +name = "protobuf" +version = "3.20.3" +description = "Protocol Buffers" +category = "main" +optional = true +python-versions = ">=3.7" + [[package]] name = "psycopg2" version = "2.9.5" @@ -1010,7 +1091,7 @@ urllib3 = ">=1.21.1,<1.27" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<5)"] +use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"] [[package]] name = "requests-toolbelt" @@ -1097,8 +1178,12 @@ falcon = ["falcon (>=1.4)"] fastapi = ["fastapi (>=0.79.0)"] flask = ["blinker (>=1.1)", "flask (>=0.11)"] httpx = ["httpx (>=0.16.0)"] +<<<<<<< HEAD +pure_eval = ["asttokens", "executing", "pure-eval"] +======= pure-eval = ["asttokens", "executing", "pure-eval"] pymongo = ["pymongo (>=3.1)"] +>>>>>>> develop pyspark = ["pyspark (>=2.4.4)"] quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] rq = ["rq (>=0.6)"] @@ -1212,17 +1297,6 @@ category = "main" optional = true python-versions = "*" -[[package]] -name = "threadloop" -version = "1.0.2" -description = "Tornado IOLoop Backed Concurrent Futures" -category = "main" -optional = true -python-versions = "*" - -[package.dependencies] -tornado = "*" - [[package]] name = "thrift" version = "0.15.0" @@ -1247,14 +1321,6 @@ category = "dev" optional = false python-versions = ">=3.6" -[[package]] -name = "tornado" -version = "6.1" -description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
-category = "main" -optional = true -python-versions = ">= 3.5" - [[package]] name = "towncrier" version = "22.8.0" @@ -1336,7 +1402,7 @@ typing-extensions = ">=3.6.5" [package.extras] all-non-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"] -conch-nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"] +conch_nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"] contextvars = ["contextvars (>=2.4,<3)"] dev = ["coverage (>=6b1,<7)", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)"] dev-release = ["pydoctor (>=22.9.0,<22.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)"] @@ -1430,14 +1496,6 @@ category = "dev" optional = false python-versions = "*" -[[package]] -name = "types-opentracing" -version = "2.4.10" -description = "Typing stubs for opentracing" -category = "dev" -optional = false -python-versions = "*" - [[package]] name = "types-pillow" version = "9.3.0.1" @@ -1541,7 +1599,7 @@ python-versions = "*" name = "wrapt" version = "1.14.1" description = "Module for decorators, wrappers and monkey patching." 
-category = "dev" +category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" @@ -1622,12 +1680,12 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"] test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"] [extras] -all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler"] +all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "opentelemetry-api", "opentelemetry-sdk", "opentelemetry-exporter-jaeger", "txredisapi", "hiredis", "Pympler"] cache-memory = ["Pympler"] jwt = ["authlib"] matrix-synapse-ldap3 = ["matrix-synapse-ldap3"] oidc = ["authlib"] -opentracing = ["jaeger-client", "opentracing"] +opentelemetry = ["opentelemetry-api", "opentelemetry-sdk", "opentelemetry-exporter-jaeger"] postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"] redis = ["txredisapi", "hiredis"] saml2 = ["pysaml2"] @@ -1870,6 +1928,57 @@ gitpython = [ {file = "GitPython-3.1.29-py3-none-any.whl", hash = "sha256:41eea0deec2deea139b459ac03656f0dd28fc4a3387240ec1d3c259a2c47850f"}, {file = "GitPython-3.1.29.tar.gz", hash = "sha256:cc36bfc4a3f913e66805a28e84703e419d9c264c1077e537b54f0e1af85dbefd"}, ] +googleapis-common-protos = [ + {file = "googleapis-common-protos-1.56.2.tar.gz", hash = "sha256:b09b56f5463070c2153753ef123f07d2e49235e89148e9b2459ec8ed2f68d7d3"}, + {file = "googleapis_common_protos-1.56.2-py2.py3-none-any.whl", hash = "sha256:023eaea9d8c1cceccd9587c6af6c20f33eeeb05d4148670f2b0322dc1511700c"}, +] +grpcio = [ + {file = "grpcio-1.50.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:906f4d1beb83b3496be91684c47a5d870ee628715227d5d7c54b04a8de802974"}, + {file = "grpcio-1.50.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:2d9fd6e38b16c4d286a01e1776fdf6c7a4123d99ae8d6b3f0b4a03a34bf6ce45"}, + {file = 
"grpcio-1.50.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:4b123fbb7a777a2fedec684ca0b723d85e1d2379b6032a9a9b7851829ed3ca9a"}, + {file = "grpcio-1.50.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2f77a90ba7b85bfb31329f8eab9d9540da2cf8a302128fb1241d7ea239a5469"}, + {file = "grpcio-1.50.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eea18a878cffc804506d39c6682d71f6b42ec1c151d21865a95fae743fda500"}, + {file = "grpcio-1.50.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b71916fa8f9eb2abd93151fafe12e18cebb302686b924bd4ec39266211da525"}, + {file = "grpcio-1.50.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:95ce51f7a09491fb3da8cf3935005bff19983b77c4e9437ef77235d787b06842"}, + {file = "grpcio-1.50.0-cp310-cp310-win32.whl", hash = "sha256:f7025930039a011ed7d7e7ef95a1cb5f516e23c5a6ecc7947259b67bea8e06ca"}, + {file = "grpcio-1.50.0-cp310-cp310-win_amd64.whl", hash = "sha256:05f7c248e440f538aaad13eee78ef35f0541e73498dd6f832fe284542ac4b298"}, + {file = "grpcio-1.50.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:ca8a2254ab88482936ce941485c1c20cdeaef0efa71a61dbad171ab6758ec998"}, + {file = "grpcio-1.50.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3b611b3de3dfd2c47549ca01abfa9bbb95937eb0ea546ea1d762a335739887be"}, + {file = "grpcio-1.50.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a4cd8cb09d1bc70b3ea37802be484c5ae5a576108bad14728f2516279165dd7"}, + {file = "grpcio-1.50.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:156f8009e36780fab48c979c5605eda646065d4695deea4cfcbcfdd06627ddb6"}, + {file = "grpcio-1.50.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de411d2b030134b642c092e986d21aefb9d26a28bf5a18c47dd08ded411a3bc5"}, + {file = "grpcio-1.50.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d144ad10eeca4c1d1ce930faa105899f86f5d99cecfe0d7224f3c4c76265c15e"}, + {file = "grpcio-1.50.0-cp311-cp311-win32.whl", hash = 
"sha256:92d7635d1059d40d2ec29c8bf5ec58900120b3ce5150ef7414119430a4b2dd5c"}, + {file = "grpcio-1.50.0-cp311-cp311-win_amd64.whl", hash = "sha256:ce8513aee0af9c159319692bfbf488b718d1793d764798c3d5cff827a09e25ef"}, + {file = "grpcio-1.50.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:8e8999a097ad89b30d584c034929f7c0be280cd7851ac23e9067111167dcbf55"}, + {file = "grpcio-1.50.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:a50a1be449b9e238b9bd43d3857d40edf65df9416dea988929891d92a9f8a778"}, + {file = "grpcio-1.50.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:cf151f97f5f381163912e8952eb5b3afe89dec9ed723d1561d59cabf1e219a35"}, + {file = "grpcio-1.50.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a23d47f2fc7111869f0ff547f771733661ff2818562b04b9ed674fa208e261f4"}, + {file = "grpcio-1.50.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84d04dec64cc4ed726d07c5d17b73c343c8ddcd6b59c7199c801d6bbb9d9ed1"}, + {file = "grpcio-1.50.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:67dd41a31f6fc5c7db097a5c14a3fa588af54736ffc174af4411d34c4f306f68"}, + {file = "grpcio-1.50.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8d4c8e73bf20fb53fe5a7318e768b9734cf122fe671fcce75654b98ba12dfb75"}, + {file = "grpcio-1.50.0-cp37-cp37m-win32.whl", hash = "sha256:7489dbb901f4fdf7aec8d3753eadd40839c9085967737606d2c35b43074eea24"}, + {file = "grpcio-1.50.0-cp37-cp37m-win_amd64.whl", hash = "sha256:531f8b46f3d3db91d9ef285191825d108090856b3bc86a75b7c3930f16ce432f"}, + {file = "grpcio-1.50.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:d534d169673dd5e6e12fb57cc67664c2641361e1a0885545495e65a7b761b0f4"}, + {file = "grpcio-1.50.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:1d8d02dbb616c0a9260ce587eb751c9c7dc689bc39efa6a88cc4fa3e9c138a7b"}, + {file = "grpcio-1.50.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:baab51dcc4f2aecabf4ed1e2f57bceab240987c8b03533f1cef90890e6502067"}, + {file = 
"grpcio-1.50.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40838061e24f960b853d7bce85086c8e1b81c6342b1f4c47ff0edd44bbae2722"}, + {file = "grpcio-1.50.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:931e746d0f75b2a5cff0a1197d21827a3a2f400c06bace036762110f19d3d507"}, + {file = "grpcio-1.50.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:15f9e6d7f564e8f0776770e6ef32dac172c6f9960c478616c366862933fa08b4"}, + {file = "grpcio-1.50.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a4c23e54f58e016761b576976da6a34d876420b993f45f66a2bfb00363ecc1f9"}, + {file = "grpcio-1.50.0-cp38-cp38-win32.whl", hash = "sha256:3e4244c09cc1b65c286d709658c061f12c61c814be0b7030a2d9966ff02611e0"}, + {file = "grpcio-1.50.0-cp38-cp38-win_amd64.whl", hash = "sha256:8e69aa4e9b7f065f01d3fdcecbe0397895a772d99954bb82eefbb1682d274518"}, + {file = "grpcio-1.50.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:af98d49e56605a2912cf330b4627e5286243242706c3a9fa0bcec6e6f68646fc"}, + {file = "grpcio-1.50.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:080b66253f29e1646ac53ef288c12944b131a2829488ac3bac8f52abb4413c0d"}, + {file = "grpcio-1.50.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:ab5d0e3590f0a16cb88de4a3fa78d10eb66a84ca80901eb2c17c1d2c308c230f"}, + {file = "grpcio-1.50.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb11464f480e6103c59d558a3875bd84eed6723f0921290325ebe97262ae1347"}, + {file = "grpcio-1.50.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e07fe0d7ae395897981d16be61f0db9791f482f03fee7d1851fe20ddb4f69c03"}, + {file = "grpcio-1.50.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d75061367a69808ab2e84c960e9dce54749bcc1e44ad3f85deee3a6c75b4ede9"}, + {file = "grpcio-1.50.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ae23daa7eda93c1c49a9ecc316e027ceb99adbad750fbd3a56fa9e4a2ffd5ae0"}, + {file = "grpcio-1.50.0-cp39-cp39-win32.whl", hash = 
"sha256:177afaa7dba3ab5bfc211a71b90da1b887d441df33732e94e26860b3321434d9"}, + {file = "grpcio-1.50.0-cp39-cp39-win_amd64.whl", hash = "sha256:ea8ccf95e4c7e20419b7827aa5b6da6f02720270686ac63bd3493a651830235c"}, + {file = "grpcio-1.50.0.tar.gz", hash = "sha256:12b479839a5e753580b5e6053571de14006157f2ef9b71f38c56dc9b23b95ad6"}, +] hiredis = [ {file = "hiredis-2.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048"}, {file = "hiredis-2.0.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0adea425b764a08270820531ec2218d0508f8ae15a448568109ffcae050fee26"}, @@ -2001,9 +2110,6 @@ isort = [ {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"}, {file = "isort-5.10.1.tar.gz", hash = "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"}, ] -jaeger-client = [ - {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"}, -] jeepney = [ {file = "jeepney-0.7.1-py3-none-any.whl", hash = "sha256:1b5a0ea5c0e7b166b2f5895b91a08c14de8915afda4407fb5022a195224958ac"}, {file = "jeepney-0.7.1.tar.gz", hash = "sha256:fa9e232dfa0c498bd0b8a3a73b8d8a31978304dcef0515adc859d4e096f96f4f"}, @@ -2021,7 +2127,10 @@ keyring = [ {file = "keyring-23.5.0.tar.gz", hash = "sha256:9012508e141a80bd1c0b6778d5c610dd9f8c464d75ac6774248500503f972fb9"}, ] ldap3 = [ + {file = "ldap3-2.9.1-py2.6.egg", hash = "sha256:5ab7febc00689181375de40c396dcad4f2659cd260fc5e94c508b6d77c17e9d5"}, + {file = "ldap3-2.9.1-py2.7.egg", hash = "sha256:2bc966556fc4d4fa9f445a1c31dc484ee81d44a51ab0e2d0fd05b62cac75daa6"}, {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"}, + {file = "ldap3-2.9.1-py3.9.egg", hash = "sha256:5630d1383e09ba94839e253e013f1aa1a2cf7a547628ba1265cb7b9a844b5687"}, {file = "ldap3-2.9.1.tar.gz", hash = 
"sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, ] lxml = [ @@ -2242,8 +2351,29 @@ netaddr = [ {file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"}, {file = "netaddr-0.8.0.tar.gz", hash = "sha256:d6cc57c7a07b1d9d2e917aa8b36ae8ce61c35ba3fcd1b83ca31c5a0ee2b5a243"}, ] -opentracing = [ - {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, +opentelemetry-api = [ + {file = "opentelemetry_api-1.13.0-py3-none-any.whl", hash = "sha256:2db1e8713f48a119bae457cd22304a7919d5e57190a380485c442c4f731a46dd"}, + {file = "opentelemetry_api-1.13.0.tar.gz", hash = "sha256:e683e869471b99e77238c8739d6ee2f368803329f3b808dfa86a02d0b519c682"}, +] +opentelemetry-exporter-jaeger = [ + {file = "opentelemetry_exporter_jaeger-1.13.0-py3-none-any.whl", hash = "sha256:ee421cea47a6de3cafb2d5180181aede45fac2b316a31d4e51ac7c24362c7222"}, + {file = "opentelemetry_exporter_jaeger-1.13.0.tar.gz", hash = "sha256:6c80a6eb66697f9a4cbaddfc7194759b0daf3b3d8708a1e6741e8cfa728ced2b"}, +] +opentelemetry-exporter-jaeger-proto-grpc = [ + {file = "opentelemetry_exporter_jaeger_proto_grpc-1.13.0-py3-none-any.whl", hash = "sha256:a3c4738424b91b3430f9e35b1ea08c8907964a00025a53bc52199b151393ee54"}, + {file = "opentelemetry_exporter_jaeger_proto_grpc-1.13.0.tar.gz", hash = "sha256:d3bd172971fb16d2ae7337e645a3ffa5c1a8b3b476fa8609a03f26c772796403"}, +] +opentelemetry-exporter-jaeger-thrift = [ + {file = "opentelemetry_exporter_jaeger_thrift-1.13.0-py3-none-any.whl", hash = "sha256:795f396eaea008359ff195153745947816b3a278b5b8958eb36fe40dc455d920"}, + {file = "opentelemetry_exporter_jaeger_thrift-1.13.0.tar.gz", hash = "sha256:f10865c5efcdf1b53906604c56797f92261f8825fd8a9ddc913167ff053e99cb"}, +] +opentelemetry-sdk = [ + {file = "opentelemetry_sdk-1.13.0-py3-none-any.whl", hash = "sha256:c7b88e06ebedd22c226b374c207792d30b3f34074a6b8ad8c6dad04a8d16326b"}, + 
{file = "opentelemetry_sdk-1.13.0.tar.gz", hash = "sha256:0eddcacd5a484fe2918116b9a4e31867e3d10322ff8392b1c7b0dae1ac724d48"}, +] +opentelemetry-semantic-conventions = [ + {file = "opentelemetry_semantic_conventions-0.34b0-py3-none-any.whl", hash = "sha256:b236bd027d2d470c5f7f7a466676182c7e02f486db8296caca25fae0649c3fa3"}, + {file = "opentelemetry_semantic_conventions-0.34b0.tar.gz", hash = "sha256:0c88a5d1f45b820272e0c421fd52ff2188b74582b1bab7ba0f57891dc2f31edf"}, ] packaging = [ {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, @@ -2340,6 +2470,30 @@ prometheus-client = [ {file = "prometheus_client-0.15.0-py3-none-any.whl", hash = "sha256:db7c05cbd13a0f79975592d112320f2605a325969b270a94b71dcabc47b931d2"}, {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"}, ] +protobuf = [ + {file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"}, + {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"}, + {file = "protobuf-3.20.3-cp310-cp310-win32.whl", hash = "sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c"}, + {file = "protobuf-3.20.3-cp310-cp310-win_amd64.whl", hash = "sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7"}, + {file = "protobuf-3.20.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469"}, + {file = "protobuf-3.20.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4"}, + {file = "protobuf-3.20.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4"}, + {file = 
"protobuf-3.20.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454"}, + {file = "protobuf-3.20.3-cp37-cp37m-win32.whl", hash = "sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905"}, + {file = "protobuf-3.20.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c"}, + {file = "protobuf-3.20.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7"}, + {file = "protobuf-3.20.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee"}, + {file = "protobuf-3.20.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050"}, + {file = "protobuf-3.20.3-cp38-cp38-win32.whl", hash = "sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86"}, + {file = "protobuf-3.20.3-cp38-cp38-win_amd64.whl", hash = "sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9"}, + {file = "protobuf-3.20.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b"}, + {file = "protobuf-3.20.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b"}, + {file = "protobuf-3.20.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402"}, + {file = "protobuf-3.20.3-cp39-cp39-win32.whl", hash = "sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480"}, + {file = "protobuf-3.20.3-cp39-cp39-win_amd64.whl", hash = "sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7"}, + {file = "protobuf-3.20.3-py2.py3-none-any.whl", hash = 
"sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db"}, + {file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"}, +] psycopg2 = [ {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"}, {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"}, @@ -2362,12 +2516,34 @@ psycopg2cffi-compat = [ {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"}, ] pyasn1 = [ + {file = "pyasn1-0.4.8-py2.4.egg", hash = "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"}, + {file = "pyasn1-0.4.8-py2.5.egg", hash = "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf"}, + {file = "pyasn1-0.4.8-py2.6.egg", hash = "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00"}, + {file = "pyasn1-0.4.8-py2.7.egg", hash = "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8"}, {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, + {file = "pyasn1-0.4.8-py3.1.egg", hash = "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86"}, + {file = "pyasn1-0.4.8-py3.2.egg", hash = "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7"}, + {file = "pyasn1-0.4.8-py3.3.egg", hash = "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576"}, + {file = "pyasn1-0.4.8-py3.4.egg", hash = "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12"}, + {file = "pyasn1-0.4.8-py3.5.egg", hash = "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2"}, + {file = "pyasn1-0.4.8-py3.6.egg", hash = "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359"}, + {file = 
"pyasn1-0.4.8-py3.7.egg", hash = "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776"}, {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, ] pyasn1-modules = [ {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"}, + {file = "pyasn1_modules-0.2.8-py2.4.egg", hash = "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199"}, + {file = "pyasn1_modules-0.2.8-py2.5.egg", hash = "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405"}, + {file = "pyasn1_modules-0.2.8-py2.6.egg", hash = "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb"}, + {file = "pyasn1_modules-0.2.8-py2.7.egg", hash = "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8"}, {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"}, + {file = "pyasn1_modules-0.2.8-py3.1.egg", hash = "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d"}, + {file = "pyasn1_modules-0.2.8-py3.2.egg", hash = "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45"}, + {file = "pyasn1_modules-0.2.8-py3.3.egg", hash = "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4"}, + {file = "pyasn1_modules-0.2.8-py3.4.egg", hash = "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811"}, + {file = "pyasn1_modules-0.2.8-py3.5.egg", hash = "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed"}, + {file = "pyasn1_modules-0.2.8-py3.6.egg", hash = "sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0"}, + {file = "pyasn1_modules-0.2.8-py3.7.egg", hash = "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd"}, ] pycodestyle = [ {file = "pycodestyle-2.9.1-py2.py3-none-any.whl", hash = 
"sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"}, @@ -2506,13 +2682,6 @@ pyyaml = [ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, - {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, - {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, @@ -2666,10 +2835,6 @@ sortedcontainers = [ systemd-python = [ {file = "systemd-python-234.tar.gz", hash = "sha256:fd0e44bf70eadae45aadc292cb0a7eb5b0b6372cd1b391228047d33895db83e7"}, ] -threadloop = [ - {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"}, - {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"}, -] thrift = [ {file = "thrift-0.15.0.tar.gz", hash = "sha256:87c8205a71cf8bbb111cb99b1f7495070fbc9cabb671669568854210da5b3e29"}, ] @@ -2677,49 +2842,6 @@ tomli = [ {file = "tomli-1.2.3-py3-none-any.whl", hash = "sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c"}, {file = "tomli-1.2.3.tar.gz", hash = "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f"}, ] -tornado = [ - {file = "tornado-6.1-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:d371e811d6b156d82aa5f9a4e08b58debf97c302a35714f6f45e35139c332e32"}, - {file = "tornado-6.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0d321a39c36e5f2c4ff12b4ed58d41390460f798422c4504e09eb5678e09998c"}, - {file = "tornado-6.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9de9e5188a782be6b1ce866e8a51bc76a0fbaa0e16613823fc38e4fc2556ad05"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:61b32d06ae8a036a6607805e6720ef00a3c98207038444ba7fd3d169cd998910"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:3e63498f680547ed24d2c71e6497f24bca791aca2fe116dbc2bd0ac7f191691b"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:6c77c9937962577a6a76917845d06af6ab9197702a42e1346d8ae2e76b5e3675"}, - {file = "tornado-6.1-cp35-cp35m-win32.whl", hash = "sha256:6286efab1ed6e74b7028327365cf7346b1d777d63ab30e21a0f4d5b275fc17d5"}, - {file = "tornado-6.1-cp35-cp35m-win_amd64.whl", hash = 
"sha256:fa2ba70284fa42c2a5ecb35e322e68823288a4251f9ba9cc77be04ae15eada68"}, - {file = "tornado-6.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0a00ff4561e2929a2c37ce706cb8233b7907e0cdc22eab98888aca5dd3775feb"}, - {file = "tornado-6.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:748290bf9112b581c525e6e6d3820621ff020ed95af6f17fedef416b27ed564c"}, - {file = "tornado-6.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e385b637ac3acaae8022e7e47dfa7b83d3620e432e3ecb9a3f7f58f150e50921"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:25ad220258349a12ae87ede08a7b04aca51237721f63b1808d39bdb4b2164558"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:65d98939f1a2e74b58839f8c4dab3b6b3c1ce84972ae712be02845e65391ac7c"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:e519d64089b0876c7b467274468709dadf11e41d65f63bba207e04217f47c085"}, - {file = "tornado-6.1-cp36-cp36m-win32.whl", hash = "sha256:b87936fd2c317b6ee08a5741ea06b9d11a6074ef4cc42e031bc6403f82a32575"}, - {file = "tornado-6.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cc0ee35043162abbf717b7df924597ade8e5395e7b66d18270116f8745ceb795"}, - {file = "tornado-6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7250a3fa399f08ec9cb3f7b1b987955d17e044f1ade821b32e5f435130250d7f"}, - {file = "tornado-6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:ed3ad863b1b40cd1d4bd21e7498329ccaece75db5a5bf58cd3c9f130843e7102"}, - {file = "tornado-6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:dcef026f608f678c118779cd6591c8af6e9b4155c44e0d1bc0c87c036fb8c8c4"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:70dec29e8ac485dbf57481baee40781c63e381bebea080991893cd297742b8fd"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:d3f7594930c423fd9f5d1a76bee85a2c36fd8b4b16921cae7e965f22575e9c01"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = 
"sha256:3447475585bae2e77ecb832fc0300c3695516a47d46cefa0528181a34c5b9d3d"}, - {file = "tornado-6.1-cp37-cp37m-win32.whl", hash = "sha256:e7229e60ac41a1202444497ddde70a48d33909e484f96eb0da9baf8dc68541df"}, - {file = "tornado-6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:cb5ec8eead331e3bb4ce8066cf06d2dfef1bfb1b2a73082dfe8a161301b76e37"}, - {file = "tornado-6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:20241b3cb4f425e971cb0a8e4ffc9b0a861530ae3c52f2b0434e6c1b57e9fd95"}, - {file = "tornado-6.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c77da1263aa361938476f04c4b6c8916001b90b2c2fdd92d8d535e1af48fba5a"}, - {file = "tornado-6.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fba85b6cd9c39be262fcd23865652920832b61583de2a2ca907dbd8e8a8c81e5"}, - {file = "tornado-6.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:1e8225a1070cd8eec59a996c43229fe8f95689cb16e552d130b9793cb570a288"}, - {file = "tornado-6.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d14d30e7f46a0476efb0deb5b61343b1526f73ebb5ed84f23dc794bdb88f9d9f"}, - {file = "tornado-6.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8f959b26f2634a091bb42241c3ed8d3cedb506e7c27b8dd5c7b9f745318ddbb6"}, - {file = "tornado-6.1-cp38-cp38-win32.whl", hash = "sha256:34ca2dac9e4d7afb0bed4677512e36a52f09caa6fded70b4e3e1c89dbd92c326"}, - {file = "tornado-6.1-cp38-cp38-win_amd64.whl", hash = "sha256:6196a5c39286cc37c024cd78834fb9345e464525d8991c21e908cc046d1cc02c"}, - {file = "tornado-6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0ba29bafd8e7e22920567ce0d232c26d4d47c8b5cf4ed7b562b5db39fa199c5"}, - {file = "tornado-6.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:33892118b165401f291070100d6d09359ca74addda679b60390b09f8ef325ffe"}, - {file = "tornado-6.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7da13da6f985aab7f6f28debab00c67ff9cbacd588e8477034c0652ac141feea"}, - {file = "tornado-6.1-cp39-cp39-manylinux2010_i686.whl", hash = 
"sha256:e0791ac58d91ac58f694d8d2957884df8e4e2f6687cdf367ef7eb7497f79eaa2"}, - {file = "tornado-6.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:66324e4e1beede9ac79e60f88de548da58b1f8ab4b2f1354d8375774f997e6c0"}, - {file = "tornado-6.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a48900ecea1cbb71b8c71c620dee15b62f85f7c14189bdeee54966fbd9a0c5bd"}, - {file = "tornado-6.1-cp39-cp39-win32.whl", hash = "sha256:d3d20ea5782ba63ed13bc2b8c291a053c8d807a8fa927d941bd718468f7b950c"}, - {file = "tornado-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:548430be2740e327b3fe0201abe471f314741efcb0067ec4f2d7dcfb4825f3e4"}, - {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"}, -] towncrier = [ {file = "towncrier-22.8.0-py2.py3-none-any.whl", hash = "sha256:3b780c3d966e1b26414830aec3d15000654b31e64e024f3e5fd128b4c6eb8f47"}, {file = "towncrier-22.8.0.tar.gz", hash = "sha256:7d3839b033859b45fb55df82b74cfd702431933c0cc9f287a5a7ea3e05d042cb"}, @@ -2804,10 +2926,6 @@ types-jsonschema = [ {file = "types-jsonschema-4.17.0.1.tar.gz", hash = "sha256:62625d492e4930411a431909ac32301aeab6180500e70ee222f81d43204cfb3c"}, {file = "types_jsonschema-4.17.0.1-py3-none-any.whl", hash = "sha256:77badbe3881cbf79ac9561be2be2b1f37ab104b13afd2231840e6dd6e94e63c2"}, ] -types-opentracing = [ - {file = "types-opentracing-2.4.10.tar.gz", hash = "sha256:6101414f3b6d3b9c10f1c510a261e8439b6c8d67c723d5c2872084697b4580a7"}, - {file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"}, -] types-pillow = [ {file = "types-Pillow-9.3.0.1.tar.gz", hash = "sha256:f3b7cada3fa496c78d75253c6b1f07a843d625f42e5639b320a72acaff6f7cfb"}, {file = "types_Pillow-9.3.0.1-py3-none-any.whl", hash = "sha256:79837755fe9659f29efd1016e9903ac4a500e0c73260483f07296bd6ca47668b"}, diff --git a/pyproject.toml b/pyproject.toml index 7695ebc252c4..690e3c07dec7 100644 --- a/pyproject.toml +++ 
b/pyproject.toml @@ -200,13 +200,14 @@ authlib = { version = ">=0.15.1", optional = true } systemd-python = { version = ">=231", optional = true } lxml = { version = ">=4.2.0", optional = true } sentry-sdk = { version = ">=0.7.2", optional = true } -opentracing = { version = ">=2.2.0", optional = true } -jaeger-client = { version = ">=4.0.0", optional = true } txredisapi = { version = ">=1.4.7", optional = true } hiredis = { version = "*", optional = true } Pympler = { version = "*", optional = true } parameterized = { version = ">=0.7.4", optional = true } idna = { version = ">=2.5", optional = true } +opentelemetry-api = {version = "^1.12.0", optional = true} +opentelemetry-sdk = {version = "^1.12.0", optional = true} +opentelemetry-exporter-jaeger = {version = "^1.12.0", optional = true} [tool.poetry.extras] # NB: Packages that should be part of `pip install matrix-synapse[all]` need to be specified @@ -221,7 +222,7 @@ oidc = ["authlib"] systemd = ["systemd-python"] url-preview = ["lxml"] sentry = ["sentry-sdk"] -opentracing = ["jaeger-client", "opentracing"] +opentelemetry = ["opentelemetry-api", "opentelemetry-sdk", "opentelemetry-exporter-jaeger"] jwt = ["authlib"] # hiredis is not a *strict* dependency, but it makes things much faster. # (if it is not installed, we fall back to slow code.) 
@@ -254,8 +255,8 @@ all = [ "lxml", # sentry "sentry-sdk", - # opentracing - "jaeger-client", "opentracing", + # opentelemetry + "opentelemetry-api", "opentelemetry-sdk", "opentelemetry-exporter-jaeger", # redis "txredisapi", "hiredis", # cache-memory @@ -279,7 +280,6 @@ mypy-zope = "*" types-bleach = ">=4.1.0" types-commonmark = ">=0.9.2" types-jsonschema = ">=3.2.0" -types-opentracing = ">=2.4.2" types-Pillow = ">=8.3.4" types-psycopg2 = ">=2.9.9" types-pyOpenSSL = ">=20.0.7" diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 3d7f986ac74a..ae5b04759ac6 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -31,10 +31,10 @@ from synapse.appservice import ApplicationService from synapse.http import get_request_user_agent from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import ( +from synapse.logging.tracing import ( SynapseTags, - active_span, force_tracing, + get_active_span, start_active_span, trace, ) @@ -144,7 +144,7 @@ async def get_user_by_req( is invalid. AuthError if access is denied for the user in the access token """ - parent_span = active_span() + parent_span = get_active_span() with start_active_span("get_user_by_req"): requester = await self._wrapped_get_user_by_req( request, allow_guest, allow_expired @@ -154,25 +154,25 @@ async def get_user_by_req( if requester.authenticated_entity in self._force_tracing_for_users: # request tracing is enabled for this user, so we need to force it # tracing on for the parent span (which will be the servlet span). - # + force_tracing(parent_span) # It's too late for the get_user_by_req span to inherit the setting, # so we also force it on for that. force_tracing() - force_tracing(parent_span) - parent_span.set_tag( + parent_span.set_attribute( "authenticated_entity", requester.authenticated_entity ) + # We tag the Synapse instance name so that it's an easy jumping # off point into the logs. Can also be used to filter for an # instance that is under load. 
- parent_span.set_tag( + parent_span.set_attribute( SynapseTags.INSTANCE_NAME, self.hs.get_instance_name() ) - parent_span.set_tag("user_id", requester.user.to_string()) + parent_span.set_attribute("user_id", requester.user.to_string()) if requester.device_id is not None: - parent_span.set_tag("device_id", requester.device_id) + parent_span.set_attribute("device_id", requester.device_id) if requester.app_service is not None: - parent_span.set_tag("appservice_id", requester.app_service.id) + parent_span.set_attribute("appservice_id", requester.app_service.id) return requester @cancellable @@ -184,7 +184,7 @@ async def _wrapped_get_user_by_req( ) -> Requester: """Helper for get_user_by_req - Once get_user_by_req has set up the opentracing span, this does the actual work. + Once get_user_by_req has set up the tracing span, this does the actual work. """ try: ip_addr = request.getClientAddress().host diff --git a/synapse/api/constants.py b/synapse/api/constants.py index bc04a0755b0c..8aa646890d95 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -198,6 +198,9 @@ class LimitBlockingTypes: class EventContentFields: """Fields found in events' content, regardless of type.""" + # Synapse internal content field for tracing + TRACING_CONTEXT: Final = "org.matrix.tracing_context" + # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326 LABELS: Final = "org.matrix.labels" diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 41d2732ef96d..ebac950bf21e 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -64,7 +64,7 @@ from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.http.site import SynapseSite from synapse.logging.context import PreserveLoggingContext -from synapse.logging.opentracing import init_tracer +from synapse.logging.tracing import init_tracer from synapse.metrics import install_gc_manager, register_threadpool from synapse.metrics.background_process_metrics import 
wrap_as_background_process from synapse.metrics.jemalloc import setup_jemalloc_stats diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index c19270c6c56c..d67498f50db5 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -24,41 +24,50 @@ class TracerConfig(Config): section = "tracing" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - opentracing_config = config.get("opentracing") - if opentracing_config is None: - opentracing_config = {} + tracing_config = config.get("tracing") + if tracing_config is None: + tracing_config = {} - self.opentracer_enabled = opentracing_config.get("enabled", False) + self.tracing_enabled = tracing_config.get("enabled", False) - self.jaeger_config = opentracing_config.get( - "jaeger_config", - {"sampler": {"type": "const", "param": 1}, "logging": False}, + self.jaeger_exporter_config = tracing_config.get( + "jaeger_exporter_config", + {}, ) self.force_tracing_for_users: Set[str] = set() - if not self.opentracer_enabled: + if not self.tracing_enabled: return - check_requirements("opentracing") + check_requirements("opentelemetry") # The tracer is enabled so sanitize the config - self.opentracer_whitelist: List[str] = opentracing_config.get( + # Default to always sample. 
Range: [0.0 - 1.0] + self.sample_rate: float = float(tracing_config.get("sample_rate", 1)) + if self.sample_rate < 0.0 or self.sample_rate > 1.0: + raise ConfigError( + "Tracing sample_rate must be in range [0.0, 1.0].", + ("tracing", "sample_rate"), + ) + + self.homeserver_whitelist: List[str] = tracing_config.get( "homeserver_whitelist", [] ) - if not isinstance(self.opentracer_whitelist, list): - raise ConfigError("Tracer homeserver_whitelist config is malformed") - - force_tracing_for_users = opentracing_config.get("force_tracing_for_users", []) - if not isinstance(force_tracing_for_users, list): + if not isinstance(self.homeserver_whitelist, list): raise ConfigError( - "Expected a list", ("opentracing", "force_tracing_for_users") + "Tracing homeserver_whitelist config is malformed", + ("tracing", "homeserver_whitelist"), ) + + force_tracing_for_users = tracing_config.get("force_tracing_for_users", []) + if not isinstance(force_tracing_for_users, list): + raise ConfigError("Expected a list", ("tracing", "force_tracing_for_users")) for i, u in enumerate(force_tracing_for_users): if not isinstance(u, str): raise ConfigError( "Expected a string", - ("opentracing", "force_tracing_for_users", f"index {i}"), + ("tracing", "force_tracing_for_users", f"index {i}"), ) self.force_tracing_for_users.add(u) diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index 23b799ac3250..7c5a21ff4c8b 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -28,7 +28,7 @@ from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.events.utils import prune_event, prune_event_dict -from synapse.logging.opentracing import trace +from synapse.logging.tracing import trace from synapse.types import JsonDict logger = logging.getLogger(__name__) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 623a2c71eadb..c691ef5973e4 100644 --- a/synapse/events/spamcheck.py +++ 
b/synapse/events/spamcheck.py @@ -32,7 +32,7 @@ import synapse from synapse.api.errors import Codes -from synapse.logging.opentracing import trace +from synapse.logging.tracing import trace from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 6bd4742140c4..d5d1b06fa4fc 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -23,7 +23,7 @@ from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import prune_event, validate_canonicaljson from synapse.http.servlet import assert_params_in_dict -from synapse.logging.opentracing import log_kv, trace +from synapse.logging.tracing import log_kv, trace from synapse.types import JsonDict, get_domain_from_id if TYPE_CHECKING: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c4c0bc7315b4..0615ce087092 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -61,7 +61,7 @@ ) from synapse.federation.transport.client import SendJoinResponse from synapse.http.types import QueryParams -from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace +from synapse.logging.tracing import SynapseTags, log_kv, set_attribute, tag_args, trace from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache @@ -507,19 +507,19 @@ async def get_room_state_ids( state_event_ids = result["pdu_ids"] auth_event_ids = result.get("auth_chain_ids", []) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "state_event_ids", str(state_event_ids), ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "state_event_ids.length", 
str(len(state_event_ids)), ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "auth_event_ids", str(auth_event_ids), ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "auth_event_ids.length", str(len(auth_event_ids)), ) @@ -626,7 +626,7 @@ async def _check_sigs_and_hash_for_pulled_events_and_fetch( Returns: A list of PDUs that have valid signatures and hashes. """ - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "pdus.length", str(len(pdus)), ) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index bb20af6e91ed..4f4a88b9532d 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -61,12 +61,7 @@ nested_logging_context, run_in_background, ) -from synapse.logging.opentracing import ( - log_kv, - start_active_span_from_edu, - tag_args, - trace, -) +from synapse.logging.tracing import log_kv, start_active_span_from_edu, tag_args, trace from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.replication.http.federation import ( ReplicationFederationSendEduRestServlet, @@ -1449,7 +1444,7 @@ async def on_edu(self, edu_type: str, origin: str, content: dict) -> None: # Check if we have a handler on this instance handler = self.edu_handlers.get(edu_type) if handler: - with start_active_span_from_edu(content, "handle_edu"): + with start_active_span_from_edu("handle_edu", edu_content=content): try: await handler(origin, content) except SynapseError as e: diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 3ae5e8634c34..6575c6c8a582 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -32,7 +32,7 @@ from synapse.federation.units import Edu from synapse.handlers.presence import format_user_presence_state from synapse.logging import issue9533_logger -from synapse.logging.opentracing import 
SynapseTags, set_tag +from synapse.logging.tracing import SynapseTags, set_attribute from synapse.metrics import sent_transactions_counter from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ReadReceipt @@ -597,7 +597,7 @@ async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int] if not message_id: continue - set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) + set_attribute(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) edus = [ Edu( diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 75081810fde3..3f2c8bcfa1d0 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -21,11 +21,13 @@ from synapse.events import EventBase from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction -from synapse.logging.opentracing import ( +from synapse.logging.tracing import ( + Link, + StatusCode, extract_text_map, - set_tag, - start_active_span_follows_from, - tags, + get_span_context_from_context, + set_status, + start_active_span, whitelisted_homeserver, ) from synapse.types import JsonDict @@ -79,7 +81,7 @@ async def send_new_transaction( edus: List of EDUs to send """ - # Make a transaction-sending opentracing span. This span follows on from + # Make a transaction-sending tracing span. This span follows on from # all the edus in that transaction. This needs to be done since there is # no active span here, so if the edus were not received by the remote the # span would have no causality and it would be forgotten. 
@@ -88,13 +90,20 @@ async def send_new_transaction( keep_destination = whitelisted_homeserver(destination) for edu in edus: - context = edu.get_context() - if context: - span_contexts.append(extract_text_map(json_decoder.decode(context))) + tracing_context_json = edu.get_tracing_context_json() + if tracing_context_json: + context = extract_text_map(json_decoder.decode(tracing_context_json)) + if context: + span_context = get_span_context_from_context(context) + if span_context: + span_contexts.append(span_context) if keep_destination: - edu.strip_context() + edu.strip_tracing_context() - with start_active_span_follows_from("send_transaction", span_contexts): + with start_active_span( + "send_transaction", + links=[Link(span_context) for span_context in span_contexts], + ): logger.debug("TX [%s] _attempt_new_transaction", destination) txn_id = str(self._next_txn_id) @@ -166,7 +175,7 @@ def json_data_cb() -> JsonDict: except HttpResponseException as e: code = e.code - set_tag(tags.ERROR, True) + set_status(StatusCode.ERROR, e) logger.info("TX [%s] {%s} got %d response", destination, txn_id, code) raise diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index cdaf0d5de782..fd927e0af778 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -15,7 +15,6 @@ import functools import logging import re -import time from http import HTTPStatus from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Tuple, cast @@ -25,12 +24,15 @@ from synapse.http.servlet import parse_json_object_from_request from synapse.http.site import SynapseRequest from synapse.logging.context import run_in_background -from synapse.logging.opentracing import ( - active_span, - set_tag, - span_context_from_request, +from synapse.logging.tracing import ( + Link, + context_from_request, + create_non_recording_span, + get_active_span, + set_attribute, start_active_span, - 
start_active_span_follows_from, + start_span, + use_span, whitelisted_homeserver, ) from synapse.types import JsonDict @@ -309,60 +311,71 @@ async def new_func( logger.warning("authenticate_request failed: %s", e) raise - # update the active opentracing span with the authenticated entity - set_tag("authenticated_entity", str(origin)) + # update the active tracing span with the authenticated entity + set_attribute("authenticated_entity", str(origin)) # if the origin is authenticated and whitelisted, use its span context # as the parent. - context = None + origin_context = None if origin and whitelisted_homeserver(origin): - context = span_context_from_request(request) - - if context: - servlet_span = active_span() - # a scope which uses the origin's context as a parent - processing_start_time = time.time() - scope = start_active_span_follows_from( + origin_context = context_from_request(request) + + remote_parent_span = None + if origin_context: + local_servlet_span = get_active_span() + # Create a span which uses the `origin_context` as a parent + # so we can see how the incoming payload was processed while + # we're looking at the outgoing trace. Since the parent is set + # to a remote span (from the origin), it won't show up in the + # local trace which is why we create another span below for the + # local trace. A span can only have one parent so we have to + # create two separate ones. + remote_parent_span = start_span( "incoming-federation-request", - child_of=context, - contexts=(servlet_span,), - start_time=processing_start_time, + context=origin_context, + # Cross-link back to the local trace so we can jump + # to the incoming side from the remote origin trace. + links=[Link(local_servlet_span.get_span_context())] + if local_servlet_span + else None, ) + # Create a local span to appear in the local trace + local_parent_span_cm = start_active_span( + "process-federation-request", + # Cross-link back to the remote outgoing trace so we can + # jump over there. 
+ links=[Link(remote_parent_span.get_span_context())], + ) else: - # just use our context as a parent - scope = start_active_span( - "incoming-federation-request", + # Otherwise just use our local active servlet context as a parent + local_parent_span_cm = start_active_span( + "process-federation-request", ) - try: - with scope: - if origin and self.RATELIMIT: - with ratelimiter.ratelimit(origin) as d: - await d - if request._disconnected: - logger.warning( - "client disconnected before we started processing " - "request" - ) - return None - response = await func( - origin, content, request.args, *args, **kwargs + # Don't need to record anything for the remote because no remote + # trace context given. + remote_parent_span = create_non_recording_span() + + remote_parent_span_cm = use_span(remote_parent_span, end_on_exit=True) + + with remote_parent_span_cm, local_parent_span_cm: + if origin and self.RATELIMIT: + with ratelimiter.ratelimit(origin) as d: + await d + if request._disconnected: + logger.warning( + "client disconnected before we started processing " + "request" ) - else: + return None response = await func( origin, content, request.args, *args, **kwargs ) - finally: - # if we used the origin's context as the parent, add a new span using - # the servlet span as a parent, so that we have a link - if context: - scope2 = start_active_span_follows_from( - "process-federation_request", - contexts=(scope.span,), - start_time=processing_start_time, + else: + response = await func( + origin, content, request.args, *args, **kwargs ) - scope2.close() return response diff --git a/synapse/federation/units.py b/synapse/federation/units.py index b9b12fbea563..a6b590269e43 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py @@ -21,6 +21,7 @@ import attr +from synapse.api.constants import EventContentFields from synapse.types import JsonDict logger = logging.getLogger(__name__) @@ -54,11 +55,13 @@ def get_internal_dict(self) -> JsonDict: 
"destination": self.destination, } - def get_context(self) -> str: - return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}") + def get_tracing_context_json(self) -> str: + return getattr(self, "content", {}).get( + EventContentFields.TRACING_CONTEXT, "{}" + ) - def strip_context(self) -> None: - getattr(self, "content", {})["org.matrix.opentracing_context"] = "{}" + def strip_tracing_context(self) -> None: + getattr(self, "content", {})[EventContentFields.TRACING_CONTEXT] = "{}" def _none_to_list(edus: Optional[List[JsonDict]]) -> List[JsonDict]: diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index c597639a7ff5..2567954679f0 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -36,7 +36,7 @@ RequestSendFailed, SynapseError, ) -from synapse.logging.opentracing import log_kv, set_tag, trace +from synapse.logging.tracing import log_kv, set_attribute, trace from synapse.metrics.background_process_metrics import ( run_as_background_process, wrap_as_background_process, @@ -87,7 +87,7 @@ async def get_devices_by_user(self, user_id: str) -> List[JsonDict]: info on each device """ - set_tag("user_id", user_id) + set_attribute("user_id", user_id) device_map = await self.store.get_devices_by_user(user_id) ips = await self.store.get_last_client_ip_by_device(user_id, device_id=None) @@ -119,8 +119,8 @@ async def get_device(self, user_id: str, device_id: str) -> JsonDict: ips = await self.store.get_last_client_ip_by_device(user_id, device_id) _update_device_from_client_ips(device, ips) - set_tag("device", str(device)) - set_tag("ips", str(ips)) + set_attribute("device", str(device)) + set_attribute("ips", str(ips)) return device @@ -172,8 +172,8 @@ async def get_user_ids_changed( joined a room, that `user_id` may be interested in. 
""" - set_tag("user_id", user_id) - set_tag("from_token", str(from_token)) + set_attribute("user_id", user_id) + set_attribute("from_token", str(from_token)) now_room_key = self.store.get_room_max_token() room_ids = await self.store.get_rooms_for_user(user_id) @@ -475,8 +475,8 @@ async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: except errors.StoreError as e: if e.code == 404: # no match - set_tag("error", True) - set_tag("reason", "User doesn't have that device id.") + set_attribute("error", True) + set_attribute("reason", "User doesn't have that device id.") else: raise @@ -694,7 +694,7 @@ async def _handle_new_device_update_async(self) -> None: else: return - for user_id, device_id, room_id, stream_id, opentracing_context in rows: + for user_id, device_id, room_id, stream_id, tracing_context in rows: hosts = set() # Ignore any users that aren't ours @@ -720,7 +720,7 @@ async def _handle_new_device_update_async(self) -> None: room_id=room_id, stream_id=stream_id, hosts=hosts, - context=opentracing_context, + context=tracing_context, ) # Notify replication that we've updated the device list stream. @@ -902,8 +902,8 @@ async def incoming_device_list_update( for parsing the EDU and adding to pending updates list. """ - set_tag("origin", origin) - set_tag("edu_content", str(edu_content)) + set_attribute("origin", origin) + set_attribute("edu_content", str(edu_content)) user_id = edu_content.pop("user_id") device_id = edu_content.pop("device_id") stream_id = str(edu_content.pop("stream_id")) # They may come as ints @@ -923,7 +923,7 @@ async def incoming_device_list_update( origin, ) - set_tag("error", True) + set_attribute("error", True) log_kv( { "message": "Got a device list update edu from a user and " @@ -951,7 +951,7 @@ async def incoming_device_list_update( if not room_ids: # We don't share any rooms with this user. Ignore update, as we # probably won't get any further updates. 
- set_tag("error", True) + set_attribute("error", True) log_kv( { "message": "Got an update from a user for which " @@ -1148,12 +1148,12 @@ async def user_device_resync( # eventually become consistent. return None except FederationDeniedError as e: - set_tag("error", True) + set_attribute("error", True) log_kv({"reason": "FederationDeniedError"}) logger.info(e) return None except Exception as e: - set_tag("error", True) + set_attribute("error", True) log_kv( {"message": "Exception raised by federation request", "exception": e} ) diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 444c08bc2eef..9c9da0cb63e9 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -15,15 +15,15 @@ import logging from typing import TYPE_CHECKING, Any, Dict -from synapse.api.constants import EduTypes, ToDeviceEventTypes +from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes from synapse.api.errors import SynapseError from synapse.api.ratelimiting import Ratelimiter from synapse.logging.context import run_in_background -from synapse.logging.opentracing import ( +from synapse.logging.tracing import ( SynapseTags, get_active_span_text_map, log_kv, - set_tag, + set_attribute, ) from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet from synapse.types import JsonDict, Requester, StreamKeyType, UserID, get_domain_from_id @@ -217,10 +217,10 @@ async def send_device_message( sender_user_id = requester.user.to_string() message_id = random_string(16) - set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) + set_attribute(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) log_kv({"number_of_to_device_messages": len(messages)}) - set_tag("sender", sender_user_id) + set_attribute("sender", sender_user_id) local_messages = {} remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} for user_id, by_device in messages.items(): @@ -273,7 +273,7 @@ async def 
send_device_message( "sender": sender_user_id, "type": message_type, "message_id": message_id, - "org.matrix.opentracing_context": json_encoder.encode(context), + EventContentFields.TRACING_CONTEXT: json_encoder.encode(context), } # Add messages to the database. diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index bf1221f523cd..5f84f1769bef 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -28,7 +28,7 @@ from synapse.api.constants import EduTypes from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace +from synapse.logging.tracing import log_kv, set_attribute, tag_args, trace from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet from synapse.types import ( JsonDict, @@ -141,8 +141,8 @@ async def query_devices( else: remote_queries[user_id] = device_ids - set_tag("local_key_query", str(local_query)) - set_tag("remote_key_query", str(remote_queries)) + set_attribute("local_key_query", str(local_query)) + set_attribute("remote_key_query", str(remote_queries)) # First get local devices. # A map of destination -> failure response. 
@@ -378,8 +378,8 @@ async def _query_devices_for_destination( except Exception as e: failure = _exception_to_failure(e) failures[destination] = failure - set_tag("error", True) - set_tag("reason", str(failure)) + set_attribute("error", True) + set_attribute("reason", str(failure)) return @@ -447,7 +447,7 @@ async def query_local_devices( Returns: A map from user_id -> device_id -> device details """ - set_tag("local_query", str(query)) + set_attribute("local_query", str(query)) local_query: List[Tuple[str, Optional[str]]] = [] result_dict: Dict[str, Dict[str, dict]] = {} @@ -462,7 +462,7 @@ async def query_local_devices( "user_id": user_id, } ) - set_tag("error", True) + set_attribute("error", True) raise SynapseError(400, "Not a user here") if not device_ids: @@ -543,8 +543,8 @@ async def claim_one_time_keys( domain = get_domain_from_id(user_id) remote_queries.setdefault(domain, {})[user_id] = one_time_keys - set_tag("local_key_query", str(local_query)) - set_tag("remote_key_query", str(remote_queries)) + set_attribute("local_key_query", str(local_query)) + set_attribute("remote_key_query", str(remote_queries)) results = await self.store.claim_e2e_one_time_keys(local_query) @@ -560,7 +560,7 @@ async def claim_one_time_keys( @trace async def claim_client_keys(destination: str) -> None: - set_tag("destination", destination) + set_attribute("destination", destination) device_keys = remote_queries[destination] try: remote_result = await self.federation.claim_client_keys( @@ -573,8 +573,8 @@ async def claim_client_keys(destination: str) -> None: except Exception as e: failure = _exception_to_failure(e) failures[destination] = failure - set_tag("error", True) - set_tag("reason", str(failure)) + set_attribute("error", True) + set_attribute("reason", str(failure)) await make_deferred_yieldable( defer.gatherResults( @@ -677,7 +677,7 @@ async def upload_keys_for_user( result = await self.store.count_e2e_one_time_keys(user_id, device_id) - set_tag("one_time_key_counts", 
str(result)) + set_attribute("one_time_key_counts", str(result)) return {"one_time_key_counts": result} async def _upload_one_time_keys_for_user( diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 83f53ceb8891..098288f058f8 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -25,7 +25,7 @@ StoreError, SynapseError, ) -from synapse.logging.opentracing import log_kv, trace +from synapse.logging.tracing import log_kv, trace from synapse.storage.databases.main.e2e_room_keys import RoomKey from synapse.types import JsonDict from synapse.util.async_helpers import Linearizer diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index d92582fd5c77..99c715d75636 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -61,7 +61,7 @@ from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import nested_logging_context -from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace +from synapse.logging.tracing import SynapseTags, set_attribute, tag_args, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.module_api import NOT_SPAM from synapse.replication.http.federation import ( @@ -401,11 +401,11 @@ async def _maybe_backfill_inner( logger.debug( "_maybe_backfill_inner: extremities_to_request %s", extremities_to_request ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "extremities_to_request", str(extremities_to_request), ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "extremities_to_request.length", str(len(extremities_to_request)), ) diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index f7223b03c364..8357f6f84e83 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -60,9 +60,9 @@ from 
synapse.events.snapshot import EventContext from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo from synapse.logging.context import nested_logging_context -from synapse.logging.opentracing import ( +from synapse.logging.tracing import ( SynapseTags, - set_tag, + set_attribute, start_active_span, tag_args, trace, @@ -774,15 +774,15 @@ async def _process_pulled_events( backfilled: True if this is part of a historical batch of events (inhibits notification to clients, and validation of device keys.) """ - set_tag( + set_attribute( SynapseTags.FUNC_ARG_PREFIX + "event_ids", str([event.event_id for event in events]), ) - set_tag( + set_attribute( SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", str(len(events)), ) - set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled)) + set_attribute(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled)) logger.debug( "processing pulled backfilled=%s events=%s", backfilled, @@ -1151,19 +1151,19 @@ async def _get_state_ids_after_missing_prev_event( missing_event_ids = missing_desired_event_ids | missing_auth_event_ids - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "missing_auth_event_ids", str(missing_auth_event_ids), ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "missing_auth_event_ids.length", str(len(missing_auth_event_ids)), ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "missing_desired_event_ids", str(missing_desired_event_ids), ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "missing_desired_event_ids.length", str(len(missing_desired_event_ids)), ) @@ -1240,11 +1240,11 @@ async def _get_state_ids_after_missing_prev_event( event_id, failed_to_fetch, ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "failed_to_fetch", str(failed_to_fetch), ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "failed_to_fetch.length", str(len(failed_to_fetch)), ) @@ -1626,11 +1626,11 @@ async def _auth_and_persist_outliers( event_map = 
{event.event_id: event for event in events} event_ids = event_map.keys() - set_tag( + set_attribute( SynapseTags.FUNC_ARG_PREFIX + "event_ids", str(event_ids), ) - set_tag( + set_attribute( SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", str(len(event_ids)), ) @@ -1790,11 +1790,11 @@ async def _check_event_auth( claimed_auth_events = await self._load_or_fetch_auth_events_for_event( origin, event ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "claimed_auth_events", str([ev.event_id for ev in claimed_auth_events]), ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "claimed_auth_events.length", str(len(claimed_auth_events)), ) @@ -2244,11 +2244,11 @@ async def persist_events_and_notify( if not backfilled: # Never notify for backfilled events with start_active_span("notify_persisted_events"): - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "event_ids", str([ev.event_id for ev in events]), ) - set_tag( + set_attribute( SynapseTags.RESULT_PREFIX + "event_ids.length", str(len(events)), ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 4cf593cfdcbc..f2a0101733ab 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -52,7 +52,7 @@ from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator from synapse.handlers.directory import DirectoryHandler -from synapse.logging import opentracing +from synapse.logging import tracing from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet @@ -1462,9 +1462,10 @@ async def _persist_events( a room that has been un-partial stated. 
""" - await self._bulk_push_rule_evaluator.action_for_events_by_user( - events_and_context - ) + with tracing.start_active_span("calculate_push_actions"): + await self._bulk_push_rule_evaluator.action_for_events_by_user( + events_and_context + ) try: # If we're a worker we need to hit out to the master. @@ -1562,7 +1563,7 @@ async def cache_joined_hosts_for_events( state = await state_entry.get_state( self._storage_controllers.state, StateFilter.all() ) - with opentracing.start_active_span("get_joined_hosts"): + with tracing.start_active_span("get_joined_hosts"): joined_hosts = await self.store.get_joined_hosts( event.room_id, state, state_entry ) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index a4ca9cb8b4cb..fcb85723481e 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -24,7 +24,7 @@ from synapse.api.filtering import Filter from synapse.events.utils import SerializeEventConfig from synapse.handlers.room import ShutdownRoomResponse -from synapse.logging.opentracing import trace +from synapse.logging.tracing import trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.rest.admin._base import assert_user_is_admin from synapse.storage.state import StateFilter diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 8e71dda970c0..2c990aab0a21 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -20,7 +20,7 @@ from synapse.api.constants import EventTypes, RelationTypes from synapse.api.errors import SynapseError from synapse.events import EventBase, relation_from_event -from synapse.logging.opentracing import trace +from synapse.logging.tracing import trace from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, Requester, StreamToken, UserID diff --git a/synapse/handlers/room_member.py 
b/synapse/handlers/room_member.py index 6ad2b38b8f96..2ebd2e6eb772 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -32,7 +32,7 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN -from synapse.logging import opentracing +from synapse.logging import tracing from synapse.module_api import NOT_SPAM from synapse.storage.state import StateFilter from synapse.types import ( @@ -433,7 +433,7 @@ async def _local_membership_update( await self._join_rate_per_room_limiter.ratelimit( requester, key=room_id, update=False ) - with opentracing.start_active_span("handle_new_client_event"): + with tracing.start_active_span("handle_new_client_event"): result_event = await self.event_creation_handler.handle_new_client_event( requester, events_and_context=[(event, context)], @@ -571,7 +571,7 @@ async def update_membership( # by application services), and then by room ID. 
async with self.member_as_limiter.queue(as_id): async with self.member_linearizer.queue(key): - with opentracing.start_active_span("update_membership_locked"): + with tracing.start_active_span("update_membership_locked"): result = await self.update_membership_locked( requester, target, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 259456b55dab..c00f30518ad2 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -38,7 +38,12 @@ from synapse.events import EventBase from synapse.handlers.relations import BundledAggregations from synapse.logging.context import current_context -from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span +from synapse.logging.tracing import ( + SynapseTags, + log_kv, + set_attribute, + start_active_span, +) from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.databases.main.event_push_actions import RoomNotifCounts from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary @@ -406,12 +411,12 @@ async def current_sync_for_user( indoctrination. """ with start_active_span("sync.current_sync_for_user"): - log_kv({"since_token": since_token}) + log_kv({"since_token": str(since_token)}) sync_result = await self.generate_sync_result( sync_config, since_token, full_state ) - set_tag(SynapseTags.SYNC_RESULT, bool(sync_result)) + set_attribute(SynapseTags.SYNC_RESULT, bool(sync_result)) return sync_result async def push_rules_for_user(self, user: UserID) -> Dict[str, Dict[str, list]]: @@ -1319,7 +1324,7 @@ async def generate_sync_result( # to query up to a given point. 
# Always use the `now_token` in `SyncResultBuilder` now_token = self.event_sources.get_current_token() - log_kv({"now_token": now_token}) + log_kv({"now_token": str(now_token)}) # Since we fetched the users room list before the token, there's a small window # during which membership events may have been persisted, so we fetch these now @@ -1607,7 +1612,7 @@ async def _generate_sync_entry_for_to_device( # `/sync` message_id = message.pop("message_id", None) if message_id: - set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) + set_attribute(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) logger.debug( "Returning %d to-device messages between %d and %d (current token: %d)", @@ -2262,13 +2267,13 @@ async def _generate_room_entry( upto_token = room_builder.upto_token with start_active_span("sync.generate_room_entry"): - set_tag("room_id", room_id) + set_attribute("room_id", room_id) log_kv({"events": len(events or ())}) log_kv( { - "since_token": since_token, - "upto_token": upto_token, + "since_token": str(since_token), + "upto_token": str(upto_token), } ) @@ -2283,7 +2288,7 @@ async def _generate_room_entry( log_kv( { "batch_events": len(batch.events), - "prev_batch": batch.prev_batch, + "prev_batch": str(batch.prev_batch), "batch_limited": batch.limited, } ) diff --git a/synapse/http/client.py b/synapse/http/client.py index 4eb740c04020..a7f93a298979 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -74,7 +74,13 @@ from synapse.http.proxyagent import ProxyAgent from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable -from synapse.logging.opentracing import set_tag, start_active_span, tags +from synapse.logging.tracing import ( + SpanAttributes, + SpanKind, + StatusCode, + set_status, + start_active_span, +) from synapse.types import ISynapseReactor from synapse.util import json_decoder from synapse.util.async_helpers import timeout_deferred @@ -416,12 +422,11 @@ async def request( with 
start_active_span( "outgoing-client-request", - tags={ - tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT, - tags.HTTP_METHOD: method, - tags.HTTP_URL: uri, + kind=SpanKind.CLIENT, + attributes={ + SpanAttributes.HTTP_METHOD: method, + SpanAttributes.HTTP_URL: uri, }, - finish_on_close=True, ): try: body_producer = None @@ -473,8 +478,7 @@ async def request( type(e).__name__, e.args[0], ) - set_tag(tags.ERROR, True) - set_tag("error_reason", e.args[0]) + set_status(StatusCode.ERROR, e) raise async def post_urlencoded_get_json( diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index b92f1d3d1af5..c35a5a12d59e 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -72,9 +72,14 @@ ) from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.http.types import QueryParams -from synapse.logging import opentracing +from synapse.logging import tracing from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.logging.opentracing import set_tag, start_active_span, tags +from synapse.logging.tracing import ( + SpanAttributes, + SpanKind, + set_attribute, + start_active_span, +) from synapse.types import JsonDict from synapse.util import json_decoder from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred @@ -517,18 +522,19 @@ async def _send_request( scope = start_active_span( "outgoing-federation-request", - tags={ - tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT, - tags.PEER_ADDRESS: request.destination, - tags.HTTP_METHOD: request.method, - tags.HTTP_URL: request.path, + kind=SpanKind.CLIENT, + attributes={ + SpanAttributes.HTTP_HOST: request.destination, + SpanAttributes.HTTP_METHOD: request.method, + SpanAttributes.HTTP_URL: request.path, }, - finish_on_close=True, ) # Inject the span into the headers headers_dict: Dict[bytes, List[bytes]] = {} - opentracing.inject_header_dict(headers_dict, 
request.destination) + tracing.inject_active_tracing_context_into_header_dict( + headers_dict, request.destination + ) headers_dict[b"User-Agent"] = [self.version_string_bytes] @@ -614,7 +620,7 @@ async def _send_request( request.method, response.code ).inc() - set_tag(tags.HTTP_STATUS_CODE, response.code) + set_attribute(SpanAttributes.HTTP_STATUS_CODE, response.code) response_phrase = response.phrase.decode("ascii", errors="replace") if 200 <= response.code < 300: diff --git a/synapse/http/server.py b/synapse/http/server.py index 051a1899a0e1..69e7147a2d4b 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -61,14 +61,14 @@ from synapse.config.homeserver import HomeServerConfig from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background -from synapse.logging.opentracing import active_span, start_active_span, trace_servlet +from synapse.logging.tracing import get_active_span, start_active_span, trace_servlet from synapse.util import json_encoder from synapse.util.caches import intern_dict from synapse.util.cancellation import is_function_cancellable from synapse.util.iterutils import chunk_seq if TYPE_CHECKING: - import opentracing + import opentelemetry from synapse.server import HomeServer @@ -268,7 +268,7 @@ def register_paths( subsequent arguments will be any matched groups from the regex. This should return either tuple of (code, response), or None. servlet_classname: The name of the handler to be used in prometheus - and opentracing logs. + and tracing logs. """ @@ -279,7 +279,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): requests by method, or override `_async_render` to handle all requests. Args: - extract_context: Whether to attempt to extract the opentracing + extract_context: Whether to attempt to extract the tracing context from the request the servlet is handling. """ @@ -449,7 +449,7 @@ def register_paths( callback: The handler for the request. 
Usually a Servlet servlet_classname: The name of the handler to be used in prometheus - and opentracing logs. + and tracing logs. """ method_bytes = method.encode("utf-8") @@ -817,19 +817,19 @@ async def _async_write_json_to_request_in_thread( expensive. """ - def encode(opentracing_span: "Optional[opentracing.Span]") -> bytes: + def encode(tracing_span: Optional["opentelemetry.trace.Span"]) -> bytes: # it might take a while for the threadpool to schedule us, so we write - # opentracing logs once we actually get scheduled, so that we can see how + # tracing logs once we actually get scheduled, so that we can see how # much that contributed. - if opentracing_span: - opentracing_span.log_kv({"event": "scheduled"}) + if tracing_span: + tracing_span.add_event("scheduled", attributes={"event": "scheduled"}) res = json_encoder(json_object) - if opentracing_span: - opentracing_span.log_kv({"event": "encoded"}) + if tracing_span: + tracing_span.add_event("encoded", attributes={"event": "encoded"}) return res with start_active_span("encode_json_response"): - span = active_span() + span = get_active_span() json_str = await defer_to_thread(request.reactor, encode, span) _write_bytes_to_request(request, json_str) diff --git a/synapse/http/site.py b/synapse/http/site.py index 6a1dbf7f33b6..2de618e46aca 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -37,7 +37,7 @@ from synapse.types import Requester if TYPE_CHECKING: - import opentracing + import opentelemetry logger = logging.getLogger(__name__) @@ -88,9 +88,9 @@ def __init__( # server name, for client requests this is the Requester object. self._requester: Optional[Union[Requester, str]] = None - # An opentracing span for this request. Will be closed when the request is + # A tracing span for this request. Will be closed when the request is # completely processed. 
- self._opentracing_span: "Optional[opentracing.Span]" = None + self._tracing_span: Optional["opentelemetry.trace.Span"] = None # we can't yet create the logcontext, as we don't know the method. self.logcontext: Optional[LoggingContext] = None @@ -167,12 +167,12 @@ def requester(self, value: Union[Requester, str]) -> None: # If there's no authenticated entity, it was the requester. self.logcontext.request.authenticated_entity = authenticated_entity or requester - def set_opentracing_span(self, span: "opentracing.Span") -> None: - """attach an opentracing span to this request + def set_tracing_span(self, span: "opentelemetry.trace.Span") -> None: + """attach a tracing span to this request Doing so will cause the span to be closed when we finish processing the request """ - self._opentracing_span = span + self._tracing_span = span def get_request_id(self) -> str: request_id_value = None @@ -319,8 +319,8 @@ async def handle_request(request): self._processing_finished_time = time.time() self._is_processing = False - if self._opentracing_span: - self._opentracing_span.log_kv({"event": "finished processing"}) + if self._tracing_span: + self._tracing_span.add_event( + "finished processing", attributes={"event": "finished processing"} + ) # if we've already sent the response, log it now; otherwise, we wait for the # response to be sent. 
@@ -335,8 +337,10 @@ def finish(self) -> None: """ self.finish_time = time.time() Request.finish(self) - if self._opentracing_span: - self._opentracing_span.log_kv({"event": "response sent"}) + if self._tracing_span: + self._tracing_span.add_event( + "response sent", attributes={"event": "response sent"} + ) if not self._is_processing: assert self.logcontext is not None with PreserveLoggingContext(self.logcontext): @@ -371,9 +375,13 @@ def connectionLost(self, reason: Union[Failure, Exception]) -> None: with PreserveLoggingContext(self.logcontext): logger.info("Connection from client lost before response was sent") - if self._opentracing_span: - self._opentracing_span.log_kv( - {"event": "client connection lost", "reason": str(reason.value)} + if self._tracing_span: + self._tracing_span.add_event( + "client connection lost", + attributes={ + "event": "client connection lost", + "reason": str(reason.value), + }, ) if self._is_processing: @@ -481,9 +489,9 @@ def _finished_processing(self) -> None: usage.evt_db_fetch_count, ) - # complete the opentracing span, if any. - if self._opentracing_span: - self._opentracing_span.finish() + # complete the tracing span, if any. 
+ if self._tracing_span: + self._tracing_span.end() try: self.request_metrics.stop(self.finish_time, self.code, self.sentLength) diff --git a/synapse/logging/context.py b/synapse/logging/context.py index f62bea968fe4..56243aa5e71e 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -46,7 +46,6 @@ from twisted.python.threadpool import ThreadPool if TYPE_CHECKING: - from synapse.logging.scopecontextmanager import _LogContextScope from synapse.types import ISynapseReactor logger = logging.getLogger(__name__) @@ -220,14 +219,13 @@ class ContextRequest: class _Sentinel: """Sentinel to represent the root context""" - __slots__ = ["previous_context", "finished", "request", "scope", "tag"] + __slots__ = ["previous_context", "finished", "request", "tag"] def __init__(self) -> None: # Minimal set for compatibility with LoggingContext self.previous_context = None self.finished = False self.request = None - self.scope = None self.tag = None def __str__(self) -> str: @@ -280,7 +278,6 @@ class LoggingContext: "finished", "request", "tag", - "scope", ] def __init__( @@ -301,7 +298,6 @@ def __init__( self.main_thread = get_thread_id() self.request = None self.tag = "" - self.scope: Optional["_LogContextScope"] = None # keep track of whether we have hit the __exit__ block for this context # (suggesting that the the thing that created the context thinks it should @@ -314,9 +310,6 @@ def __init__( # we track the current request_id self.request = self.parent_context.request - # we also track the current scope: - self.scope = self.parent_context.scope - if request is not None: # the request param overrides the request from the parent context self.request = request diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py deleted file mode 100644 index b69060854f07..000000000000 --- a/synapse/logging/opentracing.py +++ /dev/null @@ -1,1055 +0,0 @@ -# Copyright 2019 The Matrix.org Foundation C.I.C. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# NOTE -# This is a small wrapper around opentracing because opentracing is not currently -# packaged downstream (specifically debian). Since opentracing instrumentation is -# fairly invasive it was awkward to make it optional. As a result we opted to encapsulate -# all opentracing state in these methods which effectively noop if opentracing is -# not present. We should strongly consider encouraging the downstream distributers -# to package opentracing and making opentracing a full dependency. In order to facilitate -# this move the methods have work very similarly to opentracing's and it should only -# be a matter of few regexes to move over to opentracing's access patterns proper. - -""" -============================ -Using OpenTracing in Synapse -============================ - -Python-specific tracing concepts are at https://opentracing.io/guides/python/. -Note that Synapse wraps OpenTracing in a small module (this one) in order to make the -OpenTracing dependency optional. That means that the access patterns are -different to those demonstrated in the OpenTracing guides. However, it is -still useful to know, especially if OpenTracing is included as a full dependency -in the future or if you are modifying this module. - - -OpenTracing is encapsulated so that -no span objects from OpenTracing are exposed in Synapse's code. 
This allows -OpenTracing to be easily disabled in Synapse and thereby have OpenTracing as -an optional dependency. This does however limit the number of modifiable spans -at any point in the code to one. From here out references to `opentracing` -in the code snippets refer to the Synapses module. -Most methods provided in the module have a direct correlation to those provided -by opentracing. Refer to docs there for a more in-depth documentation on some of -the args and methods. - -Tracing -------- - -In Synapse it is not possible to start a non-active span. Spans can be started -using the ``start_active_span`` method. This returns a scope (see -OpenTracing docs) which is a context manager that needs to be entered and -exited. This is usually done by using ``with``. - -.. code-block:: python - - from synapse.logging.opentracing import start_active_span - - with start_active_span("operation name"): - # Do something we want to tracer - -Forgetting to enter or exit a scope will result in some mysterious and grievous log -context errors. - -At anytime where there is an active span ``opentracing.set_tag`` can be used to -set a tag on the current active span. - -Tracing functions ------------------ - -Functions can be easily traced using decorators. The name of -the function becomes the operation name for the span. - -.. code-block:: python - - from synapse.logging.opentracing import trace - - # Start a span using 'interesting_function' as the operation name - @trace - def interesting_function(*args, **kwargs): - # Does all kinds of cool and expected things - return something_usual_and_useful - - -Operation names can be explicitly set for a function by using ``trace_with_opname``: - -.. 
code-block:: python - - from synapse.logging.opentracing import trace_with_opname - - @trace_with_opname("a_better_operation_name") - def interesting_badly_named_function(*args, **kwargs): - # Does all kinds of cool and expected things - return something_usual_and_useful - -Setting Tags ------------- - -To set a tag on the active span do - -.. code-block:: python - - from synapse.logging.opentracing import set_tag - - set_tag(tag_name, tag_value) - -There's a convenient decorator to tag all the args of the method. It uses -inspection in order to use the formal parameter names prefixed with 'ARG_' as -tag names. It uses kwarg names as tag names without the prefix. - -.. code-block:: python - - from synapse.logging.opentracing import tag_args - - @tag_args - def set_fates(clotho, lachesis, atropos, father="Zues", mother="Themis"): - pass - - set_fates("the story", "the end", "the act") - # This will have the following tags - # - ARG_clotho: "the story" - # - ARG_lachesis: "the end" - # - ARG_atropos: "the act" - # - father: "Zues" - # - mother: "Themis" - -Contexts and carriers ---------------------- - -There are a selection of wrappers for injecting and extracting contexts from -carriers provided. Unfortunately OpenTracing's three context injection -techniques are not adequate for our inject of OpenTracing span-contexts into -Twisted's http headers, EDU contents and our database tables. Also note that -the binary encoding format mandated by OpenTracing is not actually implemented -by jaeger_client v4.0.0 - it will silently noop. -Please refer to the end of ``logging/opentracing.py`` for the available -injection and extraction methods. - -Homeserver whitelisting ------------------------ - -Most of the whitelist checks are encapsulated in the modules's injection -and extraction method but be aware that using custom carriers or crossing -unchartered waters will require the enforcement of the whitelist. 
-``logging/opentracing.py`` has a ``whitelisted_homeserver`` method which takes -in a destination and compares it to the whitelist. - -Most injection methods take a 'destination' arg. The context will only be injected -if the destination matches the whitelist or the destination is None. - -======= -Gotchas -======= - -- Checking whitelists on span propagation -- Inserting pii -- Forgetting to enter or exit a scope -- Span source: make sure that the span you expect to be active across a - function call really will be that one. Does the current function have more - than one caller? Will all of those calling functions have be in a context - with an active span? -""" -import contextlib -import enum -import inspect -import logging -import re -from functools import wraps -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Collection, - ContextManager, - Dict, - Generator, - Iterable, - List, - Optional, - Pattern, - Type, - TypeVar, - Union, - cast, - overload, -) - -import attr -from typing_extensions import ParamSpec - -from twisted.internet import defer -from twisted.web.http import Request -from twisted.web.http_headers import Headers - -from synapse.config import ConfigError -from synapse.util import json_decoder, json_encoder - -if TYPE_CHECKING: - from synapse.http.site import SynapseRequest - from synapse.server import HomeServer - -# Helper class - -# Matches the number suffix in an instance name like "matrix.org client_reader-8" -STRIP_INSTANCE_NUMBER_SUFFIX_REGEX = re.compile(r"[_-]?\d+$") - - -class _DummyTagNames: - """wrapper of opentracings tags. We need to have them if we - want to reference them without opentracing around. Clearly they - should never actually show up in a trace. 
`set_tags` overwrites - these with the correct ones.""" - - INVALID_TAG = "invalid-tag" - COMPONENT = INVALID_TAG - DATABASE_INSTANCE = INVALID_TAG - DATABASE_STATEMENT = INVALID_TAG - DATABASE_TYPE = INVALID_TAG - DATABASE_USER = INVALID_TAG - ERROR = INVALID_TAG - HTTP_METHOD = INVALID_TAG - HTTP_STATUS_CODE = INVALID_TAG - HTTP_URL = INVALID_TAG - MESSAGE_BUS_DESTINATION = INVALID_TAG - PEER_ADDRESS = INVALID_TAG - PEER_HOSTNAME = INVALID_TAG - PEER_HOST_IPV4 = INVALID_TAG - PEER_HOST_IPV6 = INVALID_TAG - PEER_PORT = INVALID_TAG - PEER_SERVICE = INVALID_TAG - SAMPLING_PRIORITY = INVALID_TAG - SERVICE = INVALID_TAG - SPAN_KIND = INVALID_TAG - SPAN_KIND_CONSUMER = INVALID_TAG - SPAN_KIND_PRODUCER = INVALID_TAG - SPAN_KIND_RPC_CLIENT = INVALID_TAG - SPAN_KIND_RPC_SERVER = INVALID_TAG - - -try: - import opentracing - import opentracing.tags - - tags = opentracing.tags -except ImportError: - opentracing = None # type: ignore[assignment] - tags = _DummyTagNames # type: ignore[assignment] -try: - from jaeger_client import Config as JaegerConfig - - from synapse.logging.scopecontextmanager import LogContextScopeManager -except ImportError: - JaegerConfig = None # type: ignore - LogContextScopeManager = None # type: ignore - - -try: - from rust_python_jaeger_reporter import Reporter - - # jaeger-client 4.7.0 requires that reporters inherit from BaseReporter, which - # didn't exist before that version. 
- try: - from jaeger_client.reporter import BaseReporter - except ImportError: - - class BaseReporter: # type: ignore[no-redef] - pass - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class _WrappedRustReporter(BaseReporter): - """Wrap the reporter to ensure `report_span` never throws.""" - - _reporter: Reporter = attr.Factory(Reporter) - - def set_process(self, *args: Any, **kwargs: Any) -> None: - return self._reporter.set_process(*args, **kwargs) - - def report_span(self, span: "opentracing.Span") -> None: - try: - return self._reporter.report_span(span) - except Exception: - logger.exception("Failed to report span") - - RustReporter: Optional[Type[_WrappedRustReporter]] = _WrappedRustReporter -except ImportError: - RustReporter = None - - -logger = logging.getLogger(__name__) - - -class SynapseTags: - # The message ID of any to_device message processed - TO_DEVICE_MESSAGE_ID = "to_device.message_id" - - # Whether the sync response has new data to be returned to the client. - SYNC_RESULT = "sync.new_data" - - INSTANCE_NAME = "instance_name" - - # incoming HTTP request ID (as written in the logs) - REQUEST_ID = "request_id" - - # HTTP request tag (used to distinguish full vs incremental syncs, etc) - REQUEST_TAG = "request_tag" - - # Text description of a database transaction - DB_TXN_DESC = "db.txn_desc" - - # Uniqueish ID of a database transaction - DB_TXN_ID = "db.txn_id" - - # The name of the external cache - CACHE_NAME = "cache.name" - - # Used to tag function arguments - # - # Tag a named arg. The name of the argument should be appended to this prefix. - FUNC_ARG_PREFIX = "ARG." - # Tag extra variadic number of positional arguments (`def foo(first, second, *extras)`) - FUNC_ARGS = "args" - # Tag keyword args - FUNC_KWARGS = "kwargs" - - # Some intermediate result that's interesting to the function. The label for - # the result should be appended to this prefix. - RESULT_PREFIX = "RESULT." 
- - -class SynapseBaggage: - FORCE_TRACING = "synapse-force-tracing" - - -# Block everything by default -# A regex which matches the server_names to expose traces for. -# None means 'block everything'. -_homeserver_whitelist: Optional[Pattern[str]] = None - -# Util methods - - -class _Sentinel(enum.Enum): - # defining a sentinel in this way allows mypy to correctly handle the - # type of a dictionary lookup. - sentinel = object() - - -P = ParamSpec("P") -R = TypeVar("R") -T = TypeVar("T") - - -def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]: - """Executes the function only if we're tracing. Otherwise returns None.""" - - @wraps(func) - def _only_if_tracing_inner(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: - if opentracing: - return func(*args, **kwargs) - else: - return None - - return _only_if_tracing_inner - - -@overload -def ensure_active_span( - message: str, -) -> Callable[[Callable[P, R]], Callable[P, Optional[R]]]: - ... - - -@overload -def ensure_active_span( - message: str, ret: T -) -> Callable[[Callable[P, R]], Callable[P, Union[T, R]]]: - ... - - -def ensure_active_span( - message: str, ret: Optional[T] = None -) -> Callable[[Callable[P, R]], Callable[P, Union[Optional[T], R]]]: - """Executes the operation only if opentracing is enabled and there is an active span. - If there is no active span it logs message at the error level. - - Args: - message: Message which fills in "There was no active span when trying to %s" - in the error log if there is no active span and opentracing is enabled. - ret: return value if opentracing is None or there is no active span. - - Returns: - The result of the func, falling back to ret if opentracing is disabled or there - was no active span. 
- """ - - def ensure_active_span_inner_1( - func: Callable[P, R] - ) -> Callable[P, Union[Optional[T], R]]: - @wraps(func) - def ensure_active_span_inner_2( - *args: P.args, **kwargs: P.kwargs - ) -> Union[Optional[T], R]: - if not opentracing: - return ret - - if not opentracing.tracer.active_span: - logger.error( - "There was no active span when trying to %s." - " Did you forget to start one or did a context slip?", - message, - stack_info=True, - ) - - return ret - - return func(*args, **kwargs) - - return ensure_active_span_inner_2 - - return ensure_active_span_inner_1 - - -# Setup - - -def init_tracer(hs: "HomeServer") -> None: - """Set the whitelists and initialise the JaegerClient tracer""" - global opentracing - if not hs.config.tracing.opentracer_enabled: - # We don't have a tracer - opentracing = None # type: ignore[assignment] - return - - if not opentracing or not JaegerConfig: - raise ConfigError( - "The server has been configured to use opentracing but opentracing is not " - "installed." - ) - - # Pull out the jaeger config if it was given. Otherwise set it to something sensible. - # See https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/config.py - - set_homeserver_whitelist(hs.config.tracing.opentracer_whitelist) - - from jaeger_client.metrics.prometheus import PrometheusMetricsFactory - - # Instance names are opaque strings but by stripping off the number suffix, - # we can get something that looks like a "worker type", e.g. - # "client_reader-1" -> "client_reader" so we don't spread the traces across - # so many services. 
- instance_name_by_type = re.sub( - STRIP_INSTANCE_NUMBER_SUFFIX_REGEX, "", hs.get_instance_name() - ) - - config = JaegerConfig( - config=hs.config.tracing.jaeger_config, - service_name=f"{hs.config.server.server_name} {instance_name_by_type}", - scope_manager=LogContextScopeManager(), - metrics_factory=PrometheusMetricsFactory(), - ) - - # If we have the rust jaeger reporter available let's use that. - if RustReporter: - logger.info("Using rust_python_jaeger_reporter library") - assert config.sampler is not None - tracer = config.create_tracer(RustReporter(), config.sampler) - opentracing.set_global_tracer(tracer) - else: - config.initialize_tracer() - - -# Whitelisting - - -@only_if_tracing -def set_homeserver_whitelist(homeserver_whitelist: Iterable[str]) -> None: - """Sets the homeserver whitelist - - Args: - homeserver_whitelist: regexes specifying whitelisted homeservers - """ - global _homeserver_whitelist - if homeserver_whitelist: - # Makes a single regex which accepts all passed in regexes in the list - _homeserver_whitelist = re.compile( - "({})".format(")|(".join(homeserver_whitelist)) - ) - - -@only_if_tracing -def whitelisted_homeserver(destination: str) -> bool: - """Checks if a destination matches the whitelist - - Args: - destination - """ - - if _homeserver_whitelist: - return _homeserver_whitelist.match(destination) is not None - return False - - -# Start spans and scopes - -# Could use kwargs but I want these to be explicit -def start_active_span( - operation_name: str, - child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None, - references: Optional[List["opentracing.Reference"]] = None, - tags: Optional[Dict[str, str]] = None, - start_time: Optional[float] = None, - ignore_active_span: bool = False, - finish_on_close: bool = True, - *, - tracer: Optional["opentracing.Tracer"] = None, -) -> "opentracing.Scope": - """Starts an active opentracing span. 
- - Records the start time for the span, and sets it as the "active span" in the - scope manager. - - Args: - See opentracing.tracer - Returns: - scope (Scope) or contextlib.nullcontext - """ - - if opentracing is None: - return contextlib.nullcontext() # type: ignore[unreachable] - - if tracer is None: - # use the global tracer by default - tracer = opentracing.tracer - - return tracer.start_active_span( - operation_name, - child_of=child_of, - references=references, - tags=tags, - start_time=start_time, - ignore_active_span=ignore_active_span, - finish_on_close=finish_on_close, - ) - - -def start_active_span_follows_from( - operation_name: str, - contexts: Collection, - child_of: Optional[Union["opentracing.Span", "opentracing.SpanContext"]] = None, - start_time: Optional[float] = None, - *, - inherit_force_tracing: bool = False, - tracer: Optional["opentracing.Tracer"] = None, -) -> "opentracing.Scope": - """Starts an active opentracing span, with additional references to previous spans - - Args: - operation_name: name of the operation represented by the new span - contexts: the previous spans to inherit from - - child_of: optionally override the parent span. If unset, the currently active - span will be the parent. (If there is no currently active span, the first - span in `contexts` will be the parent.) - - start_time: optional override for the start time of the created span. Seconds - since the epoch. - - inherit_force_tracing: if set, and any of the previous contexts have had tracing - forced, the new span will also have tracing forced. - tracer: override the opentracing tracer. By default the global tracer is used. 
- """ - if opentracing is None: - return contextlib.nullcontext() # type: ignore[unreachable] - - references = [opentracing.follows_from(context) for context in contexts] - scope = start_active_span( - operation_name, - child_of=child_of, - references=references, - start_time=start_time, - tracer=tracer, - ) - - if inherit_force_tracing and any( - is_context_forced_tracing(ctx) for ctx in contexts - ): - force_tracing(scope.span) - - return scope - - -def start_active_span_from_edu( - edu_content: Dict[str, Any], - operation_name: str, - references: Optional[List["opentracing.Reference"]] = None, - tags: Optional[Dict[str, str]] = None, - start_time: Optional[float] = None, - ignore_active_span: bool = False, - finish_on_close: bool = True, -) -> "opentracing.Scope": - """ - Extracts a span context from an edu and uses it to start a new active span - - Args: - edu_content: an edu_content with a `context` field whose value is - canonical json for a dict which contains opentracing information. - - For the other args see opentracing.tracer - """ - references = references or [] - - if opentracing is None: - return contextlib.nullcontext() # type: ignore[unreachable] - - carrier = json_decoder.decode(edu_content.get("context", "{}")).get( - "opentracing", {} - ) - context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier) - _references = [ - opentracing.child_of(span_context_from_string(x)) - for x in carrier.get("references", []) - ] - - # For some reason jaeger decided not to support the visualization of multiple parent - # spans or explicitly show references. I include the span context as a tag here as - # an aid to people debugging but it's really not an ideal solution. 
- - references += _references - - scope = opentracing.tracer.start_active_span( - operation_name, - child_of=context, - references=references, - tags=tags, - start_time=start_time, - ignore_active_span=ignore_active_span, - finish_on_close=finish_on_close, - ) - - scope.span.set_tag("references", carrier.get("references", [])) - return scope - - -# Opentracing setters for tags, logs, etc -@only_if_tracing -def active_span() -> Optional["opentracing.Span"]: - """Get the currently active span, if any""" - return opentracing.tracer.active_span - - -@ensure_active_span("set a tag") -def set_tag(key: str, value: Union[str, bool, int, float]) -> None: - """Sets a tag on the active span""" - assert opentracing.tracer.active_span is not None - opentracing.tracer.active_span.set_tag(key, value) - - -@ensure_active_span("log") -def log_kv(key_values: Dict[str, Any], timestamp: Optional[float] = None) -> None: - """Log to the active span""" - assert opentracing.tracer.active_span is not None - opentracing.tracer.active_span.log_kv(key_values, timestamp) - - -@ensure_active_span("set the traces operation name") -def set_operation_name(operation_name: str) -> None: - """Sets the operation name of the active span""" - assert opentracing.tracer.active_span is not None - opentracing.tracer.active_span.set_operation_name(operation_name) - - -@only_if_tracing -def force_tracing( - span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel -) -> None: - """Force sampling for the active/given span and its children. - - Args: - span: span to force tracing for. By default, the active span. 
- """ - if isinstance(span, _Sentinel): - span_to_trace = opentracing.tracer.active_span - else: - span_to_trace = span - if span_to_trace is None: - logger.error("No active span in force_tracing") - return - - span_to_trace.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) - - # also set a bit of baggage, so that we have a way of figuring out if - # it is enabled later - span_to_trace.set_baggage_item(SynapseBaggage.FORCE_TRACING, "1") - - -def is_context_forced_tracing( - span_context: Optional["opentracing.SpanContext"], -) -> bool: - """Check if sampling has been force for the given span context.""" - if span_context is None: - return False - return span_context.baggage.get(SynapseBaggage.FORCE_TRACING) is not None - - -# Injection and extraction - - -@ensure_active_span("inject the span into a header dict") -def inject_header_dict( - headers: Dict[bytes, List[bytes]], - destination: Optional[str] = None, - check_destination: bool = True, -) -> None: - """ - Injects a span context into a dict of HTTP headers - - Args: - headers: the dict to inject headers into - destination: address of entity receiving the span context. Must be given unless - check_destination is False. The context will only be injected if the - destination matches the opentracing whitelist - check_destination: If false, destination will be ignored and the context - will always be injected. - - Note: - The headers set by the tracer are custom to the tracer implementation which - should be unique enough that they don't interfere with any headers set by - synapse or twisted. 
If we're still using jaeger these headers would be those - here: - https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py - """ - if check_destination: - if destination is None: - raise ValueError( - "destination must be given unless check_destination is False" - ) - if not whitelisted_homeserver(destination): - return - - span = opentracing.tracer.active_span - - carrier: Dict[str, str] = {} - assert span is not None - opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier) - - for key, value in carrier.items(): - headers[key.encode()] = [value.encode()] - - -def inject_response_headers(response_headers: Headers) -> None: - """Inject the current trace id into the HTTP response headers""" - if not opentracing: - return - span = opentracing.tracer.active_span - if not span: - return - - # This is a bit implementation-specific. - # - # Jaeger's Spans have a trace_id property; other implementations (including the - # dummy opentracing.span.Span which we use if init_tracer is not called) do not - # expose it - trace_id = getattr(span, "trace_id", None) - - if trace_id is not None: - response_headers.addRawHeader("Synapse-Trace-Id", f"{trace_id:x}") - - -@ensure_active_span( - "get the active span context as a dict", ret=cast(Dict[str, str], {}) -) -def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str]: - """ - Gets a span context as a dict. This can be used instead of manually - injecting a span into an empty carrier. - - Args: - destination: the name of the remote server. - - Returns: - the active span's context if opentracing is enabled, otherwise empty. 
- """ - - if destination and not whitelisted_homeserver(destination): - return {} - - carrier: Dict[str, str] = {} - assert opentracing.tracer.active_span is not None - opentracing.tracer.inject( - opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier - ) - - return carrier - - -@ensure_active_span("get the span context as a string.", ret={}) -def active_span_context_as_string() -> str: - """ - Returns: - The active span context encoded as a string. - """ - carrier: Dict[str, str] = {} - if opentracing: - assert opentracing.tracer.active_span is not None - opentracing.tracer.inject( - opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier - ) - return json_encoder.encode(carrier) - - -def span_context_from_request(request: Request) -> "Optional[opentracing.SpanContext]": - """Extract an opentracing context from the headers on an HTTP request - - This is useful when we have received an HTTP request from another part of our - system, and want to link our spans to those of the remote system. - """ - if not opentracing: - return None - header_dict = { - k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders() - } - return opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict) - - -@only_if_tracing -def span_context_from_string(carrier: str) -> Optional["opentracing.SpanContext"]: - """ - Returns: - The active span context decoded from a string. - """ - payload: Dict[str, str] = json_decoder.decode(carrier) - return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, payload) - - -@only_if_tracing -def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanContext"]: - """ - Wrapper method for opentracing's tracer.extract for TEXT_MAP. - Args: - carrier: a dict possibly containing a span context. - - Returns: - The active span context extracted from carrier. 
- """ - return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier) - - -# Tracing decorators - - -def _custom_sync_async_decorator( - func: Callable[P, R], - wrapping_logic: Callable[[Callable[P, R], Any, Any], ContextManager[None]], -) -> Callable[P, R]: - """ - Decorates a function that is sync or async (coroutines), or that returns a Twisted - `Deferred`. The custom business logic of the decorator goes in `wrapping_logic`. - - Example usage: - ```py - # Decorator to time the function and log it out - def duration(func: Callable[P, R]) -> Callable[P, R]: - @contextlib.contextmanager - def _wrapping_logic(func: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> Generator[None, None, None]: - start_ts = time.time() - try: - yield - finally: - end_ts = time.time() - duration = end_ts - start_ts - logger.info("%s took %s seconds", func.__name__, duration) - return _custom_sync_async_decorator(func, _wrapping_logic) - ``` - - Args: - func: The function to be decorated - wrapping_logic: The business logic of your custom decorator. - This should be a ContextManager so you are able to run your logic - before/after the function as desired. - """ - - if inspect.iscoroutinefunction(func): - - @wraps(func) - async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: - with wrapping_logic(func, *args, **kwargs): - return await func(*args, **kwargs) # type: ignore[misc] - - else: - # The other case here handles both sync functions and those - # decorated with inlineDeferred. 
- @wraps(func) - def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: - scope = wrapping_logic(func, *args, **kwargs) - scope.__enter__() - - try: - result = func(*args, **kwargs) - if isinstance(result, defer.Deferred): - - def call_back(result: R) -> R: - scope.__exit__(None, None, None) - return result - - def err_back(result: R) -> R: - scope.__exit__(None, None, None) - return result - - result.addCallbacks(call_back, err_back) - - else: - if inspect.isawaitable(result): - logger.error( - "@trace may not have wrapped %s correctly! " - "The function is not async but returned a %s.", - func.__qualname__, - type(result).__name__, - ) - - scope.__exit__(None, None, None) - - return result - - except Exception as e: - scope.__exit__(type(e), None, e.__traceback__) - raise - - return _wrapper # type: ignore[return-value] - - -def trace_with_opname( - opname: str, - *, - tracer: Optional["opentracing.Tracer"] = None, -) -> Callable[[Callable[P, R]], Callable[P, R]]: - """ - Decorator to trace a function with a custom opname. - See the module's doc string for usage examples. - """ - - # type-ignore: mypy bug, see https://github.com/python/mypy/issues/12909 - @contextlib.contextmanager # type: ignore[arg-type] - def _wrapping_logic( - func: Callable[P, R], *args: P.args, **kwargs: P.kwargs - ) -> Generator[None, None, None]: - with start_active_span(opname, tracer=tracer): - yield - - def _decorator(func: Callable[P, R]) -> Callable[P, R]: - if not opentracing: - return func - - return _custom_sync_async_decorator(func, _wrapping_logic) - - return _decorator - - -def trace(func: Callable[P, R]) -> Callable[P, R]: - """ - Decorator to trace a function. - Sets the operation name to that of the function's name. - See the module's doc string for usage examples. - """ - - return trace_with_opname(func.__name__)(func) - - -def tag_args(func: Callable[P, R]) -> Callable[P, R]: - """ - Decorator to tag all of the args to the active span. 
- - Args: - func: `func` is assumed to be a method taking a `self` parameter, or a - `classmethod` taking a `cls` parameter. In either case, a tag is not - created for this parameter. - """ - - if not opentracing: - return func - - # type-ignore: mypy bug, see https://github.com/python/mypy/issues/12909 - @contextlib.contextmanager # type: ignore[arg-type] - def _wrapping_logic( - func: Callable[P, R], *args: P.args, **kwargs: P.kwargs - ) -> Generator[None, None, None]: - argspec = inspect.getfullargspec(func) - # We use `[1:]` to skip the `self` object reference and `start=1` to - # make the index line up with `argspec.args`. - # - # FIXME: We could update this to handle any type of function by ignoring the - # first argument only if it's named `self` or `cls`. This isn't fool-proof - # but handles the idiomatic cases. - for i, arg in enumerate(args[1:], start=1): - set_tag(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg)) - set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :])) - set_tag(SynapseTags.FUNC_KWARGS, str(kwargs)) - yield - - return _custom_sync_async_decorator(func, _wrapping_logic) - - -@contextlib.contextmanager -def trace_servlet( - request: "SynapseRequest", extract_context: bool = False -) -> Generator[None, None, None]: - """Returns a context manager which traces a request. It starts a span - with some servlet specific tags such as the request metrics name and - request information. - - Args: - request - extract_context: Whether to attempt to extract the opentracing - context from the request the servlet is handling. 
- """ - - if opentracing is None: - yield # type: ignore[unreachable] - return - - request_tags = { - SynapseTags.REQUEST_ID: request.get_request_id(), - tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, - tags.HTTP_METHOD: request.get_method(), - tags.HTTP_URL: request.get_redacted_uri(), - tags.PEER_HOST_IPV6: request.getClientAddress().host, - } - - request_name = request.request_metrics.name - context = span_context_from_request(request) if extract_context else None - - # we configure the scope not to finish the span immediately on exit, and instead - # pass the span into the SynapseRequest, which will finish it once we've finished - # sending the response to the client. - scope = start_active_span(request_name, child_of=context, finish_on_close=False) - request.set_opentracing_span(scope.span) - - with scope: - inject_response_headers(request.responseHeaders) - try: - yield - finally: - # We set the operation name again in case its changed (which happens - # with JsonResource). - scope.span.set_operation_name(request.request_metrics.name) - - request_tags[ - SynapseTags.REQUEST_TAG - ] = request.request_metrics.start_context.tag - - # set the tags *after* the servlet completes, in case it decided to - # prioritise the span (tags will get dropped on unprioritised spans) - for k, v in request_tags.items(): - scope.span.set_tag(k, v) diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py deleted file mode 100644 index 10877bdfc522..000000000000 --- a/synapse/logging/scopecontextmanager.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.import logging - -import logging -from types import TracebackType -from typing import Optional, Type - -from opentracing import Scope, ScopeManager, Span - -import twisted - -from synapse.logging.context import ( - LoggingContext, - current_context, - nested_logging_context, -) - -logger = logging.getLogger(__name__) - - -class LogContextScopeManager(ScopeManager): - """ - The LogContextScopeManager tracks the active scope in opentracing - by using the log contexts which are native to synapse. This is so - that the basic opentracing api can be used across twisted defereds. - - It would be nice just to use opentracing's ContextVarsScopeManager, - but currently that doesn't work due to https://twistedmatrix.com/trac/ticket/10301. - """ - - def __init__(self) -> None: - pass - - @property - def active(self) -> Optional[Scope]: - """ - Returns the currently active Scope which can be used to access the - currently active Scope.span. - If there is a non-null Scope, its wrapped Span - becomes an implicit parent of any newly-created Span at - Tracer.start_active_span() time. - - Return: - The Scope that is active, or None if not available. - """ - ctx = current_context() - return ctx.scope - - def activate(self, span: Span, finish_on_close: bool) -> Scope: - """ - Makes a Span active. - Args - span: the span that should become active. - finish_on_close: whether Span should be automatically finished when - Scope.close() is called. - - Returns: - Scope to control the end of the active period for - *span*. 
It is a programming error to neglect to call - Scope.close() on the returned instance. - """ - - ctx = current_context() - - if not ctx: - logger.error("Tried to activate scope outside of loggingcontext") - return Scope(None, span) # type: ignore[arg-type] - - if ctx.scope is not None: - # start a new logging context as a child of the existing one. - # Doing so -- rather than updating the existing logcontext -- means that - # creating several concurrent spans under the same logcontext works - # correctly. - ctx = nested_logging_context("") - enter_logcontext = True - else: - # if there is no span currently associated with the current logcontext, we - # just store the scope in it. - # - # This feels a bit dubious, but it does hack around a problem where a - # span outlasts its parent logcontext (which would otherwise lead to - # "Re-starting finished log context" errors). - enter_logcontext = False - - scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close) - ctx.scope = scope - if enter_logcontext: - ctx.__enter__() - - return scope - - -class _LogContextScope(Scope): - """ - A custom opentracing scope, associated with a LogContext - - * filters out _DefGen_Return exceptions which arise from calling - `defer.returnValue` in Twisted code - - * When the scope is closed, the logcontext's active scope is reset to None. - and - if enter_logcontext was set - the logcontext is finished too. - """ - - def __init__( - self, - manager: LogContextScopeManager, - span: Span, - logcontext: LoggingContext, - enter_logcontext: bool, - finish_on_close: bool, - ): - """ - Args: - manager: - the manager that is responsible for this scope. - span: - the opentracing span which this scope represents the local - lifetime for. - logcontext: - the log context to which this scope is attached. 
- enter_logcontext: - if True the log context will be exited when the scope is finished - finish_on_close: - if True finish the span when the scope is closed - """ - super().__init__(manager, span) - self.logcontext = logcontext - self._finish_on_close = finish_on_close - self._enter_logcontext = enter_logcontext - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - value: Optional[BaseException], - traceback: Optional[TracebackType], - ) -> None: - if exc_type == twisted.internet.defer._DefGen_Return: - # filter out defer.returnValue() calls - exc_type = value = traceback = None - super().__exit__(exc_type, value, traceback) - - def __str__(self) -> str: - return f"Scope<{self.span}>" - - def close(self) -> None: - active_scope = self.manager.active - if active_scope is not self: - logger.error( - "Closing scope %s which is not the currently-active one %s", - self, - active_scope, - ) - - if self._finish_on_close: - self.span.finish() - - self.logcontext.scope = None - - if self._enter_logcontext: - self.logcontext.__exit__(None, None, None) diff --git a/synapse/logging/tracing.py b/synapse/logging/tracing.py new file mode 100644 index 000000000000..d455854467ee --- /dev/null +++ b/synapse/logging/tracing.py @@ -0,0 +1,1022 @@ +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# NOTE This is a small wrapper around opentelemetry because tracing is optional +# and not always packaged downstream. 
Since opentelemetry instrumentation is
+# fairly invasive it was awkward to make it optional. As a result we opted to
+# encapsulate all opentelemetry state in these methods which effectively noop if
+# opentelemetry is not present. We should strongly consider encouraging the
+# downstream distributors to package opentelemetry and making opentelemetry a
+# full dependency. In order to facilitate this move, the methods work very
+# similarly to opentelemetry's and it should only be a matter of a few regexes to
+# move over to opentelemetry's access patterns proper.
+
+"""
+==============================
+Using OpenTelemetry in Synapse
+==============================
+
+Python-specific tracing concepts are at
+https://opentelemetry.io/docs/instrumentation/python/. Note that Synapse wraps
+OpenTelemetry in a small module (this one) in order to make the OpenTelemetry
+dependency optional. That means that some access patterns are different to those
+demonstrated in the OpenTelemetry guides. However, it is still useful to know,
+especially if OpenTelemetry is included as a full dependency in the future or if
+you are modifying this module.
+
+
+OpenTelemetry is encapsulated so that no span objects from OpenTelemetry are
+exposed in Synapse's code. This allows OpenTelemetry to be easily disabled in
+Synapse and thereby have OpenTelemetry as an optional dependency. This does
+however limit the number of modifiable spans at any point in the code to one.
+From here on out, references to `tracing` in the code snippets refer to the Synapse
+module. Most methods provided in the module have a direct correlation to those
+provided by OpenTelemetry. Refer to the docs there for more in-depth documentation
+on some of the args and methods.
+
+Tracing
+-------
+
+In Synapse, it is not possible to start a non-active span. Spans can be started
+using the ``start_active_span`` method. This returns a context manager that
+needs to be entered and exited to expose the ``span``. 
This is usually done by +using a ``with`` statement. + +.. code-block:: python + + from synapse.logging.tracing import start_active_span + + with start_active_span("operation name"): + # Do something we want to trace + +Forgetting to enter or exit a scope will result in unstarted and unfinished +spans that will not be reported (exported). + +At anytime where there is an active span ``set_attribute`` can be +used to set a tag on the current active span. + +Tracing functions +----------------- + +Functions can be easily traced using decorators. The name of the function +becomes the operation name for the span. + +.. code-block:: python + + from synapse.logging.tracing import trace + + # Start a span using 'interesting_function' as the operation name + @trace + def interesting_function(*args, **kwargs): + # Does all kinds of cool and expected things return + something_usual_and_useful + + +Operation names can be explicitly set for a function by using +``trace_with_opname``: + +.. code-block:: python + + from synapse.logging.tracing import trace_with_opname + + @trace_with_opname("a_better_operation_name") + def interesting_badly_named_function(*args, **kwargs): + # Does all kinds of cool and expected things return + something_usual_and_useful + +Setting Tags +------------ + +To set a tag on the active span do + +.. code-block:: python + + from synapse.logging.tracing import set_attribute + + set_attribute(tag_name, tag_value) + +There's a convenient decorator to tag all the args of the method. It uses +inspection in order to use the formal parameter names prefixed with 'ARG_' as +tag names. It uses kwarg names as tag names without the prefix. + +.. 
code-block:: python
+    from synapse.logging.tracing import tag_args
+    @tag_args
+    def set_fates(clotho, lachesis, atropos, father="Zeus", mother="Themis"):
+        pass
+    set_fates("the story", "the end", "the act")
+    # This will have the following tags
+    # - ARG_clotho: "the story"
+    # - ARG_lachesis: "the end"
+    # - ARG_atropos: "the act"
+    # - father: "Zeus"
+    # - mother: "Themis"
+
+Contexts and carriers
+---------------------
+
+There is a selection of wrappers for injecting and extracting contexts from
+carriers provided. We use these to inject OpenTelemetry Contexts into
+Twisted's http headers, EDU contents and our database tables. Please refer to
+the end of ``logging/tracing.py`` for the available injection and extraction
+methods.
+
+Homeserver whitelisting
+-----------------------
+
+Most of the whitelist checks are encapsulated in the module's injection and
+extraction methods, but be aware that using custom carriers or crossing
+uncharted waters will require the enforcement of the whitelist.
+``logging/tracing.py`` has a ``whitelisted_homeserver`` method which takes
+in a destination and compares it to the whitelist.
+
+Most injection methods take a 'destination' arg. The context will only be
+injected if the destination matches the whitelist or the destination is None.
+
+=======
+Gotchas
+=======
+
+- Checking whitelists on span propagation
+- Inserting pii
+- Forgetting to enter or exit a scope
+- Span source: make sure that the span you expect to be active across a function
+  call really will be that one. Does the current function have more than one
+  caller? Will all of those calling functions be in a context with an
+  active span? 
+""" +import contextlib +import inspect +import logging +import re +from abc import ABC +from functools import wraps +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Dict, + Generator, + Iterable, + List, + Optional, + Pattern, + Sequence, + TypeVar, + Union, + cast, + overload, +) + +from typing_extensions import ParamSpec + +from twisted.internet import defer +from twisted.web.http import Request +from twisted.web.http_headers import Headers + +from synapse.api.constants import EventContentFields +from synapse.config import ConfigError +from synapse.util import json_decoder + +if TYPE_CHECKING: + from synapse.http.site import SynapseRequest + from synapse.server import HomeServer + +# Helper class + +T = TypeVar("T") + +# Matches the number suffix in an instance name like "matrix.org client_reader-8" +STRIP_INSTANCE_NUMBER_SUFFIX_REGEX = re.compile(r"[_-]?\d+$") + + +class _DummyLookup(object): + """This will always returns the fixed value given for any accessed property""" + + def __init__(self, value: T) -> None: + self.value = value + + # type-ignore: Because mypy says "A function returning TypeVar should receive at + # least one argument containing the same Typevar" but this is just a dummy + # stand-in that doesn't need any input. 
+    def __getattribute__(self, name: str) -> T:  # type: ignore[type-var]
+        return object.__getattribute__(self, "value")
+
+
+class DummyLink(ABC):
+    """Dummy placeholder for `opentelemetry.trace.Link`"""
+
+    def __init__(self, *args: Any) -> None:
+        self.not_implemented_message = (
+            "opentelemetry isn't installed so this is just a dummy link placeholder"
+        )
+
+    @property
+    def context(self) -> None:
+        raise NotImplementedError(self.not_implemented_message)
+
+    @property
+    def attributes(self) -> None:
+        raise NotImplementedError(self.not_implemented_message)
+
+
+# These dependencies are optional so they can fail to import
+# and we fall back to the dummy no-op stand-ins above when they are missing.
+try:
+    import opentelemetry
+    import opentelemetry.exporter.jaeger.thrift
+    import opentelemetry.propagate
+    import opentelemetry.sdk.resources
+    import opentelemetry.sdk.trace
+    import opentelemetry.sdk.trace.export
+    import opentelemetry.semconv.trace
+    import opentelemetry.trace
+    import opentelemetry.trace.propagation
+    import opentelemetry.trace.status
+
+    SpanKind = opentelemetry.trace.SpanKind
+    SpanAttributes = opentelemetry.semconv.trace.SpanAttributes
+    StatusCode = opentelemetry.trace.status.StatusCode
+    Link = opentelemetry.trace.Link
+except ImportError:
+    opentelemetry = None  # type: ignore[assignment]
+    SpanKind = _DummyLookup(0)  # type: ignore
+    SpanAttributes = _DummyLookup("fake-attribute")  # type: ignore
+    StatusCode = _DummyLookup(0)  # type: ignore
+    Link = DummyLink  # type: ignore
+
+
+logger = logging.getLogger(__name__)
+
+
+class SynapseTags:
+    """FIXME: Rename to `SynapseAttributes` so it matches OpenTelemetry `SpanAttributes`"""
+
+    # The message ID of any to_device message processed
+    TO_DEVICE_MESSAGE_ID = "to_device.message_id"
+
+    # Whether the sync response has new data to be returned to the client. 
+ SYNC_RESULT = "sync.new_data" + + # The Synapse instance name + INSTANCE_NAME = "instance_name" + + # incoming HTTP request ID (as written in the logs) + REQUEST_ID = "request_id" + + # HTTP request tag (used to distinguish full vs incremental syncs, etc) + REQUEST_TAG = "request_tag" + + # Text description of a database transaction + DB_TXN_DESC = "db.txn_desc" + + # Uniqueish ID of a database transaction + DB_TXN_ID = "db.txn_id" + + # The name of the external cache + CACHE_NAME = "cache.name" + + # Used to tag function arguments + # + # Tag a named arg. The name of the argument should be appended to this prefix. + FUNC_ARG_PREFIX = "ARG." + # Tag extra variadic number of positional arguments (`def foo(first, second, *extras)`) + FUNC_ARGS = "args" + # Tag keyword args + FUNC_KWARGS = "kwargs" + + # Some intermediate result that's interesting to the function. The label for + # the result should be appended to this prefix. + RESULT_PREFIX = "RESULT." + + +class SynapseBaggage: + FORCE_TRACING = "synapse-force-tracing" + + +# Block everything by default +# A regex which matches the server_names to expose traces for. +# None means 'block everything'. +_homeserver_whitelist: Optional[Pattern[str]] = None + +# Util methods + + +P = ParamSpec("P") +R = TypeVar("R") + + +def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]: + """Decorator function that executes the function only if we're tracing. Otherwise returns None.""" + + @wraps(func) + def _only_if_tracing_inner(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: + if opentelemetry: + return func(*args, **kwargs) + else: + return None + + return _only_if_tracing_inner + + +@overload +def ensure_active_span( + message: str, +) -> Callable[[Callable[P, R]], Callable[P, Optional[R]]]: + ... + + +@overload +def ensure_active_span( + message: str, ret: T +) -> Callable[[Callable[P, R]], Callable[P, Union[T, R]]]: + ... 
+ + +def ensure_active_span( + message: str, ret: Optional[T] = None +) -> Callable[[Callable[P, R]], Callable[P, Union[Optional[T], R]]]: + """Executes the operation only if opentelemetry is enabled and there is an active span. + If there is no active span it logs message at the error level. + + Args: + message: Message which fills in "There was no active span when trying to %s" + in the error log if there is no active span and opentelemetry is enabled. + ret: return value if opentelemetry is None or there is no active span. + + Returns: + The result of the func, falling back to ret if opentelemetry is disabled or there + was no active span. + """ + + def ensure_active_span_inner_1( + func: Callable[P, R] + ) -> Callable[P, Union[Optional[T], R]]: + @wraps(func) + def ensure_active_span_inner_2( + *args: P.args, **kwargs: P.kwargs + ) -> Union[Optional[T], R]: + if not opentelemetry: + return ret + + if not opentelemetry.trace.get_current_span(): + logger.error( + "There was no active span when trying to %s." + " Did you forget to start one or did a context slip?", + message, + stack_info=True, + ) + + return ret + + return func(*args, **kwargs) + + return ensure_active_span_inner_2 + + return ensure_active_span_inner_1 + + +# Setup + + +def init_tracer(hs: "HomeServer") -> None: + """Set the whitelists and initialise the OpenTelemetry tracer""" + global opentelemetry + if not hs.config.tracing.tracing_enabled: + # We don't have a tracer + opentelemetry = None # type: ignore[assignment] + return + + if not opentelemetry: + raise ConfigError( + "The server has been configured to use OpenTelemetry but OpenTelemetry is not " + "installed." + ) + + # Pull out of the config if it was given. Otherwise set it to something sensible. + set_homeserver_whitelist(hs.config.tracing.homeserver_whitelist) + + # Instance names are opaque strings but by stripping off the number suffix, + # we can get something that looks like a "worker type", e.g. 
+ # "client_reader-1" -> "client_reader" so we don't spread the traces across + # so many services. + instance_name_by_type = re.sub( + STRIP_INSTANCE_NUMBER_SUFFIX_REGEX, "", hs.get_instance_name() + ) + + resource = opentelemetry.sdk.resources.Resource( + attributes={ + opentelemetry.sdk.resources.SERVICE_NAME: f"{hs.config.server.server_name} {instance_name_by_type}" + } + ) + + # TODO: `force_tracing_for_users` is not compatible with OTEL samplers + # because you can only determine `opentelemetry.trace.TraceFlags.SAMPLED` + # and whether it uses a recording span when the span is created and we don't + # have enough information at that time (we can determine in + # `synapse/api/auth.py`). There isn't a way to change the trace flags after + # the fact so there is no way to programmatically force + # recording/tracing/sampling like there was in opentracing. + sampler = opentelemetry.sdk.trace.sampling.ParentBasedTraceIdRatio( + hs.config.tracing.sample_rate + ) + + tracer_provider = opentelemetry.sdk.trace.TracerProvider( + resource=resource, sampler=sampler + ) + + # consoleProcessor = opentelemetry.sdk.trace.export.BatchSpanProcessor( + # opentelemetry.sdk.trace.export.ConsoleSpanExporter() + # ) + # tracer_provider.add_span_processor(consoleProcessor) + + jaeger_exporter = opentelemetry.exporter.jaeger.thrift.JaegerExporter( + **hs.config.tracing.jaeger_exporter_config + ) + jaeger_processor = opentelemetry.sdk.trace.export.BatchSpanProcessor( + jaeger_exporter + ) + tracer_provider.add_span_processor(jaeger_processor) + + # Sets the global default tracer provider + opentelemetry.trace.set_tracer_provider(tracer_provider) + + +# Whitelisting + + +@only_if_tracing +def set_homeserver_whitelist(homeserver_whitelist: Iterable[str]) -> None: + """Sets the homeserver whitelist + + Args: + homeserver_whitelist: regexes specifying whitelisted homeservers + """ + global _homeserver_whitelist + if homeserver_whitelist: + # Makes a single regex which accepts all passed 
in regexes in the list + _homeserver_whitelist = re.compile( + "({})".format(")|(".join(homeserver_whitelist)) + ) + + +@only_if_tracing +def whitelisted_homeserver(destination: str) -> bool: + """Checks if a destination matches the whitelist + + Args: + destination + """ + + if _homeserver_whitelist: + return _homeserver_whitelist.match(destination) is not None + return False + + +# Start spans and scopes + + +def use_span( + span: Optional["opentelemetry.trace.Span"], + end_on_exit: bool = True, +) -> ContextManager[Optional["opentelemetry.trace.Span"]]: + if opentelemetry is None or span is None: + return contextlib.nullcontext() + + return opentelemetry.trace.use_span(span=span, end_on_exit=end_on_exit) + + +def create_non_recording_span() -> Optional["opentelemetry.trace.Span"]: + """Create a no-op span that does not record or become part of a recorded trace""" + if opentelemetry is None: + return None # type: ignore[unreachable] + + return opentelemetry.trace.NonRecordingSpan( + opentelemetry.trace.INVALID_SPAN_CONTEXT + ) + + +def start_span( + name: str, + *, + context: Optional["opentelemetry.context.context.Context"] = None, + kind: Optional["opentelemetry.trace.SpanKind"] = SpanKind.INTERNAL, + attributes: "opentelemetry.util.types.Attributes" = None, + links: Optional[Sequence["opentelemetry.trace.Link"]] = None, + start_time: Optional[int] = None, + record_exception: bool = True, + set_status_on_exception: bool = True, + end_on_exit: bool = True, + # For testing only + tracer: Optional["opentelemetry.trace.Tracer"] = None, +) -> "opentelemetry.trace.Span": + if opentelemetry is None: + raise Exception("Not able to create span without opentelemetry installed.") + + if tracer is None: + tracer = opentelemetry.trace.get_tracer(__name__) + + # TODO: Why is this necessary to satisfy this error? It has a default? 
+ # ` error: Argument "kind" to "start_span" of "Tracer" has incompatible type "Optional[SpanKind]"; expected "SpanKind" [arg-type]` + if kind is None: + kind = SpanKind.INTERNAL + + return tracer.start_span( + name=name, + context=context, + kind=kind, + attributes=attributes, + links=links, + start_time=start_time, + record_exception=record_exception, + set_status_on_exception=set_status_on_exception, + ) + + +def start_active_span( + name: str, + *, + context: Optional["opentelemetry.context.context.Context"] = None, + kind: "opentelemetry.trace.SpanKind" = SpanKind.INTERNAL, + attributes: "opentelemetry.util.types.Attributes" = None, + links: Optional[Sequence["opentelemetry.trace.Link"]] = None, + start_time: Optional[int] = None, + record_exception: bool = True, + set_status_on_exception: bool = True, + end_on_exit: bool = True, + # For testing only + tracer: Optional["opentelemetry.trace.Tracer"] = None, +) -> ContextManager[Optional["opentelemetry.trace.Span"]]: + if opentelemetry is None: + return contextlib.nullcontext() # type: ignore[unreachable] + + span = start_span( + name=name, + context=context, + kind=kind, + attributes=attributes, + links=links, + start_time=start_time, + record_exception=record_exception, + set_status_on_exception=set_status_on_exception, + tracer=tracer, + ) + + # Equivalent to `tracer.start_as_current_span` + return opentelemetry.trace.use_span( + span, + end_on_exit=end_on_exit, + record_exception=record_exception, + set_status_on_exception=set_status_on_exception, + ) + + +def start_active_span_from_edu( + operation_name: str, + *, + edu_content: Dict[str, Any], +) -> ContextManager[Optional["opentelemetry.trace.Span"]]: + """ + Extracts a span context from an edu and uses it to start a new active span + + Args: + operation_name: The label for the chunk of time used to process the given edu. + edu_content: an edu_content with a `context` field whose value is + canonical json for a dict which contains tracing information. 
+ """ + if opentelemetry is None: + return contextlib.nullcontext() # type: ignore[unreachable] + + carrier = json_decoder.decode( + edu_content.get(EventContentFields.TRACING_CONTEXT, "{}") + ) + + context = extract_text_map(carrier) + + return start_active_span(name=operation_name, context=context) + + +# OpenTelemetry setters for attributes, logs, etc +@only_if_tracing +def get_active_span() -> Optional["opentelemetry.trace.Span"]: + """Get the currently active span, if any""" + return opentelemetry.trace.get_current_span() + + +def get_span_context_from_context( + context: "opentelemetry.context.context.Context", +) -> Optional["opentelemetry.trace.SpanContext"]: + """Utility function to convert a `Context` to a `SpanContext` + + Based on https://github.com/open-telemetry/opentelemetry-python/blob/43288ca9a36144668797c11ca2654836ec8b5e99/opentelemetry-api/src/opentelemetry/trace/propagation/tracecontext.py#L99-L102 + """ + span = opentelemetry.trace.get_current_span(context=context) + span_context = span.get_span_context() + if span_context == opentelemetry.trace.INVALID_SPAN_CONTEXT: + return None + return span_context + + +def get_context_from_span( + span: "opentelemetry.trace.Span", +) -> "opentelemetry.context.context.Context": + # This doesn't affect the current context at all, it just converts a span + # into `Context` object basically (bad name). 
+    ctx = opentelemetry.trace.propagation.set_span_in_context(span)
+    return ctx
+
+
+@ensure_active_span("set a tag")
+def set_attribute(key: str, value: Union[str, bool, int, float]) -> None:
+    """Sets an attribute on the active span"""
+    active_span = get_active_span()
+    assert active_span is not None
+    active_span.set_attribute(key, value)
+
+
+@ensure_active_span("set the status")
+def set_status(
+    status_code: "opentelemetry.trace.status.StatusCode", exc: Optional[Exception]
+) -> None:
+    """Sets the status on the active span and optionally records an exception"""
+    active_span = get_active_span()
+    assert active_span is not None
+    active_span.set_status(opentelemetry.trace.status.Status(status_code=status_code))
+    if exc:
+        active_span.record_exception(exc)
+
+
+DEFAULT_LOG_NAME = "log"
+
+
+@ensure_active_span("log")
+def log_kv(key_values: Dict[str, Any], timestamp: Optional[int] = None) -> None:
+    """Log to the active span"""
+    active_span = get_active_span()
+    assert active_span is not None
+    event_name = key_values.get("event", DEFAULT_LOG_NAME)
+    active_span.add_event(event_name, attributes=key_values, timestamp=timestamp)
+
+
+@only_if_tracing
+def force_tracing(span: Optional["opentelemetry.trace.Span"] = None) -> None:
+    """Force sampling for the active/given span and its children.
+
+    Args:
+        span: span to force tracing for. By default, the active span. 
+ """ + # TODO + pass + + +def is_context_forced_tracing( + context: "opentelemetry.context.context.Context", +) -> bool: + """Check if sampling has been force for the given span context.""" + # TODO + return False + + +# Injection and extraction + + +@ensure_active_span("inject the active tracing context into a header dict") +def inject_active_tracing_context_into_header_dict( + headers: Dict[bytes, List[bytes]], + destination: Optional[str] = None, + check_destination: bool = True, +) -> None: + """ + Injects the active tracing context into a dict of HTTP headers + + Args: + headers: the dict to inject headers into + destination: address of entity receiving the span context. Must be given unless + `check_destination` is False. + check_destination (bool): If False, destination will be ignored and the context + will always be injected. If True, the context will only be injected if the + destination matches the tracing allowlist + + Note: + The headers set by the tracer are custom to the tracer implementation which + should be unique enough that they don't interfere with any headers set by + synapse or twisted. 
+ """ + if check_destination: + if destination is None: + raise ValueError( + "destination must be given unless check_destination is False" + ) + if not whitelisted_homeserver(destination): + return + + active_span = get_active_span() + assert active_span is not None + ctx = get_context_from_span(active_span) + + propagator = opentelemetry.propagate.get_global_textmap() + # Put all of SpanContext properties into the headers dict + propagator.inject(headers, context=ctx) + + +def inject_trace_id_into_response_headers(response_headers: Headers) -> None: + """Inject the current trace id into the HTTP response headers""" + if not opentelemetry: + return + active_span = get_active_span() + if not active_span: + return + + trace_id = active_span.get_span_context().trace_id + + if trace_id is not None: + response_headers.addRawHeader("Synapse-Trace-Id", f"{trace_id:x}") + + +@ensure_active_span( + "get the active span context as a dict", ret=cast(Dict[str, str], {}) +) +def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str]: + """ + Gets the active tracing Context serialized as a dict. This can be used + instead of manually injecting a span into an empty carrier. + + Args: + destination: the name of the remote server. + + Returns: + dict: the serialized active span's context if opentelemetry is enabled, otherwise + empty. 
+ """ + if destination and not whitelisted_homeserver(destination): + return {} + + active_span = get_active_span() + assert active_span is not None + ctx = get_context_from_span(active_span) + + carrier_text_map: Dict[str, str] = {} + propagator = opentelemetry.propagate.get_global_textmap() + # Put all of Context properties onto the carrier text map that we can return + propagator.inject(carrier_text_map, context=ctx) + + return carrier_text_map + + +def context_from_request( + request: Request, +) -> Optional["opentelemetry.context.context.Context"]: + """Extract an opentelemetry context from the headers on an HTTP request + + This is useful when we have received an HTTP request from another part of our + system, and want to link our spans to those of the remote system. + """ + if not opentelemetry: + return None + header_dict = { + k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders() + } + + # Extract all of the relevant values from the headers to construct a + # SpanContext to return. + return extract_text_map(header_dict) + + +@only_if_tracing +def extract_text_map( + carrier: Dict[str, str] +) -> Optional["opentelemetry.context.context.Context"]: + """ + Wrapper method for opentelemetry's propagator.extract for TEXT_MAP. + Args: + carrier: a dict possibly containing a context. + + Returns: + The active context extracted from carrier. + """ + propagator = opentelemetry.propagate.get_global_textmap() + # Extract all of the relevant values from the `carrier` to construct a + # Context to return. + return propagator.extract(carrier) + + +# Tracing decorators + + +def _custom_sync_async_decorator( + func: Callable[P, R], + wrapping_logic: Callable[[Callable[P, R], Any, Any], ContextManager[None]], +) -> Callable[P, R]: + """ + Decorates a function that is sync or async (coroutines), or that returns a Twisted + `Deferred`. The custom business logic of the decorator goes in `wrapping_logic`. 
+ Example usage: + ```py + # Decorator to time the function and log it out + def duration(func: Callable[P, R]) -> Callable[P, R]: + @contextlib.contextmanager + def _wrapping_logic(func: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> Generator[None, None, None]: + start_ts = time.time() + try: + yield + finally: + end_ts = time.time() + duration = end_ts - start_ts + logger.info("%s took %s seconds", func.__name__, duration) + return _custom_sync_async_decorator(func, _wrapping_logic) + ``` + Args: + func: The function to be decorated + wrapping_logic: The business logic of your custom decorator. + This should be a ContextManager so you are able to run your logic + before/after the function as desired. + """ + + if inspect.iscoroutinefunction(func): + + @wraps(func) + async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + with wrapping_logic(func, *args, **kwargs): + return await func(*args, **kwargs) # type: ignore[misc] + + else: + # The other case here handles both sync functions and those + # decorated with inlineDeferred. + @wraps(func) + def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + scope = wrapping_logic(func, *args, **kwargs) + scope.__enter__() + + try: + result = func(*args, **kwargs) + if isinstance(result, defer.Deferred): + + def call_back(result: R) -> R: + scope.__exit__(None, None, None) + return result + + def err_back(result: R) -> R: + scope.__exit__(None, None, None) + return result + + result.addCallbacks(call_back, err_back) + + else: + if inspect.isawaitable(result): + logger.error( + "@trace may not have wrapped %s correctly! 
" + "The function is not async but returned a %s.", + func.__qualname__, + type(result).__name__, + ) + + scope.__exit__(None, None, None) + + return result + + except Exception as e: + scope.__exit__(type(e), None, e.__traceback__) + raise + + return _wrapper # type: ignore[return-value] + + +def trace_with_opname( + opname: str, + *, + tracer: Optional["opentelemetry.trace.Tracer"] = None, +) -> Callable[[Callable[P, R]], Callable[P, R]]: + """ + Decorator to trace a function with a custom opname. + See the module's doc string for usage examples. + """ + # type-ignore: mypy bug, see https://github.com/python/mypy/issues/12909 + @contextlib.contextmanager # type: ignore[arg-type] + def _wrapping_logic( + func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + ) -> Generator[None, None, None]: + with start_active_span(opname, tracer=tracer): + yield + + def _decorator(func: Callable[P, R]) -> Callable[P, R]: + if not opentelemetry: + return func + + return _custom_sync_async_decorator(func, _wrapping_logic) + + return _decorator + + +def trace(func: Callable[P, R]) -> Callable[P, R]: + """ + Decorator to trace a function. + + Sets the operation name to that of the function's name. + + See the module's doc string for usage examples. + """ + + return trace_with_opname(func.__name__)(func) + + +def tag_args(func: Callable[P, R]) -> Callable[P, R]: + """ + Tags all of the args to the active span. + """ + + if not opentelemetry: + return func + + # type-ignore: mypy bug, see https://github.com/python/mypy/issues/12909 + @contextlib.contextmanager # type: ignore[arg-type] + def _wrapping_logic( + func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + ) -> Generator[None, None, None]: + argspec = inspect.getfullargspec(func) + # We use `[1:]` to skip the `self` object reference and `start=1` to + # make the index line up with `argspec.args`. 
+ # + # FIXME: We could update this to handle any type of function by ignoring the + # first argument only if it's named `self` or `cls`. This isn't fool-proof + # but handles the idiomatic cases. + for i, arg in enumerate(args[1:], start=1): + set_attribute(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg)) + set_attribute(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :])) + set_attribute(SynapseTags.FUNC_KWARGS, str(kwargs)) + yield + + return _custom_sync_async_decorator(func, _wrapping_logic) + + +@contextlib.contextmanager +def trace_servlet( + request: "SynapseRequest", extract_context: bool = False +) -> Generator[None, None, None]: + """Returns a context manager which traces a request. It starts a span + with some servlet specific tags such as the request metrics name and + request information. + + Args: + request + extract_context: Whether to attempt to extract the tracing + context from the request the servlet is handling. + """ + + if opentelemetry is None: + yield # type: ignore[unreachable] + return + + attrs = { + SynapseTags.REQUEST_ID: request.get_request_id(), + SpanAttributes.HTTP_METHOD: request.get_method(), + SpanAttributes.HTTP_URL: request.get_redacted_uri(), + SpanAttributes.HTTP_HOST: request.getClientAddress().host, + } + + request_name = request.request_metrics.name + tracing_context = context_from_request(request) if extract_context else None + + # This is will end up being the root span for all of servlet traces and we + # aren't able to determine whether to force tracing yet. We can determine + # whether to force trace later in `synapse/api/auth.py`. + with start_active_span( + request_name, + kind=SpanKind.SERVER, + context=tracing_context, + attributes=attrs, + # we configure the span not to finish immediately on exiting the scope, + # and instead pass the span into the SynapseRequest (via + # `request.set_tracing_span(span)`), which will finish it once we've + # finished sending the response to the client. 
+ end_on_exit=False, + ) as span: + if span: + request.set_tracing_span(span) + + inject_trace_id_into_response_headers(request.responseHeaders) + try: + yield + finally: + if span: + # We set the operation name again in case its changed (which happens + # with JsonResource). + span.update_name(request.request_metrics.name) + + if request.request_metrics.start_context.tag is not None: + span.set_attribute( + SynapseTags.REQUEST_TAG, + request.request_metrics.start_context.tag, + ) diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 9ea4e23b3107..1de06c385064 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -42,7 +42,7 @@ LoggingContext, PreserveLoggingContext, ) -from synapse.logging.opentracing import SynapseTags, start_active_span +from synapse.logging.tracing import SynapseTags, start_active_span from synapse.metrics._types import Collector if TYPE_CHECKING: @@ -210,7 +210,7 @@ def run_as_background_process( Args: desc: a description for this background process type func: a function, which may return a Deferred or a coroutine - bg_start_span: Whether to start an opentracing span. Defaults to True. + bg_start_span: Whether to start an tracing span. Defaults to True. Should only be disabled for processes that will not log to or tag a span. 
args: positional args for func @@ -234,10 +234,11 @@ async def run() -> Optional[R]: try: if bg_start_span: ctx = start_active_span( - f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)} + f"bgproc.{desc}", + attributes={SynapseTags.REQUEST_ID: str(context)}, ) else: - ctx = nullcontext() # type: ignore[assignment] + ctx = nullcontext() with ctx: return await func(*args, **kwargs) except Exception: diff --git a/synapse/notifier.py b/synapse/notifier.py index 26b97cf766c3..fe7d154659c4 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -39,7 +39,7 @@ from synapse.handlers.presence import format_user_presence_state from synapse.logging import issue9533_logger from synapse.logging.context import PreserveLoggingContext -from synapse.logging.opentracing import log_kv, start_active_span +from synapse.logging.tracing import log_kv, start_active_span from synapse.metrics import LaterGauge from synapse.streams.config import PaginationConfig from synapse.types import ( @@ -543,7 +543,7 @@ async def wait_for_events( log_kv( { "wait_for_events": "sleep", - "token": prev_token, + "token": str(prev_token), } ) @@ -553,7 +553,7 @@ async def wait_for_events( log_kv( { "wait_for_events": "woken", - "token": user_stream.current_token, + "token": str(user_stream.current_token), } ) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index b048b03a7477..3149492edea0 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -23,7 +23,7 @@ from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.logging import opentracing +from synapse.logging import tracing from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push import Pusher, PusherConfig, PusherConfigException from synapse.storage.databases.main.event_push_actions import HttpPushAction @@ -231,9 +231,9 @@ async def _unsafe_process(self) -> None: ) for push_action in unprocessed: - with 
opentracing.start_active_span( + with tracing.start_active_span( "http-push", - tags={ + attributes={ "authenticated_entity": self.user_id, "event_id": push_action.event_id, "app_id": self.app_id, diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 3f4d3fc51ae3..3f012f1b5ea3 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -28,8 +28,8 @@ from synapse.http import RequestTimedOutError from synapse.http.server import HttpServer from synapse.http.site import SynapseRequest -from synapse.logging import opentracing -from synapse.logging.opentracing import trace_with_opname +from synapse.logging import tracing +from synapse.logging.tracing import trace_with_opname from synapse.types import JsonDict from synapse.util.caches.response_cache import ResponseCache from synapse.util.cancellation import is_function_cancellable @@ -257,7 +257,9 @@ async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: # Add an authorization header, if configured. 
if replication_secret: headers[b"Authorization"] = [b"Bearer " + replication_secret] - opentracing.inject_header_dict(headers, check_destination=False) + tracing.inject_active_tracing_context_into_header_dict( + headers, check_destination=False + ) try: # Keep track of attempts made so we can bail if we don't manage to diff --git a/synapse/replication/tcp/external_cache.py b/synapse/replication/tcp/external_cache.py index a448dd7eb148..e928fded3644 100644 --- a/synapse/replication/tcp/external_cache.py +++ b/synapse/replication/tcp/external_cache.py @@ -17,7 +17,7 @@ from prometheus_client import Counter, Histogram -from synapse.logging import opentracing +from synapse.logging import tracing from synapse.logging.context import make_deferred_yieldable from synapse.util import json_decoder, json_encoder @@ -94,9 +94,9 @@ async def set(self, cache_name: str, key: str, value: Any, expiry_ms: int) -> No logger.debug("Caching %s %s: %r", cache_name, key, encoded_value) - with opentracing.start_active_span( + with tracing.start_active_span( "ExternalCache.set", - tags={opentracing.SynapseTags.CACHE_NAME: cache_name}, + attributes={tracing.SynapseTags.CACHE_NAME: cache_name}, ): with response_timer.labels("set").time(): return await make_deferred_yieldable( @@ -113,9 +113,9 @@ async def get(self, cache_name: str, key: str) -> Optional[Any]: if self._redis_connection is None: return None - with opentracing.start_active_span( + with tracing.start_active_span( "ExternalCache.get", - tags={opentracing.SynapseTags.CACHE_NAME: cache_name}, + attributes={tracing.SynapseTags.CACHE_NAME: cache_name}, ): with response_timer.labels("get").time(): result = await make_deferred_yieldable( diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index ee038c71921a..bb6a3ef9e57a 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -26,7 +26,7 @@ parse_string, ) from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import 
log_kv, set_tag +from synapse.logging.tracing import log_kv, set_attribute from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet from synapse.rest.client._base import client_patterns, interactive_auth_handler from synapse.types import JsonDict, StreamToken @@ -119,7 +119,7 @@ async def on_POST( user_id ) if dehydrated_device is not None and device_id != dehydrated_device[0]: - set_tag("error", True) + set_attribute("error", True) log_kv( { "message": "Client uploading keys for a different device", @@ -237,13 +237,13 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) from_token_string = parse_string(request, "from", required=True) - set_tag("from", from_token_string) + set_attribute("from", from_token_string) # We want to enforce they do pass us one, but we ignore it and return # changes after the "to" as well as before. # # XXX This does not enforce that "to" is passed. 
- set_tag("to", str(parse_string(request, "to"))) + set_attribute("to", str(parse_string(request, "to"))) from_token = await StreamToken.from_string(self.store, from_token_string) diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py index ad025c8a4529..8201f2bd86a4 100644 --- a/synapse/rest/client/knock.py +++ b/synapse/rest/client/knock.py @@ -24,7 +24,7 @@ parse_strings_from_args, ) from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import set_tag +from synapse.logging.tracing import set_attribute from synapse.rest.client.transactions import HttpTransactionCache from synapse.types import JsonDict, RoomAlias, RoomID @@ -97,7 +97,7 @@ async def on_POST( def on_PUT( self, request: SynapseRequest, room_identifier: str, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: - set_tag("txn_id", txn_id) + set_attribute("txn_id", txn_id) return self.txns.fetch_or_execute_request( request, self.on_POST, request, room_identifier, txn_id diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 91cb791139b1..cd9679ce1aef 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -51,7 +51,7 @@ ) from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.logging.opentracing import set_tag +from synapse.logging.tracing import set_attribute from synapse.metrics.background_process_metrics import run_as_background_process from synapse.rest.client._base import client_patterns from synapse.rest.client.transactions import HttpTransactionCache @@ -153,7 +153,7 @@ def register(self, http_server: HttpServer) -> None: def on_PUT( self, request: SynapseRequest, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: - set_tag("txn_id", txn_id) + set_attribute("txn_id", txn_id) return self.txns.fetch_or_execute_request(request, self.on_POST, request) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: @@ -265,7 
+265,7 @@ async def on_PUT( requester = await self.auth.get_user_by_req(request, allow_guest=True) if txn_id: - set_tag("txn_id", txn_id) + set_attribute("txn_id", txn_id) content = parse_json_object_from_request(request) @@ -308,7 +308,7 @@ async def on_PUT( except ShadowBanError: event_id = "$" + random_string(43) - set_tag("event_id", event_id) + set_attribute("event_id", event_id) ret = {"event_id": event_id} return 200, ret @@ -358,7 +358,7 @@ async def on_POST( except ShadowBanError: event_id = "$" + random_string(43) - set_tag("event_id", event_id) + set_attribute("event_id", event_id) return 200, {"event_id": event_id} def on_GET( @@ -369,7 +369,7 @@ def on_GET( def on_PUT( self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: - set_tag("txn_id", txn_id) + set_attribute("txn_id", txn_id) return self.txns.fetch_or_execute_request( request, self.on_POST, request, room_id, event_type, txn_id @@ -427,7 +427,7 @@ async def on_POST( def on_PUT( self, request: SynapseRequest, room_identifier: str, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: - set_tag("txn_id", txn_id) + set_attribute("txn_id", txn_id) return self.txns.fetch_or_execute_request( request, self.on_POST, request, room_identifier, txn_id @@ -915,7 +915,7 @@ async def on_POST( def on_PUT( self, request: SynapseRequest, room_id: str, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: - set_tag("txn_id", txn_id) + set_attribute("txn_id", txn_id) return self.txns.fetch_or_execute_request( request, self.on_POST, request, room_id, txn_id @@ -1018,7 +1018,7 @@ async def on_POST( def on_PUT( self, request: SynapseRequest, room_id: str, membership_action: str, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: - set_tag("txn_id", txn_id) + set_attribute("txn_id", txn_id) return self.txns.fetch_or_execute_request( request, self.on_POST, request, room_id, membership_action, txn_id @@ -1092,13 +1092,13 @@ async def on_POST( except ShadowBanError: 
event_id = "$" + random_string(43) - set_tag("event_id", event_id) + set_attribute("event_id", event_id) return 200, {"event_id": event_id} def on_PUT( self, request: SynapseRequest, room_id: str, event_id: str, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: - set_tag("txn_id", txn_id) + set_attribute("txn_id", txn_id) return self.txns.fetch_or_execute_request( request, self.on_POST, request, room_id, event_id, txn_id diff --git a/synapse/rest/client/sendtodevice.py b/synapse/rest/client/sendtodevice.py index 46a8b0382978..0df05ba52cf3 100644 --- a/synapse/rest/client/sendtodevice.py +++ b/synapse/rest/client/sendtodevice.py @@ -19,7 +19,7 @@ from synapse.http.server import HttpServer from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import set_tag +from synapse.logging.tracing import set_attribute from synapse.rest.client.transactions import HttpTransactionCache from synapse.types import JsonDict @@ -46,8 +46,8 @@ def __init__(self, hs: "HomeServer"): def on_PUT( self, request: SynapseRequest, message_type: str, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: - set_tag("message_type", message_type) - set_tag("txn_id", txn_id) + set_attribute("message_type", message_type) + set_attribute("txn_id", txn_id) return self.txns.fetch_or_execute_request( request, self._put, request, message_type, txn_id ) diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index f2013faeb206..a044593e31d8 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -37,7 +37,7 @@ from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import trace_with_opname +from synapse.logging.tracing import trace_with_opname from synapse.types import JsonDict, StreamToken from synapse.util 
import json_decoder diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 33ffef521b87..c893a4f0df7a 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -46,11 +46,12 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable -from synapse.logging.opentracing import ( +from synapse.logging.tracing import ( + Link, SynapseTags, - active_span, - set_tag, - start_active_span_follows_from, + get_active_span, + set_attribute, + start_active_span, trace, ) from synapse.metrics.background_process_metrics import run_as_background_process @@ -124,7 +125,7 @@ class _PersistEventsTask: """A batch of events to persist.""" - name: ClassVar[str] = "persist_event_batch" # used for opentracing + name: ClassVar[str] = "persist_event_batch" # used for tracing events_and_contexts: List[Tuple[EventBase, EventContext]] backfilled: bool @@ -145,7 +146,7 @@ def try_merge(self, task: "_EventPersistQueueTask") -> bool: class _UpdateCurrentStateTask: """A room whose current state needs recalculating.""" - name: ClassVar[str] = "update_current_state" # used for opentracing + name: ClassVar[str] = "update_current_state" # used for tracing def try_merge(self, task: "_EventPersistQueueTask") -> bool: """Deduplicates consecutive recalculations of current state.""" @@ -160,11 +161,11 @@ class _EventPersistQueueItem: task: _EventPersistQueueTask deferred: ObservableDeferred - parent_opentracing_span_contexts: List = attr.ib(factory=list) - """A list of opentracing spans waiting for this batch""" + parent_tracing_span_contexts: List = attr.ib(factory=list) + """A list of tracing spans waiting for this batch""" - opentracing_span_context: Any = None - """The opentracing span under which the persistence actually happened""" + tracing_span_context: Any = None + """The 
tracing span under which the persistence actually happened""" _PersistResult = TypeVar("_PersistResult") @@ -227,10 +228,10 @@ async def add_to_queue( ) queue.append(end_item) - # also add our active opentracing span to the item so that we get a link back - span = active_span() + # also add our active tracing span to the item so that we get a link back + span = get_active_span() if span: - end_item.parent_opentracing_span_contexts.append(span.context) + end_item.parent_tracing_span_contexts.append(span.get_span_context()) # start a processor for the queue, if there isn't one already self._handle_queue(room_id) @@ -238,9 +239,10 @@ async def add_to_queue( # wait for the queue item to complete res = await make_deferred_yieldable(end_item.deferred.observe()) - # add another opentracing span which links to the persist trace. - with start_active_span_follows_from( - f"{task.name}_complete", (end_item.opentracing_span_context,) + # add another tracing span which links to the persist trace. + with start_active_span( + f"{task.name}_complete", + links=[Link(end_item.tracing_span_context)], ): pass @@ -271,13 +273,15 @@ async def handle_queue_loop() -> None: queue = self._get_drainining_queue(room_id) for item in queue: try: - with start_active_span_follows_from( + with start_active_span( item.task.name, - item.parent_opentracing_span_contexts, - inherit_force_tracing=True, - ) as scope: - if scope: - item.opentracing_span_context = scope.span.context + links=[ + Link(span_context) + for span_context in item.parent_tracing_span_contexts + ], + ) as span: + if span: + item.tracing_span_context = span.get_span_context() ret = await self._per_item_callback(room_id, item.task) except Exception: @@ -391,15 +395,15 @@ async def persist_events( partitioned.setdefault(event.room_id, []).append((event, ctx)) event_ids.append(event.event_id) - set_tag( + set_attribute( SynapseTags.FUNC_ARG_PREFIX + "event_ids", str(event_ids), ) - set_tag( + set_attribute( SynapseTags.FUNC_ARG_PREFIX 
+ "event_ids.length", str(len(event_ids)), ) - set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled)) + set_attribute(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled)) async def enqueue( item: Tuple[str, List[Tuple[EventBase, EventContext]]] diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 2b31ce54bb75..263b26e3456e 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -29,7 +29,7 @@ from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.logging.opentracing import tag_args, trace +from synapse.logging.tracing import tag_args, trace from synapse.storage.roommember import ProfileInfo from synapse.storage.state import StateFilter from synapse.storage.util.partial_state_events_tracker import ( diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 0dc44b246cbb..174d18cc2a9b 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -47,7 +47,7 @@ from synapse.api.errors import StoreError from synapse.config.database import DatabaseConnectionConfig -from synapse.logging import opentracing +from synapse.logging import tracing from synapse.logging.context import ( LoggingContext, current_context, @@ -426,11 +426,11 @@ def _do_execute( start = time.time() try: - with opentracing.start_active_span( + with tracing.start_active_span( "db.query", - tags={ - opentracing.tags.DATABASE_TYPE: "sql", - opentracing.tags.DATABASE_STATEMENT: one_line_sql, + attributes={ + tracing.SpanAttributes.DB_SYSTEM: "sql", + tracing.SpanAttributes.DB_STATEMENT: one_line_sql, }, ): return func(sql, *args, **kwargs) @@ -700,15 +700,15 @@ def new_transaction( exception_callbacks=exception_callbacks, ) try: - with opentracing.start_active_span( + with tracing.start_active_span( "db.txn", - tags={ - opentracing.SynapseTags.DB_TXN_DESC: desc, - opentracing.SynapseTags.DB_TXN_ID: name, + attributes={ + 
tracing.SynapseTags.DB_TXN_DESC: desc, + tracing.SynapseTags.DB_TXN_ID: name, }, ): r = func(cursor, *args, **kwargs) - opentracing.log_kv({"message": "commit"}) + tracing.log_kv({"message": "commit"}) conn.commit() return r except self.engine.module.OperationalError as e: @@ -724,7 +724,7 @@ def new_transaction( if i < N: i += 1 try: - with opentracing.start_active_span("db.rollback"): + with tracing.start_active_span("db.rollback"): conn.rollback() except self.engine.module.Error as e1: transaction_logger.warning("[TXN EROLL] {%s} %s", name, e1) @@ -738,7 +738,7 @@ def new_transaction( if i < N: i += 1 try: - with opentracing.start_active_span("db.rollback"): + with tracing.start_active_span("db.rollback"): conn.rollback() except self.engine.module.Error as e1: transaction_logger.warning( @@ -844,7 +844,7 @@ async def _runInteraction() -> R: logger.warning("Starting db txn '%s' from sentinel context", desc) try: - with opentracing.start_active_span(f"db.{desc}"): + with tracing.start_active_span(f"db.{desc}"): result = await self.runWithConnection( self.new_transaction, desc, @@ -927,9 +927,7 @@ def inner_func(conn, *args, **kwargs): with LoggingContext( str(curr_context), parent_context=parent_context ) as context: - with opentracing.start_active_span( - operation_name="db.connection", - ): + with tracing.start_active_span("db.connection"): sched_duration_sec = monotonic_time() - start_time sql_scheduling_timer.observe(sched_duration_sec) context.add_database_scheduled(sched_duration_sec) @@ -943,15 +941,13 @@ def inner_func(conn, *args, **kwargs): "Reconnecting database connection over transaction limit" ) conn.reconnect() - opentracing.log_kv( - {"message": "reconnected due to txn limit"} - ) + tracing.log_kv({"message": "reconnected due to txn limit"}) self._txn_counters[tid] = 1 if self.engine.is_connection_closed(conn): logger.debug("Reconnecting closed database connection") conn.reconnect() - opentracing.log_kv({"message": "reconnected"}) + 
tracing.log_kv({"message": "reconnected"}) if self._txn_limit > 0: self._txn_counters[tid] = 1 diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 73c95ffb6f28..1503d74b1f77 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -27,7 +27,7 @@ ) from synapse.logging import issue9533_logger -from synapse.logging.opentracing import log_kv, set_tag, trace +from synapse.logging.tracing import log_kv, set_attribute, trace from synapse.replication.tcp.streams import ToDeviceStream from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( @@ -436,7 +436,7 @@ async def delete_messages_for_device( (user_id, device_id), None ) - set_tag("last_deleted_stream_id", str(last_deleted_stream_id)) + set_attribute("last_deleted_stream_id", str(last_deleted_stream_id)) if last_deleted_stream_id: has_changed = self._device_inbox_stream_cache.has_entity_changed( @@ -485,10 +485,10 @@ async def get_new_device_msgs_for_remote( A list of messages for the device and where in the stream the messages got to. 
""" - set_tag("destination", destination) - set_tag("last_stream_id", last_stream_id) - set_tag("current_stream_id", current_stream_id) - set_tag("limit", limit) + set_attribute("destination", destination) + set_attribute("last_stream_id", last_stream_id) + set_attribute("current_stream_id", current_stream_id) + set_attribute("limit", limit) has_changed = self._device_federation_outbox_stream_cache.has_entity_changed( destination, last_stream_id diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 57230df5aec5..360f204d16d6 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -29,11 +29,11 @@ from typing_extensions import Literal -from synapse.api.constants import EduTypes +from synapse.api.constants import EduTypes, EventContentFields from synapse.api.errors import Codes, StoreError -from synapse.logging.opentracing import ( +from synapse.logging.tracing import ( get_active_span_text_map, - set_tag, + set_attribute, trace, whitelisted_homeserver, ) @@ -394,12 +394,12 @@ async def get_device_updates_by_remote( # (user_id, device_id) entries into a map, with the value being # the max stream_id across each set of duplicate entries # - # maps (user_id, device_id) -> (stream_id, opentracing_context) + # maps (user_id, device_id) -> (stream_id,tracing_context) # - # opentracing_context contains the opentracing metadata for the request + # tracing_context contains the opentelemetry metadata for the request # that created the poke # - # The most recent request's opentracing_context is used as the + # The most recent request's tracing_context is used as the # context which created the Edu. # This is the stream ID that we will return for the consumer to resume @@ -462,8 +462,8 @@ async def get_device_updates_by_remote( if update_stream_id > previous_update_stream_id: # FIXME If this overwrites an older update, this discards the - # previous OpenTracing context. 
- # It might make it harder to track down issues using OpenTracing. + # previous tracing context. + # It might make it harder to track down issues using tracing. # If there's a good reason why it doesn't matter, a comment here # about that would not hurt. query_map[key] = (update_stream_id, update_context) @@ -529,11 +529,11 @@ def _get_device_updates_by_remote_txn( - user_id - device_id - stream_id - - opentracing_context + - tracing_context """ # get the list of device updates that need to be sent sql = """ - SELECT user_id, device_id, stream_id, opentracing_context FROM device_lists_outbound_pokes + SELECT user_id, device_id, stream_id, tracing_context FROM device_lists_outbound_pokes WHERE destination = ? AND ? < stream_id AND stream_id <= ? ORDER BY stream_id LIMIT ? @@ -554,7 +554,7 @@ async def _get_device_update_edus_by_remote( destination: The host the device updates are intended for from_stream_id: The minimum stream_id to filter updates by, exclusive query_map: Dictionary mapping (user_id, device_id) to - (update stream_id, the relevant json-encoded opentracing context) + (update stream_id, the relevant json-encoded tracing context) Returns: List of objects representing a device update EDU. 
@@ -592,7 +592,7 @@ async def _get_device_update_edus_by_remote( for device_id in device_ids: device = user_devices[device_id] - stream_id, opentracing_context = query_map[(user_id, device_id)] + stream_id, tracing_context = query_map[(user_id, device_id)] result = { "user_id": user_id, "device_id": device_id, @@ -600,8 +600,8 @@ async def _get_device_update_edus_by_remote( "stream_id": stream_id, } - if opentracing_context != "{}": - result["org.matrix.opentracing_context"] = opentracing_context + if tracing_context != "{}": + result[EventContentFields.TRACING_CONTEXT] = tracing_context prev_id = stream_id @@ -769,8 +769,8 @@ async def get_user_devices_from_cache( else: results[user_id] = await self.get_cached_devices_for_user(user_id) - set_tag("in_cache", str(results)) - set_tag("not_in_cache", str(user_ids_not_in_cache)) + set_attribute("in_cache", str(results)) + set_attribute("not_in_cache", str(user_ids_not_in_cache)) return user_ids_not_in_cache, results @@ -1947,7 +1947,7 @@ def _add_device_outbound_poke_to_stream_txn( "device_id", "sent", "ts", - "opentracing_context", + "tracing_context", ), values=values, ) @@ -1990,7 +1990,7 @@ def _add_device_outbound_room_poke_txn( "room_id", "stream_id", "converted_to_destinations", - "opentracing_context", + "tracing_context", ), values=[ ( @@ -2014,11 +2014,11 @@ async def get_uncoverted_outbound_room_pokes( written to `device_lists_outbound_pokes`. Returns: - A list of user ID, device ID, room ID, stream ID and optional opentracing context. + A list of user ID, device ID, room ID, stream ID and optional opentelemetry context. 
""" sql = """ - SELECT user_id, device_id, room_id, stream_id, opentracing_context + SELECT user_id, device_id, room_id, stream_id, tracing_context FROM device_lists_changes_in_room WHERE NOT converted_to_destinations ORDER BY stream_id @@ -2036,9 +2036,9 @@ def get_uncoverted_outbound_room_pokes_txn( device_id, room_id, stream_id, - db_to_json(opentracing_context), + db_to_json(tracing_context), ) - for user_id, device_id, room_id, stream_id, opentracing_context in txn + for user_id, device_id, room_id, stream_id, tracing_context in txn ] return await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index 6240f9a75ed3..3eccbb5ee402 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -18,7 +18,7 @@ from typing_extensions import Literal, TypedDict from synapse.api.errors import StoreError -from synapse.logging.opentracing import log_kv, trace +from synapse.logging.tracing import log_kv, trace from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import LoggingTransaction from synapse.types import JsonDict, JsonSerializable, StreamKeyType diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index cf33e73e2b4f..1e25fda7eb20 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -36,7 +36,7 @@ TransactionOneTimeKeyCounts, TransactionUnusedFallbackKeys, ) -from synapse.logging.opentracing import log_kv, set_tag, trace +from synapse.logging.tracing import log_kv, set_attribute, trace from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( DatabasePool, @@ -153,7 +153,7 @@ async def get_e2e_device_keys_for_cs_api( key data. 
The key data will be a dict in the same format as the DeviceKeys type returned by POST /_matrix/client/r0/keys/query. """ - set_tag("query_list", str(query_list)) + set_attribute("query_list", str(query_list)) if not query_list: return {} @@ -239,8 +239,8 @@ async def get_e2e_device_keys_and_signatures( Dict mapping from user-id to dict mapping from device_id to key data. """ - set_tag("include_all_devices", include_all_devices) - set_tag("include_deleted_devices", include_deleted_devices) + set_attribute("include_all_devices", include_all_devices) + set_attribute("include_deleted_devices", include_deleted_devices) result = await self.db_pool.runInteraction( "get_e2e_device_keys", @@ -450,9 +450,9 @@ async def add_e2e_one_time_keys( """ def _add_e2e_one_time_keys(txn: LoggingTransaction) -> None: - set_tag("user_id", user_id) - set_tag("device_id", device_id) - set_tag("new_keys", str(new_keys)) + set_attribute("user_id", user_id) + set_attribute("device_id", device_id) + set_attribute("new_keys", str(new_keys)) # We are protected from race between lookup and insertion due to # a unique constraint. 
If there is a race of two calls to # `add_e2e_one_time_keys` then they'll conflict and we will only @@ -1192,10 +1192,10 @@ async def set_e2e_device_keys( """ def _set_e2e_device_keys_txn(txn: LoggingTransaction) -> bool: - set_tag("user_id", user_id) - set_tag("device_id", device_id) - set_tag("time_now", time_now) - set_tag("device_keys", str(device_keys)) + set_attribute("user_id", user_id) + set_attribute("device_id", device_id) + set_attribute("time_now", time_now) + set_attribute("device_keys", str(device_keys)) old_key_json = self.db_pool.simple_select_one_onecol_txn( txn, diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 309a4ba6643c..6bd982da89fe 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -34,7 +34,7 @@ from synapse.api.errors import StoreError from synapse.api.room_versions import EventFormatVersions, RoomVersion from synapse.events import EventBase, make_event_from_dict -from synapse.logging.opentracing import tag_args, trace +from synapse.logging.tracing import tag_args, trace from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index d68f127f9b7c..354ac21ced28 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -40,7 +40,7 @@ from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, relation_from_event from synapse.events.snapshot import EventContext -from synapse.logging.opentracing import trace +from synapse.logging.tracing import trace from synapse.storage._base import db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, diff --git 
a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 01e935edef53..579fac021b69 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -54,7 +54,7 @@ current_context, make_deferred_yieldable, ) -from synapse.logging.opentracing import start_active_span, tag_args, trace +from synapse.logging.tracing import start_active_span, tag_args, trace from synapse.metrics.background_process_metrics import ( run_as_background_process, wrap_as_background_process, diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index af7bebee8030..32095d796944 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -23,7 +23,7 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.logging.opentracing import trace +from synapse.logging.tracing import trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index cc27ec38042b..fbf619dd8c4c 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -58,7 +58,7 @@ from synapse.api.filtering import Filter from synapse.events import EventBase from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.logging.opentracing import trace +from synapse.logging.tracing import trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 19dbf2da7fee..c15bdd1f1398 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -86,6 +86,8 @@ - Add indexes to 
various tables (`event_failed_pull_attempts`, `insertion_events`, `batch_events`) to make it easy to delete all associated rows when purging a room. - `inserted_ts` column is added to `event_push_actions_staging` table. + - Rename column in `device_lists_outbound_pokes` and `device_lists_changes_in_room` + from `opentracing_context` to generalized `tracing_context`. """ diff --git a/synapse/storage/schema/main/delta/73/02rename_opentelemetry_tracing_context.sql b/synapse/storage/schema/main/delta/73/02rename_opentelemetry_tracing_context.sql new file mode 100644 index 000000000000..ae904863f8bf --- /dev/null +++ b/synapse/storage/schema/main/delta/73/02rename_opentelemetry_tracing_context.sql @@ -0,0 +1,18 @@ +/* Copyright 2021 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- Rename to generalized `tracing_context` since we're moving from opentracing to opentelemetry +ALTER TABLE device_lists_outbound_pokes RENAME COLUMN opentracing_context TO tracing_context; +ALTER TABLE device_lists_changes_in_room RENAME COLUMN opentracing_context TO tracing_context; diff --git a/synapse/storage/util/partial_state_events_tracker.py b/synapse/storage/util/partial_state_events_tracker.py index 8d8894d1d585..9bdb68c0c17b 100644 --- a/synapse/storage/util/partial_state_events_tracker.py +++ b/synapse/storage/util/partial_state_events_tracker.py @@ -20,7 +20,7 @@ from twisted.internet.defer import Deferred from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable -from synapse.logging.opentracing import trace_with_opname +from synapse.logging.tracing import trace_with_opname from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.room import RoomWorkerStore from synapse.util import unwrapFirstError diff --git a/synapse/streams/events.py b/synapse/streams/events.py index f331e1af16e0..30e8d96c6f8a 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -21,7 +21,7 @@ from synapse.handlers.receipts import ReceiptEventSource from synapse.handlers.room import RoomEventSource from synapse.handlers.typing import TypingNotificationEventSource -from synapse.logging.opentracing import trace +from synapse.logging.tracing import trace from synapse.streams import EventSource from synapse.types import StreamToken diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index a3eb5f741bfc..1dd2d3e62e3c 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -29,11 +29,7 @@ from twisted.internet import defer from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.logging.opentracing import ( - active_span, - start_active_span, - 
start_active_span_follows_from, -) +from synapse.logging.tracing import Link, get_active_span, start_active_span from synapse.util import Clock from synapse.util.async_helpers import AbstractObservableDeferred, ObservableDeferred from synapse.util.caches import register_cache @@ -41,7 +37,7 @@ logger = logging.getLogger(__name__) if TYPE_CHECKING: - import opentracing + import opentelemetry # the type of the key in the cache KV = TypeVar("KV") @@ -82,8 +78,8 @@ class ResponseCacheEntry: easier to cache Failure results. """ - opentracing_span_context: "Optional[opentracing.SpanContext]" - """The opentracing span which generated/is generating the result""" + tracing_span_context: Optional["opentelemetry.trace.SpanContext"] + """The tracing span which generated/is generating the result""" class ResponseCache(Generic[KV]): @@ -141,7 +137,7 @@ def _set( self, context: ResponseCacheContext[KV], deferred: "defer.Deferred[RV]", - opentracing_span_context: "Optional[opentracing.SpanContext]", + tracing_span_context: Optional["opentelemetry.trace.SpanContext"], ) -> ResponseCacheEntry: """Set the entry for the given key to the given deferred. @@ -152,14 +148,14 @@ def _set( Args: context: Information about the cache miss deferred: The deferred which resolves to the result. - opentracing_span_context: An opentracing span wrapping the calculation + tracing_span_context: A tracing span wrapping the calculation Returns: The cache entry object. 
""" result = ObservableDeferred(deferred, consumeErrors=True) key = context.cache_key - entry = ResponseCacheEntry(result, opentracing_span_context) + entry = ResponseCacheEntry(result, tracing_span_context) self._result_cache[key] = entry def on_complete(r: RV) -> RV: @@ -234,15 +230,15 @@ async def handle_request(request): if cache_context: kwargs["cache_context"] = context - span_context: Optional[opentracing.SpanContext] = None + span_context: Optional["opentelemetry.trace.SpanContext"] = None async def cb() -> RV: # NB it is important that we do not `await` before setting span_context! nonlocal span_context with start_active_span(f"ResponseCache[{self._name}].calculate"): - span = active_span() + span = get_active_span() if span: - span_context = span.context + span_context = span.get_span_context() return await callback(*args, **kwargs) d = run_in_background(cb) @@ -257,9 +253,9 @@ async def cb() -> RV: "[%s]: using incomplete cached result for [%s]", self._name, key ) - span_context = entry.opentracing_span_context - with start_active_span_follows_from( + span_context = entry.tracing_span_context + with start_active_span( f"ResponseCache[{self._name}].wait", - contexts=(span_context,) if span_context else (), + links=[Link(span_context)] if span_context else None, ): return await make_deferred_yieldable(result) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 2aceb1a47fb6..98f05fc79243 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -42,7 +42,7 @@ make_deferred_yieldable, run_in_background, ) -from synapse.logging.opentracing import start_active_span +from synapse.logging.tracing import start_active_span from synapse.metrics import Histogram, LaterGauge from synapse.util import Clock diff --git a/synapse/visibility.py b/synapse/visibility.py index b443857571b7..fa307211f589 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -23,7 +23,7 @@ from synapse.events import EventBase 
from synapse.events.snapshot import EventContext from synapse.events.utils import prune_event -from synapse.logging.opentracing import trace +from synapse.logging.tracing import trace from synapse.storage.controllers import StorageControllers from synapse.storage.databases.main import DataStore from synapse.storage.state import StateFilter diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py deleted file mode 100644 index 0917e478a5b0..000000000000 --- a/tests/logging/test_opentracing.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright 2022 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import cast - -from twisted.internet import defer -from twisted.test.proto_helpers import MemoryReactorClock - -from synapse.logging.context import ( - LoggingContext, - make_deferred_yieldable, - run_in_background, -) -from synapse.logging.opentracing import ( - start_active_span, - start_active_span_follows_from, - tag_args, - trace_with_opname, -) -from synapse.util import Clock - -try: - from synapse.logging.scopecontextmanager import LogContextScopeManager -except ImportError: - LogContextScopeManager = None # type: ignore - -try: - import jaeger_client -except ImportError: - jaeger_client = None # type: ignore - -import logging - -from tests.unittest import TestCase - -logger = logging.getLogger(__name__) - - -class LogContextScopeManagerTestCase(TestCase): - """ - Test logging contexts and active opentracing spans. - - There's casts throughout this from generic opentracing objects (e.g. - opentracing.Span) to the ones specific to Jaeger since they have additional - properties that these tests depend on. This is safe since the only supported - opentracing backend is Jaeger. - """ - - if LogContextScopeManager is None: - skip = "Requires opentracing" # type: ignore[unreachable] - if jaeger_client is None: - skip = "Requires jaeger_client" # type: ignore[unreachable] - - def setUp(self) -> None: - # since this is a unit test, we don't really want to mess around with the - # global variables that power opentracing. We create our own tracer instance - # and test with it. - - scope_manager = LogContextScopeManager() - config = jaeger_client.config.Config( - config={}, service_name="test", scope_manager=scope_manager - ) - - self._reporter = jaeger_client.reporter.InMemoryReporter() - - self._tracer = config.create_tracer( - sampler=jaeger_client.ConstSampler(True), - reporter=self._reporter, - ) - - def test_start_active_span(self) -> None: - # the scope manager assumes a logging context of some sort. 
- with LoggingContext("root context"): - self.assertIsNone(self._tracer.active_span) - - # start_active_span should start and activate a span. - scope = start_active_span("span", tracer=self._tracer) - span = cast(jaeger_client.Span, scope.span) - self.assertEqual(self._tracer.active_span, span) - self.assertIsNotNone(span.start_time) - - # entering the context doesn't actually do a whole lot. - with scope as ctx: - self.assertIs(ctx, scope) - self.assertEqual(self._tracer.active_span, span) - - # ... but leaving it unsets the active span, and finishes the span. - self.assertIsNone(self._tracer.active_span) - self.assertIsNotNone(span.end_time) - - # the span should have been reported - self.assertEqual(self._reporter.get_spans(), [span]) - - def test_nested_spans(self) -> None: - """Starting two spans off inside each other should work""" - - with LoggingContext("root context"): - with start_active_span("root span", tracer=self._tracer) as root_scope: - self.assertEqual(self._tracer.active_span, root_scope.span) - root_context = cast(jaeger_client.SpanContext, root_scope.span.context) - - scope1 = start_active_span( - "child1", - tracer=self._tracer, - ) - self.assertEqual( - self._tracer.active_span, scope1.span, "child1 was not activated" - ) - context1 = cast(jaeger_client.SpanContext, scope1.span.context) - self.assertEqual(context1.parent_id, root_context.span_id) - - scope2 = start_active_span_follows_from( - "child2", - contexts=(scope1,), - tracer=self._tracer, - ) - self.assertEqual(self._tracer.active_span, scope2.span) - context2 = cast(jaeger_client.SpanContext, scope2.span.context) - self.assertEqual(context2.parent_id, context1.span_id) - - with scope1, scope2: - pass - - # the root scope should be restored - self.assertEqual(self._tracer.active_span, root_scope.span) - span2 = cast(jaeger_client.Span, scope2.span) - span1 = cast(jaeger_client.Span, scope1.span) - self.assertIsNotNone(span2.end_time) - self.assertIsNotNone(span1.end_time) - - 
self.assertIsNone(self._tracer.active_span) - - # the spans should be reported in order of their finishing. - self.assertEqual( - self._reporter.get_spans(), [scope2.span, scope1.span, root_scope.span] - ) - - def test_overlapping_spans(self) -> None: - """Overlapping spans which are not neatly nested should work""" - reactor = MemoryReactorClock() - clock = Clock(reactor) - - scopes = [] - - async def task(i: int): - scope = start_active_span( - f"task{i}", - tracer=self._tracer, - ) - scopes.append(scope) - - self.assertEqual(self._tracer.active_span, scope.span) - await clock.sleep(4) - self.assertEqual(self._tracer.active_span, scope.span) - scope.close() - - async def root(): - with start_active_span("root span", tracer=self._tracer) as root_scope: - self.assertEqual(self._tracer.active_span, root_scope.span) - scopes.append(root_scope) - - d1 = run_in_background(task, 1) - await clock.sleep(2) - d2 = run_in_background(task, 2) - - # because we did run_in_background, the active span should still be the - # root. - self.assertEqual(self._tracer.active_span, root_scope.span) - - await make_deferred_yieldable( - defer.gatherResults([d1, d2], consumeErrors=True) - ) - - self.assertEqual(self._tracer.active_span, root_scope.span) - - with LoggingContext("root context"): - # start the test off - d1 = defer.ensureDeferred(root()) - - # let the tasks complete - reactor.pump((2,) * 8) - - self.successResultOf(d1) - self.assertIsNone(self._tracer.active_span) - - # the spans should be reported in order of their finishing: task 1, task 2, - # root. 
- self.assertEqual( - self._reporter.get_spans(), - [scopes[1].span, scopes[2].span, scopes[0].span], - ) - - def test_trace_decorator_sync(self) -> None: - """ - Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` - with sync functions - """ - with LoggingContext("root context"): - - @trace_with_opname("fixture_sync_func", tracer=self._tracer) - @tag_args - def fixture_sync_func() -> str: - return "foo" - - result = fixture_sync_func() - self.assertEqual(result, "foo") - - # the span should have been reported - self.assertEqual( - [span.operation_name for span in self._reporter.get_spans()], - ["fixture_sync_func"], - ) - - def test_trace_decorator_deferred(self) -> None: - """ - Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` - with functions that return deferreds - """ - reactor = MemoryReactorClock() - - with LoggingContext("root context"): - - @trace_with_opname("fixture_deferred_func", tracer=self._tracer) - @tag_args - def fixture_deferred_func() -> "defer.Deferred[str]": - d1: defer.Deferred[str] = defer.Deferred() - d1.callback("foo") - return d1 - - result_d1 = fixture_deferred_func() - - # let the tasks complete - reactor.pump((2,) * 8) - - self.assertEqual(self.successResultOf(result_d1), "foo") - - # the span should have been reported - self.assertEqual( - [span.operation_name for span in self._reporter.get_spans()], - ["fixture_deferred_func"], - ) - - def test_trace_decorator_async(self) -> None: - """ - Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` - with async functions - """ - reactor = MemoryReactorClock() - - with LoggingContext("root context"): - - @trace_with_opname("fixture_async_func", tracer=self._tracer) - @tag_args - async def fixture_async_func() -> str: - return "foo" - - d1 = defer.ensureDeferred(fixture_async_func()) - - # let the tasks complete - reactor.pump((2,) * 8) - - self.assertEqual(self.successResultOf(d1), "foo") - - # the span should have been reported - 
self.assertEqual( - [span.operation_name for span in self._reporter.get_spans()], - ["fixture_async_func"], - ) diff --git a/tests/logging/test_tracing.py b/tests/logging/test_tracing.py new file mode 100644 index 000000000000..337796bccfd8 --- /dev/null +++ b/tests/logging/test_tracing.py @@ -0,0 +1,254 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactorClock + +from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.logging.tracing import start_active_span, tag_args, trace_with_opname +from synapse.util import Clock + +from tests.unittest import TestCase + +try: + import opentelemetry + import opentelemetry.sdk.trace + import opentelemetry.sdk.trace.export + import opentelemetry.sdk.trace.export.in_memory_span_exporter + import opentelemetry.trace + import opentelemetry.trace.propagation +except ImportError: + opentelemetry = None # type: ignore[assignment] + + +class TracingTestCase(TestCase): + """ + Test logging contexts and active opentelemetry spans. + """ + + if opentelemetry is None: + skip = "Requires opentelemetry" # type: ignore[unreachable] + + def setUp(self) -> None: + # since this is a unit test, we don't really want to mess around with the + # global variables that power opentelemetry. We create our own tracer instance + # and test with it. 
+ + self._tracer_provider = opentelemetry.sdk.trace.TracerProvider() + + self._exporter = ( + opentelemetry.sdk.trace.export.in_memory_span_exporter.InMemorySpanExporter() + ) + processor = opentelemetry.sdk.trace.export.SimpleSpanProcessor(self._exporter) + self._tracer_provider.add_span_processor(processor) + + self._tracer = self._tracer_provider.get_tracer(__name__) + + def test_start_active_span(self) -> None: + # This means no current span + self.assertEqual( + opentelemetry.trace.get_current_span(), opentelemetry.trace.INVALID_SPAN + ) + + # start_active_span should start and activate a span. + with start_active_span("new-span", tracer=self._tracer) as span: + self.assertEqual(opentelemetry.trace.get_current_span(), span) + + # ... but leaving it unsets the active span, and finishes the span. + self.assertEqual( + opentelemetry.trace.get_current_span(), opentelemetry.trace.INVALID_SPAN + ) + + # the span should have been reported + self.assertListEqual( + [span.name for span in self._exporter.get_finished_spans()], ["new-span"] + ) + + def test_nested_spans(self) -> None: + """Starting two spans off inside each other should work""" + with start_active_span("root_span", tracer=self._tracer) as root_span: + self.assertEqual(opentelemetry.trace.get_current_span(), root_span) + + with start_active_span( + "child_span1", + tracer=self._tracer, + ) as child_span1: + self.assertEqual( + opentelemetry.trace.get_current_span(), + child_span1, + "child_span1 was not activated", + ) + + with start_active_span( + "child_span2", + tracer=self._tracer, + ) as child_span2: + self.assertEqual( + opentelemetry.trace.get_current_span(), child_span2 + ) + + # the root scope should be restored + self.assertEqual(opentelemetry.trace.get_current_span(), root_span) + + # Active span is unset now that we're outside of the `with` scopes + self.assertEqual( + opentelemetry.trace.get_current_span(), opentelemetry.trace.INVALID_SPAN + ) + + # the spans should be reported in order of 
their finishing. + self.assertListEqual( + [span.name for span in self._exporter.get_finished_spans()], + ["child_span2", "child_span1", "root_span"], + ) + + def test_side_by_side_spans(self) -> None: + with start_active_span("span1", tracer=self._tracer), start_active_span( + "span2", tracer=self._tracer + ) as span2: + # We expect the last span in `with` list to be active + self.assertEqual(opentelemetry.trace.get_current_span(), span2) + + # Active span is unset now that we're outside of the `with` scopes + self.assertEqual( + opentelemetry.trace.get_current_span(), opentelemetry.trace.INVALID_SPAN + ) + + # the spans should be reported in order of their finishing. + self.assertListEqual( + [span.name for span in self._exporter.get_finished_spans()], + ["span2", "span1"], + ) + + def test_overlapping_spans(self) -> None: + """Overlapping spans which are not neatly nested should work""" + reactor = MemoryReactorClock() + clock = Clock(reactor) + + async def task(i: int): + with start_active_span( + f"task{i}", + tracer=self._tracer, + ) as span: + self.assertEqual(opentelemetry.trace.get_current_span(), span) + await clock.sleep(4) + self.assertEqual(opentelemetry.trace.get_current_span(), span) + + async def root(): + with start_active_span("root_span", tracer=self._tracer) as root_span: + self.assertEqual(opentelemetry.trace.get_current_span(), root_span) + + d1 = run_in_background(task, 1) + await clock.sleep(2) + d2 = run_in_background(task, 2) + + # because we did run_in_background, the active span should still be the + # root. 
+ self.assertEqual(opentelemetry.trace.get_current_span(), root_span) + + await make_deferred_yieldable( + defer.gatherResults([d1, d2], consumeErrors=True) + ) + + self.assertEqual(opentelemetry.trace.get_current_span(), root_span) + + # start the test off + root_defferred = defer.ensureDeferred(root()) + + # let the tasks complete + reactor.pump((2,) * 8) + + self.successResultOf(root_defferred) + # Active span is unset now that we're outside of the `with` scopes + self.assertEqual( + opentelemetry.trace.get_current_span(), opentelemetry.trace.INVALID_SPAN + ) + + # the spans should be reported in order of their finishing: task 1, task 2, + # root. + self.assertListEqual( + [span.name for span in self._exporter.get_finished_spans()], + ["task1", "task2", "root_span"], + ) + + def test_trace_decorator_sync(self) -> None: + """ + Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` + with sync functions + """ + + @trace_with_opname("fixture_sync_func", tracer=self._tracer) + @tag_args + def fixture_sync_func() -> str: + return "foo" + + result = fixture_sync_func() + self.assertEqual(result, "foo") + + # the span should have been reported + self.assertEqual( + [span.name for span in self._exporter.get_finished_spans()], + ["fixture_sync_func"], + ) + + def test_trace_decorator_deferred(self) -> None: + """ + Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` + with functions that return deferreds + """ + reactor = MemoryReactorClock() + + @trace_with_opname("fixture_deferred_func", tracer=self._tracer) + @tag_args + def fixture_deferred_func() -> "defer.Deferred[str]": + d1: defer.Deferred[str] = defer.Deferred() + d1.callback("foo") + return d1 + + result_d1 = fixture_deferred_func() + + # let the tasks complete + reactor.pump((2,) * 8) + + self.assertEqual(self.successResultOf(result_d1), "foo") + + # the span should have been reported + self.assertEqual( + [span.name for span in self._exporter.get_finished_spans()], + 
["fixture_deferred_func"], + ) + + def test_trace_decorator_async(self) -> None: + """ + Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` + with async functions + """ + reactor = MemoryReactorClock() + + @trace_with_opname("fixture_async_func", tracer=self._tracer) + @tag_args + async def fixture_async_func() -> str: + return "foo" + + d1 = defer.ensureDeferred(fixture_async_func()) + + # let the tasks complete + reactor.pump((2,) * 8) + + self.assertEqual(self.successResultOf(d1), "foo") + + # the span should have been reported + self.assertEqual( + [span.name for span in self._exporter.get_finished_spans()], + ["fixture_async_func"], + )