diff --git a/packages/opentelemetry-instrumentation-alephalpha/poetry.lock b/packages/opentelemetry-instrumentation-alephalpha/poetry.lock index 00a759e937..a57965adbe 100644 --- a/packages/opentelemetry-instrumentation-alephalpha/poetry.lock +++ b/packages/opentelemetry-instrumentation-alephalpha/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "aiodns" @@ -880,13 +880,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1759,4 +1759,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "432724668b04a7ea6c42da308d6f0ba5035ec6415364ab4d54abe71aa28e53dc" +content-hash = "cace922b53cd81b6c467e4bb1fcda89a9880686c8e7905dba1e50751b829abce" diff --git a/packages/opentelemetry-instrumentation-alephalpha/pyproject.toml b/packages/opentelemetry-instrumentation-alephalpha/pyproject.toml index 7e4254e11a..8f97d0dfb5 100644 --- a/packages/opentelemetry-instrumentation-alephalpha/pyproject.toml +++ b/packages/opentelemetry-instrumentation-alephalpha/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py index ec1955be07..6456750a17 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py @@ -16,8 +16,10 @@ build_from_streaming_response, ) from opentelemetry.instrumentation.anthropic.utils import ( + acount_prompt_tokens_from_request, dont_throw, error_metrics_attributes, + count_prompt_tokens_from_request, set_span_attribute, shared_metrics_attributes, should_send_prompts, @@ -59,6 +61,18 @@ "method": "stream", "span_name": "anthropic.chat", }, + { + "package": "anthropic.resources.beta.prompt_caching.messages", + "object": "Messages", + "method": "create", + "span_name": "anthropic.chat", + }, + { + "package": "anthropic.resources.beta.prompt_caching.messages", + "object": "Messages", + "method": "stream", + "span_name": "anthropic.chat", + }, ] WRAPPED_AMETHODS = [ { @@ -79,6 +93,18 @@ "method": "stream", 
"span_name": "anthropic.chat", }, + { + "package": "anthropic.resources.beta.prompt_caching.messages", + "object": "AsyncMessages", + "method": "create", + "span_name": "anthropic.chat", + }, + { + "package": "anthropic.resources.beta.prompt_caching.messages", + "object": "AsyncMessages", + "method": "stream", + "span_name": "anthropic.chat", + }, ] @@ -102,6 +128,11 @@ async def _dump_content(message_index, content, span): if isinstance(content, str): return content elif isinstance(content, list): + # If the content is a list of text blocks, concatenate them. + # This is more commonly used in prompt caching. + if all([item.get("type") == "text" for item in content]): + return "".join([item.get("text") for item in content]) + content = [ ( await _process_image_item( @@ -220,27 +251,23 @@ async def _aset_token_usage( if usage := response.get("usage"): prompt_tokens = usage.input_tokens else: - prompt_tokens = 0 - if hasattr(anthropic, "count_tokens"): - if request.get("prompt"): - prompt_tokens = await anthropic.count_tokens(request.get("prompt")) - elif request.get("messages"): - prompt_tokens = 0 - for m in request.get("messages"): - content = m.get("content") - if isinstance(content, str): - prompt_tokens += await anthropic.count_tokens(content) - elif isinstance(content, list): - for item in content: - # TODO: handle image tokens - if isinstance(item, dict) and item.get("type") == "text": - prompt_tokens += await anthropic.count_tokens( - item.get("text", "") - ) - - if token_histogram and type(prompt_tokens) is int and prompt_tokens >= 0: + prompt_tokens = await acount_prompt_tokens_from_request(anthropic, request) + + if usage := response.get("usage"): + cache_read_tokens = dict(usage).get("cache_read_input_tokens", 0) + else: + cache_read_tokens = 0 + + if usage := response.get("usage"): + cache_creation_tokens = dict(usage).get("cache_creation_input_tokens", 0) + else: + cache_creation_tokens = 0 + + input_tokens = prompt_tokens + cache_read_tokens + cache_creation_tokens + + if token_histogram and type(input_tokens) is int and input_tokens >= 0: token_histogram.record( - prompt_tokens, + input_tokens, attributes={ **metric_attributes, SpanAttributes.LLM_TOKEN_TYPE: "input", @@ -268,7 +295,7 @@ async def _aset_token_usage( }, ) - total_tokens = prompt_tokens + completion_tokens + total_tokens = input_tokens + completion_tokens choices = 0 if type(response.get("content")) is list: @@ -285,12 +312,19 @@ async def _aset_token_usage( }, ) - set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens) + set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, input_tokens) set_span_attribute( span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens ) set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens) + set_span_attribute( + span, SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens + ) + set_span_attribute( + span, SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS, cache_creation_tokens + ) + @dont_throw def _set_token_usage( @@ -308,27 +342,23 @@ def _set_token_usage( if usage := response.get("usage"): prompt_tokens = usage.input_tokens else: - prompt_tokens = 0 - if hasattr(anthropic, "count_tokens"): - if request.get("prompt"): - prompt_tokens = anthropic.count_tokens(request.get("prompt")) - elif request.get("messages"): - prompt_tokens = 0 - for m in request.get("messages"): - content = m.get("content") - if isinstance(content, str): - prompt_tokens += anthropic.count_tokens(content) - elif 
isinstance(content, list): - for item in content: - # TODO: handle image tokens - if isinstance(item, dict) and item.get("type") == "text": - prompt_tokens += anthropic.count_tokens( - item.get("text", "") - ) - - if token_histogram and type(prompt_tokens) is int and prompt_tokens >= 0: + prompt_tokens = count_prompt_tokens_from_request(anthropic, request) + + if usage := response.get("usage"): + cache_read_tokens = dict(usage).get("cache_read_input_tokens", 0) + else: + cache_read_tokens = 0 + + if usage := response.get("usage"): + cache_creation_tokens = dict(usage).get("cache_creation_input_tokens", 0) + else: + cache_creation_tokens = 0 + + input_tokens = prompt_tokens + cache_read_tokens + cache_creation_tokens + + if token_histogram and type(input_tokens) is int and input_tokens >= 0: token_histogram.record( - prompt_tokens, + input_tokens, attributes={ **metric_attributes, SpanAttributes.LLM_TOKEN_TYPE: "input", @@ -354,7 +384,7 @@ def _set_token_usage( }, ) - total_tokens = prompt_tokens + completion_tokens + total_tokens = input_tokens + completion_tokens choices = 0 if type(response.get("content")) is list: @@ -371,12 +401,19 @@ def _set_token_usage( }, ) - set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens) + set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, input_tokens) set_span_attribute( span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens ) set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens) + set_span_attribute( + span, SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens + ) + set_span_attribute( + span, SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS, cache_creation_tokens + ) + @dont_throw def _set_response_attributes(span, response): @@ -687,7 +724,7 @@ def __init__( get_common_metrics_attributes: Callable[[], dict] = lambda: {}, upload_base64_image: Optional[ Callable[[str, str, str, str], Coroutine[None, None, str]] - ] = lambda *args: "", + ] = None, ): super().__init__() Config.exception_logger = exception_logger @@ -771,8 +808,9 @@ def _uninstrument(self, **kwargs): wrapped_method.get("method"), ) for wrapped_method in WRAPPED_AMETHODS: + wrap_package = wrapped_method.get("package") wrap_object = wrapped_method.get("object") unwrap( - f"anthropic.resources.completions.{wrap_object}", + f"{wrap_package}.{wrap_object}", wrapped_method.get("method"), ) diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py index 5764822a97..011d722b80 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py @@ -5,6 +5,7 @@ from opentelemetry.instrumentation.anthropic.utils import ( dont_throw, error_metrics_attributes, + count_prompt_tokens_from_request, set_span_attribute, shared_metrics_attributes, should_send_prompts, @@ -20,7 +21,7 @@ def _process_response_item(item, complete_response): if item.type == "message_start": complete_response["model"] = item.message.model - complete_response["usage"] = item.message.usage + complete_response["usage"] = dict(item.message.usage) elif item.type == "content_block_start": index = item.index if len(complete_response.get("events")) <= index: @@ -31,6 +32,13 @@ def _process_response_item(item, complete_response): elif 
item.type == "message_delta": for event in complete_response.get("events", []): event["finish_reason"] = item.delta.stop_reason + if item.usage: + if "usage" in complete_response: + item_output_tokens = dict(item.usage).get("output_tokens", 0) + existing_output_tokens = complete_response["usage"].get("output_tokens", 0) + complete_response["usage"]["output_tokens"] = item_output_tokens + existing_output_tokens + else: + complete_response["usage"] = dict(item.usage) def _set_token_usage( @@ -42,8 +50,13 @@ def _set_token_usage( token_histogram: Histogram = None, choice_counter: Counter = None, ): - total_tokens = prompt_tokens + completion_tokens - set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens) + cache_read_tokens = complete_response.get("usage", {}).get("cache_read_input_tokens", 0) + cache_creation_tokens = complete_response.get("usage", {}).get("cache_creation_input_tokens", 0) + + input_tokens = prompt_tokens + cache_read_tokens + cache_creation_tokens + total_tokens = input_tokens + completion_tokens + + set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, input_tokens) set_span_attribute( span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens ) @@ -52,10 +65,16 @@ def _set_token_usage( set_span_attribute( span, SpanAttributes.LLM_RESPONSE_MODEL, complete_response.get("model") ) + set_span_attribute( + span, SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens + ) + set_span_attribute( + span, SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS, cache_creation_tokens + ) - if token_histogram and type(prompt_tokens) is int and prompt_tokens >= 0: + if token_histogram and type(input_tokens) is int and input_tokens >= 0: token_histogram.record( - prompt_tokens, + input_tokens, attributes={ **metric_attributes, SpanAttributes.LLM_TOKEN_TYPE: "input", @@ -135,30 +154,26 @@ def build_from_streaming_response( # calculate token usage if Config.enrich_token_usage: try: - prompt_tokens = -1 completion_tokens = -1 - # prompt_usage - if kwargs.get("prompt"): - prompt_tokens = instance.count_tokens(kwargs.get("prompt")) - elif kwargs.get("messages"): - prompt_tokens = sum( - [ - instance.count_tokens(m.get("content")) - for m in kwargs.get("messages") - ] - ) + if usage := complete_response.get("usage"): + prompt_tokens = usage.get("input_tokens", 0) + else: + prompt_tokens = count_prompt_tokens_from_request(instance, kwargs) # completion_usage - completion_content = "" - if complete_response.get("events"): - model_name = complete_response.get("model") or None - for event in complete_response.get("events"): # type: dict - if event.get("text"): - completion_content += event.get("text") - - if model_name: - completion_tokens = instance.count_tokens(completion_content) + if usage := complete_response.get("usage"): + completion_tokens = usage.get("output_tokens", 0) + else: + completion_content = "" + if complete_response.get("events"): + model_name = complete_response.get("model") or None + for event in complete_response.get("events"): # type: dict + if event.get("text"): + completion_content += event.get("text") + + if model_name: + completion_tokens = instance.count_tokens(completion_content) _set_token_usage( span, @@ -170,7 +185,7 @@ def build_from_streaming_response( choice_counter, ) except Exception as e: - logger.warning("Failed to set token usage, error: %s", str(e)) + logger.warning("Failed to set token usage, error: %s", e) if should_send_prompts(): _set_completions(span, complete_response.get("events")) @@ -191,7 
+206,7 @@ async def abuild_from_streaming_response( exception_counter: Counter = None, kwargs: dict = {}, ): - complete_response = {"events": [], "model": ""} + complete_response = {"events": [], "model": "", "usage": {}} async for item in response: try: yield item @@ -214,30 +229,25 @@ async def abuild_from_streaming_response( # calculate token usage if Config.enrich_token_usage: try: - prompt_tokens = -1 - completion_tokens = -1 - # prompt_usage - if kwargs.get("prompt"): - prompt_tokens = await instance.count_tokens(kwargs.get("prompt")) - elif kwargs.get("messages"): - prompt_tokens = sum( - [ - await instance.count_tokens(m.get("content")) - for m in kwargs.get("messages") - ] - ) + if usage := complete_response.get("usage"): + prompt_tokens = usage.get("input_tokens", 0) + else: + prompt_tokens = count_prompt_tokens_from_request(instance, kwargs) # completion_usage - completion_content = "" - if complete_response.get("events"): - model_name = complete_response.get("model") or None - for event in complete_response.get("events"): # type: dict - if event.get("text"): - completion_content += event.get("text") - - if model_name: - completion_tokens = await instance.count_tokens(completion_content) + if usage := complete_response.get("usage"): + completion_tokens = usage.get("output_tokens", 0) + else: + completion_content = "" + if complete_response.get("events"): + model_name = complete_response.get("model") or None + for event in complete_response.get("events"): # type: dict + if event.get("text"): + completion_content += event.get("text") + + if model_name: + completion_tokens = instance.count_tokens(completion_content) _set_token_usage( span, diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/utils.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/utils.py index f67c70f3c3..6c46528837 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/utils.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/utils.py @@ -67,3 +67,47 @@ def error_metrics_attributes(exception): GEN_AI_SYSTEM: GEN_AI_SYSTEM_ANTHROPIC, "error.type": exception.__class__.__name__, } + + +@dont_throw +def count_prompt_tokens_from_request(anthropic, request): + prompt_tokens = 0 + if hasattr(anthropic, "count_tokens"): + if request.get("prompt"): + prompt_tokens = anthropic.count_tokens(request.get("prompt")) + elif messages := request.get("messages"): + prompt_tokens = 0 + for m in messages: + content = m.get("content") + if isinstance(content, str): + prompt_tokens += anthropic.count_tokens(content) + elif isinstance(content, list): + for item in content: + # TODO: handle image and tool tokens + if isinstance(item, dict) and item.get("type") == "text": + prompt_tokens += anthropic.count_tokens( + item.get("text", "") + ) + return prompt_tokens + + +@dont_throw +async def acount_prompt_tokens_from_request(anthropic, request): + prompt_tokens = 0 + if hasattr(anthropic, "count_tokens"): + if request.get("prompt"): + prompt_tokens = await anthropic.count_tokens(request.get("prompt")) + elif messages := request.get("messages"): + prompt_tokens = 0 + for m in messages: + content = m.get("content") + if isinstance(content, str): + prompt_tokens += await anthropic.count_tokens(content) + elif isinstance(content, list): + for item in content: + # TODO: handle image and tool tokens + if isinstance(item, dict) and item.get("type") == "text": + 
prompt_tokens += await anthropic.count_tokens( + item.get("text", "") + ) + return prompt_tokens diff --git a/packages/opentelemetry-instrumentation-anthropic/poetry.lock b/packages/opentelemetry-instrumentation-anthropic/poetry.lock index 92b9b97a2c..08f685091d 100644 --- a/packages/opentelemetry-instrumentation-anthropic/poetry.lock +++ b/packages/opentelemetry-instrumentation-anthropic/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "annotated-types" @@ -13,13 +13,13 @@ files = [ [[package]] name = "anthropic" -version = "0.36.1" +version = "0.36.2" description = "The official Python library for the anthropic API" optional = false python-versions = ">=3.7" files = [ - {file = "anthropic-0.36.1-py3-none-any.whl", hash = "sha256:908968f89ecdf9747c34cf632e2099668ee515a38293d455ef7ad79a3d4f527c"}, - {file = "anthropic-0.36.1.tar.gz", hash = "sha256:8a343caa335aefc3ce64e2e559c88ca01af409ace5d8c4f83bd0b586315a43a2"}, + {file = "anthropic-0.36.2-py3-none-any.whl", hash = "sha256:308ddc6c538de03c081552e456bc0b387b6f7c7d1dea0c20122cc11c7cdbaf6a"}, + {file = "anthropic-0.36.2.tar.gz", hash = "sha256:d5a3fa56d1c82a51944f9dc7b0dc72048deb89f8df5ebfd09e2d1b59c62eb8eb"}, ] [package.dependencies] @@ -381,13 +381,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "huggingface-hub" -version = "0.25.2" +version = "0.26.0" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.25.2-py3-none-any.whl", hash = "sha256:1897caf88ce7f97fe0110603d8f66ac264e3ba6accdf30cd66cc0fed5282ad25"}, - {file = "huggingface_hub-0.25.2.tar.gz", hash = "sha256:a1014ea111a5f40ccd23f7f7ba8ac46e20fa3b658ced1f86a00c75c06ec6423c"}, + {file = "huggingface_hub-0.26.0-py3-none-any.whl", hash = "sha256:e43b8f36042b2103b48dea822535e08f5f089c4aa7013a067fca7b4ebf7f85a3"}, + {file = "huggingface_hub-0.26.0.tar.gz", hash = "sha256:524fe9281b015b76aa73ff1a83bf1cbe8cab851c9ac5ae5fcd2a25d5173ce629"}, ] [package.dependencies] @@ -400,16 +400,16 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", 
"pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] hf-transfer = ["hf-transfer (>=0.1.4)"] -inference = ["aiohttp", "minijinja (>=1.0)"] -quality = ["mypy (==1.5.1)", "ruff (>=0.5.0)"] +inference = ["aiohttp"] +quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.5.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] tensorflow-testing = ["keras (<3.0)", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] torch = ["safetensors[torch]", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] @@ -718,13 +718,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1169,13 +1169,13 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "setuptools" -version = "75.1.0" +version = "75.2.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-75.1.0-py3-none-any.whl", hash = "sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2"}, - {file = "setuptools-75.1.0.tar.gz", hash = "sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538"}, + {file = "setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8"}, + {file = "setuptools-75.2.0.tar.gz", hash = "sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec"}, ] [package.extras] @@ -1508,93 
+1508,93 @@ files = [ [[package]] name = "yarl" -version = "1.15.3" +version = "1.15.5" description = "Yet another URL library" optional = false python-versions = ">=3.9" files = [ - {file = "yarl-1.15.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:14d6f07b7b4b3b8fba521904db58442281730b44318d6abb9908de79e2a4e4f4"}, - {file = "yarl-1.15.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eacd9de9b5b8262818a2e1f88efbd8d523abc8453de238c5d2f6a91fa85032dd"}, - {file = "yarl-1.15.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5a63ed17af784da3de39b82adfd4f8404ad5ee2ec8f616b063f37da3e64e0521"}, - {file = "yarl-1.15.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b55cc82ba92c07af6ba619dcf70cc89f7b9626adefb87d251f80f2e77419f1da"}, - {file = "yarl-1.15.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63ba82841ce315e4b5dc8b9345062638c74b1864d38172d0a0403e5a083b0950"}, - {file = "yarl-1.15.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59dce412b2515de05ab2eb6aef19ad7f70857ad436cd65fc4276df007106fb42"}, - {file = "yarl-1.15.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e337737b8c9d837e5b4d9e906cc57ed7a639e16e515c8094509b17f556fdb642"}, - {file = "yarl-1.15.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2128315cdc517a45ceb72ec17b256a7940eeb4843c66834c203e7d6580c83405"}, - {file = "yarl-1.15.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69c2d111e67a818e702ba957da8c8e62de916f5c1b3da043f744084c63f12d46"}, - {file = "yarl-1.15.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:d2a70e8bec768be7423d8d465858a3646b34257a20cc02fd92612f1b14931f50"}, - {file = "yarl-1.15.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:efe758958a7bffce68d91ade238df72667e1f18966ed7b1d3d390eead51a8903"}, - {file = "yarl-1.15.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:b765f19e23c29b68e4f8bbadd36f1da2333ba983d8da2d6518e5f0a7eb2579c2"}, - {file = "yarl-1.15.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:df494e5a79f2ef8f81f966f787e515760e639c6319a321c16198b379c256a157"}, - {file = "yarl-1.15.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:68b27a7d9fb0f145de608da2e45e37fd2397b00266f10487e557f769afa2842d"}, - {file = "yarl-1.15.3-cp310-cp310-win32.whl", hash = "sha256:6d1aba1f644d6e5e16edada31938c11b6c9c97e3bf065742a2c7740d38af0c19"}, - {file = "yarl-1.15.3-cp310-cp310-win_amd64.whl", hash = "sha256:925e72fc7a4222a5bf6d288876d5afacc8f833b49c4cca85f65089131ba25afa"}, - {file = "yarl-1.15.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:dbd4808a209b175b5ebbac24c4798dd7511c5ee522a16f2f0eac78c717dfcdfc"}, - {file = "yarl-1.15.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:20f8bdaf667386cea1a8f49cb69a85f90346656d750d3c1278be1dbc76601065"}, - {file = "yarl-1.15.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:adeac55335669a189189373c93d131ebfc2de3ec04f0d3aa7dff6661f83b89b6"}, - {file = "yarl-1.15.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:690d8f702945506b58c9c5834d586e8fd819b845fe6239ab16ebc64a92a6fd3d"}, - {file = "yarl-1.15.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:df7784a29b9689341c17d06d826e3b52ee59d6b6916177e4db0477be7aad5f72"}, - {file = "yarl-1.15.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12c80ec2af97ff3e433699bcabc787ef34e7c08ec038a6e6a25fb81d7bb83607"}, - 
{file = "yarl-1.15.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39533b927c665bcff7da80bf299218e4af12f3e2be27e9c456e29547bcefd631"}, - {file = "yarl-1.15.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db32a5c2912db45e73f80107d178e30f5c48cf596762b3c60ddfebdd655385f0"}, - {file = "yarl-1.15.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bde319602111e9acca3c4f87f4205b38ba6166004bf108de47553633f9a580fc"}, - {file = "yarl-1.15.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:493760c4ced954582db83c4760166992c016e1777ebc0f3ef1bb5eb60b2b5924"}, - {file = "yarl-1.15.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d9cd73f7bff5079d87c2622aa418a75d5d3cdc944d3edb905c5dfc3235466eb0"}, - {file = "yarl-1.15.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e924040582499f7514ec64691031504e6224b5ae7224216208fc2c94f8b13c89"}, - {file = "yarl-1.15.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1c3e9ae98719fe180751b093d02dbcc33b78a37e861d0f2c9571720bd31555db"}, - {file = "yarl-1.15.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6f2911cae6dd012adaaf51494dad4cafb4284ad1f3b588df6ea3e3017e053750"}, - {file = "yarl-1.15.3-cp311-cp311-win32.whl", hash = "sha256:acdfe626607a245aedca35b211f9305a9e7a33349da525bf4ef3caaec8ef51cd"}, - {file = "yarl-1.15.3-cp311-cp311-win_amd64.whl", hash = "sha256:0ace3927502a9f90a868d62c66623703cf5096dcb586187266e9b964d8dd6c81"}, - {file = "yarl-1.15.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:decf9d76191bfe34835f1abd3fa8ebe8a9cd7e16300a5c7e82b18c0812bb22a2"}, - {file = "yarl-1.15.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ce65ed7ad7b6cbca06b0c011b170bd2b0bc56b0a740540e2713e5ac12d7b9b2e"}, - {file = "yarl-1.15.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3cf2b50352df8775591869aaa22c52b64d60376ba99c0802b42778fedc90b775"}, - {file = "yarl-1.15.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32e8ebf0080ddd38ec05f8be940a3719e5fe1ab8bb6d2b3f6f8b89c9e34149aa"}, - {file = "yarl-1.15.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05183fd49244517cb11c208d0ae128f2e8a85ddb7caf22ad8b0ffcdf5481fcb6"}, - {file = "yarl-1.15.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:46653b5fd29e63ffe63335da343829a2b00bb43b0bd9bb21240d3b42629629e2"}, - {file = "yarl-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6316af233610b9868eda92cf68c016750cbf50085ac6c51faa17905ddd25605"}, - {file = "yarl-1.15.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5685ebc333c95b75be3a0a83a81b82b6411beee9585eaeb9e2e588ae8df23848"}, - {file = "yarl-1.15.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6da6f6c6ee5595658f21bb9d1ecd702f7a7f22f224ac063dfb595624aec4a2e0"}, - {file = "yarl-1.15.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:45c05b87a8494d9820ea1ac82118fd2f1d795d868e94766fe8ff670377bf6280"}, - {file = "yarl-1.15.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:04f930fcc940f96b8b29110c56882bcff8703f87a7b9354d3acf60ffded5a23d"}, - {file = "yarl-1.15.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8df77742b403e71c5d62d22d150e6e35efd6096a15f2c7419815911c62225100"}, - {file = "yarl-1.15.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f785d83ece0998e4ce4fadda22fa6c1ecc40e10f41617013a8726d2e9af0d98f"}, - {file = 
"yarl-1.15.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7794aade99be0d48b69bd5942acddfeff0de3d09c724d9abe4f19736708ef18f"}, - {file = "yarl-1.15.3-cp312-cp312-win32.whl", hash = "sha256:a3a98d70c667c957c7cd0b153d4cb5e45d43f5e2e23de73be6f7b5c883c01f72"}, - {file = "yarl-1.15.3-cp312-cp312-win_amd64.whl", hash = "sha256:90257bc627897a2c1d562efcd6a6b18887e9dacae795cad2367e8e16df47d966"}, - {file = "yarl-1.15.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f94d8adfdec402ff97cecc243b310c01d571362ca87bcf8def8e15cb3aaac3ee"}, - {file = "yarl-1.15.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d0328f798052a33803a77d0868c7f802e952127092c1738fc9e7bfcaac7207c5"}, - {file = "yarl-1.15.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f5f0a0691e39c2e7b5c0f23e6765fa6cb162dce99d9ab1897fdd0f7a4a38b6fb"}, - {file = "yarl-1.15.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370f646d3654e196ddbf772a2d737fe4e1dd738267015b73ff6267ca592fd9d6"}, - {file = "yarl-1.15.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3487c57bc8f17f2586ae7fd0e77f65cd298d45b64d15f604bbb29f4cce0e7961"}, - {file = "yarl-1.15.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef67989d480358482830dc3bc232709804f46a61e7e9841d3f0b1c13a4735b3b"}, - {file = "yarl-1.15.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b5ab6c64921802176f56c36aa67c5e6a8baf9557ec1662cb41ecdb5580b67eb9"}, - {file = "yarl-1.15.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb474a06023d01ead9c072b2580c22b2691aa1cabdcc19c3171ab1fa6d8496e3"}, - {file = "yarl-1.15.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92f9a45230d3aa8568c1d692ab27bf505a32dfe3b404721458fc374f411e8bd2"}, - {file = "yarl-1.15.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:24cad94cf2f46cc8e4b9cd44e4e8a84483536a6c54554960b02b10b5724ab122"}, - {file = "yarl-1.15.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:380f30073cbd9b740891bb56f44ee31f870e8721269b618ccc9913400936d9f6"}, - {file = "yarl-1.15.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:353306ba6f0218af1aefe4b9c8b3a0b81b209bc75d79357dac6aca70a7b09d6a"}, - {file = "yarl-1.15.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe03cea925d884b8f1157a7037df2f5b6a6478a64b78ee600832d8a9f044c83e"}, - {file = "yarl-1.15.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5c4cc1a438ac52562427330e33891f50a78ffd38d335abc64f93f201c83bdc82"}, - {file = "yarl-1.15.3-cp313-cp313-win32.whl", hash = "sha256:956975a3a1ce1f4537be22278d6a283b8bc74d77671f7f6469ab1e800f4e9b02"}, - {file = "yarl-1.15.3-cp313-cp313-win_amd64.whl", hash = "sha256:2e61b72cf15922a7a665299a6b6825bd9901d67ec3b9d3cf9b256dc1667c9bb1"}, - {file = "yarl-1.15.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:270fef2b335e60c91ee835c524445e2248af841c8b72f48769ed6c02fbff5873"}, - {file = "yarl-1.15.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:59b77f0682e1917be197fc8229530f0c6fb3ef8e242d8256ba091a3a1c0ef7e6"}, - {file = "yarl-1.15.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cc4b999718287073dccd3acb0ef1593961bd7923af08991cb3c94080db503935"}, - {file = "yarl-1.15.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9b251d3f90e125ff0d1f76257329a9190fa1bfd2157344c875580bff6dedc62"}, - {file = "yarl-1.15.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:7ccb4667e0c0a25815efbfe251d24b56624449a319d4bb497074dd49444fb306"}, - {file = "yarl-1.15.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ac26e43b56dbafb30256906bc763cc1f22e05825ae1ced4c6afbd0e6584f18de"}, - {file = "yarl-1.15.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2207491555af5dbbee4c3179a76766f7bc1ecff858f420ea96f2e105ca42c4dd"}, - {file = "yarl-1.15.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14effa29db6113be065a594e13a0f45afb9c1e374fd22b4bc3a4eff0725184b2"}, - {file = "yarl-1.15.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:19077525cd36c797cae19262e15f2881da33c602fb35d075ff0e4263b51b8b88"}, - {file = "yarl-1.15.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:d80c019083506886df098b7bb0d844e19db7e226736829ef49f892ed0a070fa5"}, - {file = "yarl-1.15.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c24debeec87908a864a2b4cb700f863db9441cabacdb22dc448c5d38b55c6f62"}, - {file = "yarl-1.15.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:1c49fe426c45520b4b8a48544d3a9a58194f39c1b57d92451883f847c299a137"}, - {file = "yarl-1.15.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:66ddcd7ee3264bc937860f4780290d60f6472ca0484c214fe805116a831121e8"}, - {file = "yarl-1.15.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a5cbbb06559757f091f9e71d3f76c27d4dfe0652cc3f17ccce398b8377bfda4"}, - {file = "yarl-1.15.3-cp39-cp39-win32.whl", hash = "sha256:d798de0b50efb66583fc096bcdaa852ed6ea3485a4eb610d6a634f8010d932f4"}, - {file = "yarl-1.15.3-cp39-cp39-win_amd64.whl", hash = "sha256:8f0b33fd088e93ba5f7f6dd55226630e7b78212752479c8fcc6abbd143b9c1ce"}, - {file = "yarl-1.15.3-py3-none-any.whl", hash = "sha256:a1d49ed6f4b812dde88e937d4c2bd3f13d72c23ef7de1e17a63b7cacef4b5691"}, - {file = "yarl-1.15.3.tar.gz", hash = "sha256:fbcff47f8ba82467f203037f7a30decf5c724211b224682f7236edb0dcbb5b95"}, + {file = "yarl-1.15.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b6c57972a406ea0f61e3f28f2b3a780fb71fbe1d82d267afe5a2f889a83ee7e7"}, + {file = "yarl-1.15.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5c3ac5bdcc1375c8ee52784adf94edbce37c471dd2100a117cfef56fe8dbc2b4"}, + {file = "yarl-1.15.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:68d21d0563d82aaf46163eac529adac301b20be3181b8a2811f7bd5615466055"}, + {file = "yarl-1.15.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7d317fb80bc17ed4b34a9aad8b80cef34bea0993654f3e8566daf323def7ef9"}, + {file = "yarl-1.15.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed9c72d5361cfd5af5ccadffa8f8077f4929640e1f938aa0f4b92c5a24996ac5"}, + {file = "yarl-1.15.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb707859218e8335447b210f41a755e7b1367c33e87add884128bba144694a7f"}, + {file = "yarl-1.15.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6563394492c96cb57f4dff0c69c63d2b28b5469c59c66f35a1e6451583cd0ab4"}, + {file = "yarl-1.15.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c2d1109c8d92059314cc34dd8f0a31f74b720dc140744923ed7ca228bf9b491"}, + {file = "yarl-1.15.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8fc727f0fb388debc771eaa7091c092bd2e8b6b4741b73354b8efadcf96d6031"}, + {file = "yarl-1.15.5-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:94189746c5ad62e1014a16298130e696fe593d031d442ef135fb7787b7a1f820"}, + 
{file = "yarl-1.15.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b06d8b05d0fafef204d635a4711283ddbf19c7c0facdc61b4b775f6e47e2d4be"}, + {file = "yarl-1.15.5-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:de6917946dc6bc237d4b354e38aa13a232e0c7948fdbdb160edee3862e9d735f"}, + {file = "yarl-1.15.5-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:34816f1d833433a16c4832562a050b0a60eac53dcb71b2032e6ebff82d74b6a7"}, + {file = "yarl-1.15.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:19e2a4b2935f95fad0949f420514c5d862f5f18058fbbfd8854f496a97d9fd87"}, + {file = "yarl-1.15.5-cp310-cp310-win32.whl", hash = "sha256:30ca64521f1a96b72886dd9e8652f16eab11891b4572dcfcfc1ad6d6ccb27abd"}, + {file = "yarl-1.15.5-cp310-cp310-win_amd64.whl", hash = "sha256:86648c53b10c53db8b967a75fb41e0c89dbec7398f6525e34af2b6c456bb0ac0"}, + {file = "yarl-1.15.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e652aa9f8dfa808bc5b2da4d1f4e286cf1d640570fdfa72ffc0c1d16ba114651"}, + {file = "yarl-1.15.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21050b6cd569980fe20ceeab4baeb900d3f7247270475e42bafe117416a5496c"}, + {file = "yarl-1.15.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:18940191ec9a83bbfe63eea61c3e9d12474bb910d5613bce8fa46e84a80b75b2"}, + {file = "yarl-1.15.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a082dc948045606f62dca0228ab24f13737180b253378d6443f5b2b9ef8beefe"}, + {file = "yarl-1.15.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0a843e692f9d5402b3455653f4607dc521de2385f01c5cad7ba4a87c46e2ea8d"}, + {file = "yarl-1.15.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5093a453176a4fad4f9c3006f507cf300546190bb3e27944275a37cfd6323a65"}, + {file = "yarl-1.15.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2597a589859b94d0a5e2f5d30fee95081867926e57cb751f8b44a7dd92da4e79"}, + {file = "yarl-1.15.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f5a1ca6eaabfe62718b87eac06d9a47b30cf92ffa065fee9196d3ecd24a3cf1"}, + {file = "yarl-1.15.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4ac83b307cc4b8907345b52994055c6c3c2601ceb6fcb94c5ed6a93c6b4e8257"}, + {file = "yarl-1.15.5-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:325e2beb2cd8654b276e7686a3cd203628dd3fe32d5c616e632bc35a2901fb16"}, + {file = "yarl-1.15.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:75d04ba8ed335042328086e643e01165e0c24598216f72da709b375930ae3bdb"}, + {file = "yarl-1.15.5-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7abd7d15aedb3961a967cc65f8144dbbca42e3626a21c5f4f29919cf43eeafb9"}, + {file = "yarl-1.15.5-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:294c742a273f44511f14b03a9e06b66094dcdf4bbb75a5e23fead548fd5310ae"}, + {file = "yarl-1.15.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:63d46606b20f80a6476f1044bab78e1a69c2e0747f174583e2f12fc70bad2170"}, + {file = "yarl-1.15.5-cp311-cp311-win32.whl", hash = "sha256:b1217102a455e3ac9ac293081093f21f0183e978c7692171ff669fee5296fa28"}, + {file = "yarl-1.15.5-cp311-cp311-win_amd64.whl", hash = "sha256:5848500b6a01497560969e8c3a7eb1b2570853c74a0ca6f67ebaf6064106c49b"}, + {file = "yarl-1.15.5-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d3309ee667f2d9c7ac9ecf44620d6b274bfdd8065b8c5019ff6795dd887b8fed"}, + {file = "yarl-1.15.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:96ce879799fee124d241ea3b84448378f638e290c49493d00b706f3fd57ec22b"}, + {file = "yarl-1.15.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c884dfa56b050f718ea3cbbfd972e29a6f07f63a7449b10d9a20d64f7eec92e2"}, + {file = "yarl-1.15.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0327081978fe186c3390dd4f73f95f825d0bb9c74967e22c2a1a87735974d8f5"}, + {file = "yarl-1.15.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:524b3bb7dff320e305bc979c65eddc0342548c56ea9241502f907853fe53c408"}, + {file = "yarl-1.15.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd56de8b645421ff09c993fdb0ee9c5a3b50d290a8f55793b500d99b34d0c1ce"}, + {file = "yarl-1.15.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c166ad987265bb343be58cdf4fbc4478cc1d81f2246d2be9a15f94393b269faa"}, + {file = "yarl-1.15.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d56980374a10c74255fcea6ebcfb0aeca7166d212ee9fd7e823ddef35fb62ad0"}, + {file = "yarl-1.15.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cbf36099a9b407e1456dbf55844743a98603fcba32d2a46fb3a698d926facf1b"}, + {file = "yarl-1.15.5-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d7fa4b033e2f267e37aabcc36949fa89f9f1716a723395912147f9cf3fb437c7"}, + {file = "yarl-1.15.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:bb129f77ddaea2d8e6e00417b8d907448de3407af4eddacca0a515574ad71493"}, + {file = "yarl-1.15.5-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:68e837b3edfcd037f9706157e7cb8efda832de6248c7d9e893e2638356dfae5d"}, + {file = "yarl-1.15.5-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5b8af4165e097ff84d9bbb97bb4f4d7f71b9c1c9565a2d0e27d93e5f92dae220"}, + {file = "yarl-1.15.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:70d074d5a96e0954fe6db81ff356f4361397da1cda3f7c127fc0902f671a087e"}, + {file = "yarl-1.15.5-cp312-cp312-win32.whl", hash = "sha256:362da97ad4360e4ef1dd24ccdd3bceb18332da7f40026a42f49b7edd686e31c3"}, + {file = "yarl-1.15.5-cp312-cp312-win_amd64.whl", hash = "sha256:9aa054d97033beac9cb9b19b7c0b8784b85b12cd17879087ca6bffba57884e02"}, + {file = "yarl-1.15.5-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5fadcf532fd9f6cbad71485ef8c2462dd9a91d3efc72ca01eb0970792c92552a"}, + {file = "yarl-1.15.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8b7dd6983c81523f9de0ae6334c3b7a3cb33283936e0525f80c4f713f54a9bb6"}, + {file = "yarl-1.15.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:fcfd663dc88465ebe41c7c938bdc91c4b01cda96a0d64bf38fd66c1877323771"}, + {file = "yarl-1.15.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd529e637cd23204bd82072f6637cff7af2516ad2c132e8f3342cbc84871f7d1"}, + {file = "yarl-1.15.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b30f13fac56598474071a4f1ecd66c78fdaf2f8619042d7ca135f72dbb348cf"}, + {file = "yarl-1.15.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:44088ec0be82fba118ed29b6b429f80bf295297727adae4c257ac297e01e8bcd"}, + {file = "yarl-1.15.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:607683991bab8607e5158cd290dd8fdaa613442aeab802fe1c237d3a3eee7358"}, + {file = "yarl-1.15.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da48cdff56b01ea4282a6d04b83b07a2088351a4a3ff7aacc1e7e9b6b04b90b9"}, + {file = 
"yarl-1.15.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9162ea117ce8bad8ebc95b7376b4135988acd888d2cf4702f8281e3c11f8b81f"}, + {file = "yarl-1.15.5-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:e8aa19c39cb20bfb16f0266df175a6004943122cf20707fbf0cacc21f6468a25"}, + {file = "yarl-1.15.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5d6be369488d503c8edc14e2f63d71ab2a607041ad216a8ad444fa18e8dea792"}, + {file = "yarl-1.15.5-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6e2c674cfe4c03ad7a4d536b1f808221f0d11a360486b4b032d2557c0bd633ad"}, + {file = "yarl-1.15.5-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:041bafaa82b77fd4ec2826d42a55461ec86d999adf7ed9644eef7e8a9febb366"}, + {file = "yarl-1.15.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2eeb9ba53c055740cd282ae9d34eb7970d65e73a46f15adec4b0c1b0f2e55cc2"}, + {file = "yarl-1.15.5-cp313-cp313-win32.whl", hash = "sha256:73143dd279e641543da52c55652ad7b4c7c5f79e797f124f58f04cc060f14271"}, + {file = "yarl-1.15.5-cp313-cp313-win_amd64.whl", hash = "sha256:94ab1185900f43760d5487c8e49f5f1a66f864e36092f282f1813597479b9dfa"}, + {file = "yarl-1.15.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6b3d2767bd64c62909ea33525b954ba05c8f9726bfdf2141d175da4e344f19ae"}, + {file = "yarl-1.15.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:44359c52af9c383e5107f3b6301446fc8269599721fa42fafb2afb5f31a42dcb"}, + {file = "yarl-1.15.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6493da9ba5c551978c679ab04856c2cf8f79c316e8ec8c503460a135705edc3b"}, + {file = "yarl-1.15.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a6b6e95bc621c11cf9ff21012173337e789f2461ebc3b4e5bf65c74ef69adb8"}, + {file = "yarl-1.15.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7983290ede3aaa2c9620879530849532529b4dcbf5b12a0b6a91163a773eadb9"}, + {file = "yarl-1.15.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07a4b53abe85813c538b9cdbb02909ebe3734e3af466a587df516e960d500cc8"}, + {file = "yarl-1.15.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5882faa2a6e684f65ee44f18c701768749a950cbd5e72db452fc07805f6bdec0"}, + {file = "yarl-1.15.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e27861251d9c094f641d39a8a78dd2371fb9a252ea2f689d1ad353a31d46a0bc"}, + {file = "yarl-1.15.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8669a110f655c9eb22f16fb68a7d4942020aeaa09f1def584a80183e3e89953c"}, + {file = "yarl-1.15.5-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:10bfe0bef4cf5ea0383886beda004071faadedf2647048b9f876664284c5b60d"}, + {file = "yarl-1.15.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f7de0d4b6b4d8a77e422eb54d765255c0ec6883ee03b8fd537101633948619d7"}, + {file = "yarl-1.15.5-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:00bb3a559d7bd006a5302ecd7e409916939106a8cdbe31f4eb5e5b9ffcca57ea"}, + {file = "yarl-1.15.5-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:06ec070a2d71415f90dbe9d70af3158e7da97a128519dba2d1581156ee27fb92"}, + {file = "yarl-1.15.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b997a806846c00d1f41d6a251803732837771b2091bead7566f68820e317bfe7"}, + {file = "yarl-1.15.5-cp39-cp39-win32.whl", hash = "sha256:7825506fbee4055265528ec3532a8197ff26fc53d4978917a4c8ddbb4c1667d7"}, + {file = "yarl-1.15.5-cp39-cp39-win_amd64.whl", hash = "sha256:71730658be0b5de7c570a9795d7404c577b2313c1db370407092c66f70e04ccb"}, + {file = 
"yarl-1.15.5-py3-none-any.whl", hash = "sha256:625f31d6650829fba4030b4e7bdb2d69e41510dddfa29a1da27076c199521757"}, + {file = "yarl-1.15.5.tar.gz", hash = "sha256:8249147ee81c1cf4d1dc6f26ba28a1b9d92751529f83c308ad02164bb93abd0d"}, ] [package.dependencies] @@ -1627,4 +1627,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "5be8d29dbae4d4fd62c69ecf019235190c39808fa799be106db4df62ca2881b9" +content-hash = "a3d4928d7f9a3bcd2e1bb3d61275ea75dde6010ed9c1db2452fb51263cf1e93c" diff --git a/packages/opentelemetry-instrumentation-anthropic/pyproject.toml b/packages/opentelemetry-instrumentation-anthropic/pyproject.toml index 4eaa40d292..5651115e10 100644 --- a/packages/opentelemetry-instrumentation-anthropic/pyproject.toml +++ b/packages/opentelemetry-instrumentation-anthropic/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_message_streaming.yaml b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_message_streaming.yaml index 36aeec1d8c..7d5450f2ea 100644 --- a/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_message_streaming.yaml +++ b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_message_streaming.yaml @@ -19,33 +19,35 @@ interactions: host: - api.anthropic.com user-agent: - - Anthropic/Python 0.21.3 + - Anthropic/Python 0.36.2 x-stainless-arch: - - other:amd64 + - arm64 x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - Windows + - MacOS x-stainless-package-version: - - 0.21.3 + - 0.36.2 + x-stainless-retry-count: + - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.9.13 + - 3.12.3 method: POST uri: https://api.anthropic.com/v1/messages response: body: string: 'event: message_start - data: {"type":"message_start","message":{"id":"msg_01L2s6GwyR6itC3AdQB8kDR8","type":"message","role":"assistant","content":[],"model":"claude-3-haiku-20240307","stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":17,"output_tokens":2}} } + data: {"type":"message_start","message":{"id":"msg_01MXWxhWoPSgrYhjTuMDM6F1","type":"message","role":"assistant","model":"claude-3-haiku-20240307","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":17,"output_tokens":3}} } event: content_block_start - data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} } + data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}} event: ping @@ -55,826 +57,89 @@ interactions: event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Okay"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - here"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"''s"} } - - - event: content_block_delta - - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - joke"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - about"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - Open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"T"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"el"} - } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emet"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":":"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\nWhy"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - did"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - software"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - developer"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - use"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - Open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"T"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"el"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emet"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"?"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - Because"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - they"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - needed"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - way"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - trace"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - all"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - bugs"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - in"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - their"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - code"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"!"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\nIn"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - all"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - s"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"eri"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ousness"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - Open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"T"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"el"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emet"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - is"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"source"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - observ"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ability"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - framework"}} - - - event: content_block_delta - - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - that"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - helps"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - developers"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - instrument"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - generate"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - collect"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - and"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - export"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - tel"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emet"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - data"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - ("} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"metrics"} - } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - logs"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - and"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - traces"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":")"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - help"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - understand"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - and"}} - - - event: content_block_delta - - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - monitor"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - performance"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - and"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - behavior"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - their"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - distributed"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - applications"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\nThe"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - joke"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - plays"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - on"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - fact"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - that"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - many"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - developers"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - struggle"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - with"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - debugging"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - complex"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - distributed"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - systems"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } 
- - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - and"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - Open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"T"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"el"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emet"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - aims"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - provide"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - unified"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"source"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - solution"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - help"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Here''s + an"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - \""} } + OpenTelemet"}} event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"trace"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry-"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\""} - } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"themed + joke for"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - and"} } + you:\n\nWhy"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - identify"} } + did the developer"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - issues"} } + feel"} } event: content_block_delta data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - within"}} + so"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - these"} } + lost"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - kinds"} } + when"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } + using"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - systems"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."}} + OpenTelemet"}} event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - So"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry?"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\nThey"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - developer"} } + were"} } event: content_block_delta @@ -886,538 +151,335 @@ interactions: event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - joke"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - turns"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - Open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"T"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"el"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emet"} - } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - help"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - them"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - \""} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"trace"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - all"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - bugs"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\""} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - in"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - their"} } + a"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - c"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ode"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"base"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\nOf"} } + Span"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - course"} } + of Confusion"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","}} + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"!\n\nThe"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - Open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"T"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"el"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emet"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry"} } + idea"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - is"} } + behind this"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - much"} } + joke"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - more"} } + is that OpenT"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - than"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"elemetry + uses"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - just"} } + the"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } + concept"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - debugging"} } + of \""} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - tool"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"spans\" + to represent"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - -"} } + individual"} } 
event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - it"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"''s"} - } + operations or requests within"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } + a distribute"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - powerful"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + system. When"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - observ"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ability"} } + a developer is"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - platform"} } + first"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - that"} } + getting starte"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - can"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + with OpenTel"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - provide"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emetry, + they"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - deep"} } + may feel a"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - insights"}} + bit dis"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - into"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"oriented + or"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } + lost trying"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - operation"} } + to understand all"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } + the different spans"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - modern"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} - } + and how"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - cloud"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"native"} } + they fit"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - applications"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."} } + together -"} } 
event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - But"} } + hence"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } + the \"Span"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - joke"} } + of Confusion\""} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - capital"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"izes"} } + p"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - on"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"un."} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\nHopefully + this gives"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - \""} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"tr"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"acing"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\""} } + you a chuck"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - aspect"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"le + an"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"}} + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + provides"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - p"} } + a ligh"}} event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"oke"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"thearted + introduction"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } + to some"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - bit"} } + of the"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } + key concepts in"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - fun"}} + OpenTelemet"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - at"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry."} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } + Let me know if"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - challenges"} } + you''"} } event: content_block_delta - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + like to"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - distributed"} } + hear any other tech"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - application"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - development"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"theme"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + jokes!"} } event: content_block_stop - data: {"type":"content_block_stop","index":0 } + data: {"type":"content_block_stop","index":0 } event: message_delta - data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":245} } + data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":171} } event: message_stop - data: {"type":"message_stop" } + data: {"type":"message_stop" } ' @@ -1425,7 +487,7 @@ interactions: CF-Cache-Status: - DYNAMIC CF-RAY: - - 87084fb4ddc878d7-LAX + - 8d5c8d70d8c09464-SJC Cache-Control: - no-cache Connection: @@ -1433,25 +495,27 @@ interactions: Content-Type: - text/event-stream; charset=utf-8 Date: - - Sun, 07 Apr 2024 07:30:16 GMT + - Sun, 20 Oct 2024 22:47:56 GMT Server: - cloudflare Transfer-Encoding: - chunked + X-Robots-Tag: + - none anthropic-ratelimit-requests-limit: - - '5' + - '50' anthropic-ratelimit-requests-remaining: - - '4' + - '49' anthropic-ratelimit-requests-reset: - - '2024-04-07T07:31:00Z' + - '2024-10-20T22:48:55Z' anthropic-ratelimit-tokens-limit: - - '25000' + - '50000' anthropic-ratelimit-tokens-remaining: - - '24000' + - '49000' anthropic-ratelimit-tokens-reset: - - '2024-04-07T07:31:00Z' + - '2024-10-20T22:47:57Z' request-id: - - req_0199exscYJoaCKeJhmJWNdP5 + - req_01JUK6ATKHDAaXF2Bs7Xprjv via: - 1.1 google status: diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching.yaml b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching.yaml new file mode 100644 index 0000000000..45d71e1645 --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching.yaml @@ -0,0 +1,333 @@ +interactions: +- request: + body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": [{"type": + "text", "text": "test_anthropic_prompt_caching <- IGNORE THIS. ARTICLES START + ON THE NEXT LINE\nOpen-Source Library OpenLLMetry Brings LLM Functionality to + OpenTelemetry\nTraceloop, a YCombinator-backed company, has announced the release + of OpenLLMetry, a new open-source\nlibrary designed to extend OpenTelemetry + with Large Language Model (LLM) functionality. This innovative\ntool aims to + bridge the gap between traditional application monitoring and the rapidly evolving + field\nof AI-powered systems. 
OpenLLMetry builds upon the widely-adopted OpenTelemetry + framework, which provides\na standardized approach to collecting and exporting + telemetry data from cloud-native applications. By\nintegrating LLM-specific + features, OpenLLMetry enables developers to gain deeper insights into\nthe performance + and behavior of AI models within their applications.\n\nKey features of OpenLLMetry + include:\n\nLLM-specific metrics and traces\nSeamless integration with existing + OpenTelemetry setups\nSupport for popular LLM frameworks and platforms\n\nThe + open-source nature of OpenLLMetry is expected to foster community contributions + and rapid adoption\namong developers working with LLMs. As AI continues to transform + the software landscape, tools\nlike OpenLLMetry are poised to play a vital role + in ensuring the reliability and performance of\nnext-generation applications.\n\n========================================================================\n\nMajor + LLM Providers Introduce Prompt Caching, Boosting Speed and Reducing Costs\n\nIn + a significant development for the artificial intelligence industry, leading + Large Language Model (LLM) providers,\nincluding Anthropic, have announced the + implementation of prompt caching. This new feature promises to dramatically\nimprove + the speed and cost-effectiveness of LLM API calls, particularly for applications + with repetitive prompt patterns.\n\nUnderstanding Prompt Caching\nPrompt caching + is a technique that allows LLM providers to store and quickly retrieve responses + for frequently used prompts.\nInstead of processing the same or similar prompts + repeatedly, the system can serve pre-computed responses,\nsignificantly reducing + computation time and resources.\n\nBenefits of Prompt Caching\n1. Improved Response + Times\n\nWith prompt caching, response times for cached prompts can be reduced + from seconds to milliseconds. This dramatic speed\nimprovement enables near-instantaneous + responses in many scenarios, enhancing user experience in AI-powered applications.\n\n2. + Cost Reduction\nBy eliminating the need to reprocess identical or highly similar + prompts, prompt caching can substantially reduce the\ncomputational resources + required. This efficiency translates directly into cost savings for developers + and businesses\nutilizing LLM APIs.\n\n3. Scalability\nThe reduced computational + load allows LLM providers to handle a higher volume of requests with existing + infrastructure,\nimproving the scalability of their services.\n\nUse Cases and + Impact\nPrompt caching is particularly beneficial for applications with repetitive + prompt patterns. 
Some key use cases include:\n\nCustomer service chatbots handling + common queries\nContent moderation systems processing similar types of content\nLanguage + translation services for frequently translated phrases or sentences\nAutomated + coding assistants dealing with standard programming tasks\n\nImplementation + by Major Providers\nWhile the specific implementation details vary among providers, + the general approach involves:\n\nIdentifying frequently used prompts\nStoring + pre-computed responses\nImplementing efficient lookup mechanisms\nBalancing + cache freshness with performance gains\n\nAnthropic, known for its Claude AI + model, has been at the forefront of this technology.\n\nFuture Implications\nThe + introduction of prompt caching by major LLM providers is likely to have far-reaching + effects on the AI industry:\n\nBroader Adoption: Reduced costs and improved + performance could lead to wider adoption of LLM technologies across various + sectors.\nNew Application Paradigms: Developers may create new types of applications + that leverage the near-instantaneous response times of cached prompts.\nEvolution + of Pricing Models: LLM providers might introduce new pricing structures that + reflect the efficiency gains of prompt caching.\n\nAs the technology matures, + we can expect to see further refinements and innovative applications of prompt + caching, potentially\nreshaping the landscape of AI-powered services and applications.\n\n========================================================================\n\n\ud83d\udcca + Why Unit Testing is Non-Negotiable in Software Development \ud83d\udda5\ufe0f\nAs + a software professional, I can''t stress enough how crucial unit testing is + to our craft. Here''s why it''s a must-have practice:\n\n\ud83d\udc1b Catches + bugs early: Identify and fix issues before they snowball into major problems.\n\ud83d\udcb0 + Saves time and money: Less debugging time means faster development and lower + costs.\n\ud83c\udfd7\ufe0f Improves code quality: Writing testable code inherently + leads to better architecture.\n\ud83d\udd04 Facilitates refactoring: Tests give + you confidence to improve your code without breaking functionality.\n\ud83d\udcda + Serves as documentation: Well-written tests explain how your code should behave.\n\ud83e\udd1d + Enhances collaboration: New team members can understand and contribute to the + codebase faster.\n\nRemember: The time invested in unit testing pays off multifold + in the long run. It''s not just good practice\u2014it''s professional responsibility.\nWhat''s + your take on unit testing? Share your experiences below! 
\ud83d\udc47", "cache_control": + {"type": "ephemeral"}}]}], "model": "claude-3-5-sonnet-20240620", "system": + [{"type": "text", "text": "You help generate concise summaries of news articles + and blog posts that user sends you."}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + anthropic-beta: + - prompt-caching-2024-07-31 + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5970' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - Anthropic/Python 0.36.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.36.2 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.3 + method: POST + uri: https://api.anthropic.com/v1/messages?beta=prompt_caching + response: + body: + string: !!binary | + H4sIAAAAAAAAA2xUbW8cNRD+KyN/3jvuLqGV9huFIqLmgNJIIAg6+exnb916bdczTm6J8t+RvQm0 + hU8rzdvzMuN9UM6qXk18Omy2r97aGfPF29/3L9/8ug37l+ff7PtfVKdkTqhVYNYnqE7l6GtAMzsW + HUR1aooWXvXKeF0sVherr1ccQ4Csdpvd5ebFbqM6ZWIQBFH9Hw/PQwXn2t4+vfoBGaQzyMRgHIO4 + TJPODkxxIBlBMmbUEnHGg/vbcBu2a/opIVxf7yF57ulH3FNMCCuOJRuQd8es80w4C4J14dTKb+Ax + 1Qa6dzLS9fWehhKMuBi0dzKv6TvcwccES8eZbrI28DGmjpwQgj56MFkgIZML7E6jMLkgkb65ouYG + JeQh5kkHgwbiAumUvDO6ovCa3mCmAVpKRu01vlhUJitOMG5whipDZ/grqfBMOtiKgVNuIxbqODuW + /8piSEm8rg7t1rTX72NuKlOOd84iN7Y52mJQY1MSMtqMdZBEclOtA3ECbMPNaKUmsvCaXiFgcPIv + 7UGzIFMGpxgYJG4Cd+TjPTKZOKUiejG31rTNcLcIWqAssdFeH93i/s9tx8Xr7GcqjKF4GmL+zMFF + f0aCOHF3zzqYvPsAMqOWY5TFtafTa5tZzFvT63OCEdiq1+baf8xRWyw+aRtTM7m2B9x/ikxJZ23d + aVr8vVjT1ZRilrbqOFAJTkiwrMUF4jjIfb1ru9zUhCD9bVjRt1rMCKZjOTGhaq3Rd/oO3Bxs4FMM + mKkmrharmEy0oI+lXWpNfK9N9U0LmDIGbSRmF05tGHLt0Ew2mlKRm4Saeh3GyriO814fY37K3Iz/ + PDHClEbN7q9KaNTyhTQmk4txetlNynEA87Ll/9Pc5CQ91+c8VGPqk/YxnCiXsFaPf3aKJaZDhuYY + VK8Q7EFKDuopwfhYEAxUH4r3nSrtj9Q/KBdSkYPEDwis+stO1VvGwWQ0TYfP89vti4vnkgxtv0hv + OhWLfBrZ7S4fH/8GAAD//wMA6PVqOS4FAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8d5c3ca209b89834-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 20 Oct 2024 21:52:51 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-ratelimit-requests-limit: + - '50' + anthropic-ratelimit-requests-remaining: + - '49' + anthropic-ratelimit-requests-reset: + - '2024-10-20T21:52:55Z' + anthropic-ratelimit-tokens-limit: + - '40000' + anthropic-ratelimit-tokens-remaining: + - '40000' + anthropic-ratelimit-tokens-reset: + - '2024-10-20T21:52:51Z' + request-id: + - req_01CojC1WEucKCFcm87D8LCdw + via: + - 1.1 google + status: + code: 200 + message: OK +- request: + body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": [{"type": + "text", "text": "test_anthropic_prompt_caching <- IGNORE THIS. ARTICLES START + ON THE NEXT LINE\nOpen-Source Library OpenLLMetry Brings LLM Functionality to + OpenTelemetry\nTraceloop, a YCombinator-backed company, has announced the release + of OpenLLMetry, a new open-source\nlibrary designed to extend OpenTelemetry + with Large Language Model (LLM) functionality. This innovative\ntool aims to + bridge the gap between traditional application monitoring and the rapidly evolving + field\nof AI-powered systems. 
OpenLLMetry builds upon the widely-adopted OpenTelemetry + framework, which provides\na standardized approach to collecting and exporting + telemetry data from cloud-native applications. By\nintegrating LLM-specific + features, OpenLLMetry enables developers to gain deeper insights into\nthe performance + and behavior of AI models within their applications.\n\nKey features of OpenLLMetry + include:\n\nLLM-specific metrics and traces\nSeamless integration with existing + OpenTelemetry setups\nSupport for popular LLM frameworks and platforms\n\nThe + open-source nature of OpenLLMetry is expected to foster community contributions + and rapid adoption\namong developers working with LLMs. As AI continues to transform + the software landscape, tools\nlike OpenLLMetry are poised to play a vital role + in ensuring the reliability and performance of\nnext-generation applications.\n\n========================================================================\n\nMajor + LLM Providers Introduce Prompt Caching, Boosting Speed and Reducing Costs\n\nIn + a significant development for the artificial intelligence industry, leading + Large Language Model (LLM) providers,\nincluding Anthropic, have announced the + implementation of prompt caching. This new feature promises to dramatically\nimprove + the speed and cost-effectiveness of LLM API calls, particularly for applications + with repetitive prompt patterns.\n\nUnderstanding Prompt Caching\nPrompt caching + is a technique that allows LLM providers to store and quickly retrieve responses + for frequently used prompts.\nInstead of processing the same or similar prompts + repeatedly, the system can serve pre-computed responses,\nsignificantly reducing + computation time and resources.\n\nBenefits of Prompt Caching\n1. Improved Response + Times\n\nWith prompt caching, response times for cached prompts can be reduced + from seconds to milliseconds. This dramatic speed\nimprovement enables near-instantaneous + responses in many scenarios, enhancing user experience in AI-powered applications.\n\n2. + Cost Reduction\nBy eliminating the need to reprocess identical or highly similar + prompts, prompt caching can substantially reduce the\ncomputational resources + required. This efficiency translates directly into cost savings for developers + and businesses\nutilizing LLM APIs.\n\n3. Scalability\nThe reduced computational + load allows LLM providers to handle a higher volume of requests with existing + infrastructure,\nimproving the scalability of their services.\n\nUse Cases and + Impact\nPrompt caching is particularly beneficial for applications with repetitive + prompt patterns. 
Some key use cases include:\n\nCustomer service chatbots handling + common queries\nContent moderation systems processing similar types of content\nLanguage + translation services for frequently translated phrases or sentences\nAutomated + coding assistants dealing with standard programming tasks\n\nImplementation + by Major Providers\nWhile the specific implementation details vary among providers, + the general approach involves:\n\nIdentifying frequently used prompts\nStoring + pre-computed responses\nImplementing efficient lookup mechanisms\nBalancing + cache freshness with performance gains\n\nAnthropic, known for its Claude AI + model, has been at the forefront of this technology.\n\nFuture Implications\nThe + introduction of prompt caching by major LLM providers is likely to have far-reaching + effects on the AI industry:\n\nBroader Adoption: Reduced costs and improved + performance could lead to wider adoption of LLM technologies across various + sectors.\nNew Application Paradigms: Developers may create new types of applications + that leverage the near-instantaneous response times of cached prompts.\nEvolution + of Pricing Models: LLM providers might introduce new pricing structures that + reflect the efficiency gains of prompt caching.\n\nAs the technology matures, + we can expect to see further refinements and innovative applications of prompt + caching, potentially\nreshaping the landscape of AI-powered services and applications.\n\n========================================================================\n\n\ud83d\udcca + Why Unit Testing is Non-Negotiable in Software Development \ud83d\udda5\ufe0f\nAs + a software professional, I can''t stress enough how crucial unit testing is + to our craft. Here''s why it''s a must-have practice:\n\n\ud83d\udc1b Catches + bugs early: Identify and fix issues before they snowball into major problems.\n\ud83d\udcb0 + Saves time and money: Less debugging time means faster development and lower + costs.\n\ud83c\udfd7\ufe0f Improves code quality: Writing testable code inherently + leads to better architecture.\n\ud83d\udd04 Facilitates refactoring: Tests give + you confidence to improve your code without breaking functionality.\n\ud83d\udcda + Serves as documentation: Well-written tests explain how your code should behave.\n\ud83e\udd1d + Enhances collaboration: New team members can understand and contribute to the + codebase faster.\n\nRemember: The time invested in unit testing pays off multifold + in the long run. It''s not just good practice\u2014it''s professional responsibility.\nWhat''s + your take on unit testing? Share your experiences below! 
\ud83d\udc47", "cache_control": + {"type": "ephemeral"}}]}], "model": "claude-3-5-sonnet-20240620", "system": + [{"type": "text", "text": "You help generate concise summaries of news articles + and blog posts that user sends you."}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + anthropic-beta: + - prompt-caching-2024-07-31 + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5970' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - Anthropic/Python 0.36.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.36.2 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.3 + method: POST + uri: https://api.anthropic.com/v1/messages?beta=prompt_caching + response: + body: + string: !!binary | + H4sIAAAAAAAAA1xU227jNhD9lQGfba9jb1NAb2mxRYI6aYHuQ4GmMGjqSJqGIhnO0I4b5N8XlBw0 + 3ScDnNu5ya+GW9OYUfr9+mqL28Of18N4E25/fErXDw9p83zemYXRc0LtgojtYRYmR18frAiL2qBm + YcbYwpvGOG9Li+V2+cNSYgjQ5Wa9+by+3qzNwrgYFEFN89fr+1LFSx2ffhpziwyyGeRicCwgKeNo + M0ModqQDSIeM2qLsPORTiqLSPIbHcLWi3xLCbncPzeeGHnCimBCWEkt2IM+HbPOZ8KIILYd+av8K + j7EO0Il1oN3unroSnHIM1rOeV/QrztTBaskQ4uB8aVH7lpLguGNHdZ6dfNJsHWRBHBR9tnXHvBUv + LFovCrQkWZANLUlJKWalLmZKMRVv83w+2xGnmJ9kRTc8CmmklOORW1ALJGTiINwPWuFopJs7msSn + hNzFPNrgMN3lQDYlz26CIquq0mZF9/afOJ+6rM1CPKaqQ9D6NiYlZ91QEWukQ4yiJAloJ+AZbXHV + IVFZ0U8I6Fj/k2Zk71ng4tQqKQYBKY9VGR9PyOTimIraWeN5zywJjxURWhJnvT3wbMDvk9lVH3+m + IuiKn0T7yG3WOSNBWfmICw0hz08gN1g9RJXpyCWDk2azSSv68pLgFG2ly2OyTquqHNoiNRo65Fj6 + gU5VLLJtTJO3dVvA6SMQSjbblvtxVnu7oruxujx5EjsqgZUUcxo4kMROTzXvLY7wMVULmsewpJ+t + ugFCh9ILoVKvr3/YI2QSczo+xoAz1cLdrJyQiy3ouUzZrYVfrKsyWoVQRmedxsyhn5Yh1wkr1EZX + 6uWJQi19CUNFXNd5bw8xXypfB1D94ghjGqzwvxXNYPU7XkK2GtBBZPb4kgOeLZ3N8jH0S0Ue6fAe + oVj0BO7n4A0gDqxs/UyYwxGiFebKvP29MKIx7TOsxGAag9DuteRgLgXBc0FwME0o3i9Mmf65mlfD + IRXda3xCENN8XpiadOxdxsRx///6+r2eYdvvaldX19uFiUU/Pm6267e3bwAAAP//AwAaD3KcVgUA + AA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8d5c3cbfe99d9834-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 20 Oct 2024 21:52:55 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-ratelimit-requests-limit: + - '50' + anthropic-ratelimit-requests-remaining: + - '49' + anthropic-ratelimit-requests-reset: + - '2024-10-20T21:53:55Z' + anthropic-ratelimit-tokens-limit: + - '40000' + anthropic-ratelimit-tokens-remaining: + - '40000' + anthropic-ratelimit-tokens-reset: + - '2024-10-20T21:52:55Z' + request-id: + - req_019qn7LXva9QThSdwBt7v1gE + via: + - 1.1 google + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching_async.yaml b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching_async.yaml new file mode 100644 index 0000000000..583b2ad57f --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching_async.yaml @@ -0,0 +1,333 @@ +interactions: +- request: + body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": [{"type": + "text", "text": "test_anthropic_prompt_caching_async <- IGNORE THIS. 
ARTICLES + START ON THE NEXT LINE\nOpen-Source Library OpenLLMetry Brings LLM Functionality + to OpenTelemetry\nTraceloop, a YCombinator-backed company, has announced the + release of OpenLLMetry, a new open-source\nlibrary designed to extend OpenTelemetry + with Large Language Model (LLM) functionality. This innovative\ntool aims to + bridge the gap between traditional application monitoring and the rapidly evolving + field\nof AI-powered systems. OpenLLMetry builds upon the widely-adopted OpenTelemetry + framework, which provides\na standardized approach to collecting and exporting + telemetry data from cloud-native applications. By\nintegrating LLM-specific + features, OpenLLMetry enables developers to gain deeper insights into\nthe performance + and behavior of AI models within their applications.\n\nKey features of OpenLLMetry + include:\n\nLLM-specific metrics and traces\nSeamless integration with existing + OpenTelemetry setups\nSupport for popular LLM frameworks and platforms\n\nThe + open-source nature of OpenLLMetry is expected to foster community contributions + and rapid adoption\namong developers working with LLMs. As AI continues to transform + the software landscape, tools\nlike OpenLLMetry are poised to play a vital role + in ensuring the reliability and performance of\nnext-generation applications.\n\n========================================================================\n\nMajor + LLM Providers Introduce Prompt Caching, Boosting Speed and Reducing Costs\n\nIn + a significant development for the artificial intelligence industry, leading + Large Language Model (LLM) providers,\nincluding Anthropic, have announced the + implementation of prompt caching. This new feature promises to dramatically\nimprove + the speed and cost-effectiveness of LLM API calls, particularly for applications + with repetitive prompt patterns.\n\nUnderstanding Prompt Caching\nPrompt caching + is a technique that allows LLM providers to store and quickly retrieve responses + for frequently used prompts.\nInstead of processing the same or similar prompts + repeatedly, the system can serve pre-computed responses,\nsignificantly reducing + computation time and resources.\n\nBenefits of Prompt Caching\n1. Improved Response + Times\n\nWith prompt caching, response times for cached prompts can be reduced + from seconds to milliseconds. This dramatic speed\nimprovement enables near-instantaneous + responses in many scenarios, enhancing user experience in AI-powered applications.\n\n2. + Cost Reduction\nBy eliminating the need to reprocess identical or highly similar + prompts, prompt caching can substantially reduce the\ncomputational resources + required. This efficiency translates directly into cost savings for developers + and businesses\nutilizing LLM APIs.\n\n3. Scalability\nThe reduced computational + load allows LLM providers to handle a higher volume of requests with existing + infrastructure,\nimproving the scalability of their services.\n\nUse Cases and + Impact\nPrompt caching is particularly beneficial for applications with repetitive + prompt patterns. 
Some key use cases include:\n\nCustomer service chatbots handling + common queries\nContent moderation systems processing similar types of content\nLanguage + translation services for frequently translated phrases or sentences\nAutomated + coding assistants dealing with standard programming tasks\n\nImplementation + by Major Providers\nWhile the specific implementation details vary among providers, + the general approach involves:\n\nIdentifying frequently used prompts\nStoring + pre-computed responses\nImplementing efficient lookup mechanisms\nBalancing + cache freshness with performance gains\n\nAnthropic, known for its Claude AI + model, has been at the forefront of this technology.\n\nFuture Implications\nThe + introduction of prompt caching by major LLM providers is likely to have far-reaching + effects on the AI industry:\n\nBroader Adoption: Reduced costs and improved + performance could lead to wider adoption of LLM technologies across various + sectors.\nNew Application Paradigms: Developers may create new types of applications + that leverage the near-instantaneous response times of cached prompts.\nEvolution + of Pricing Models: LLM providers might introduce new pricing structures that + reflect the efficiency gains of prompt caching.\n\nAs the technology matures, + we can expect to see further refinements and innovative applications of prompt + caching, potentially\nreshaping the landscape of AI-powered services and applications.\n\n========================================================================\n\n\ud83d\udcca + Why Unit Testing is Non-Negotiable in Software Development \ud83d\udda5\ufe0f\nAs + a software professional, I can''t stress enough how crucial unit testing is + to our craft. Here''s why it''s a must-have practice:\n\n\ud83d\udc1b Catches + bugs early: Identify and fix issues before they snowball into major problems.\n\ud83d\udcb0 + Saves time and money: Less debugging time means faster development and lower + costs.\n\ud83c\udfd7\ufe0f Improves code quality: Writing testable code inherently + leads to better architecture.\n\ud83d\udd04 Facilitates refactoring: Tests give + you confidence to improve your code without breaking functionality.\n\ud83d\udcda + Serves as documentation: Well-written tests explain how your code should behave.\n\ud83e\udd1d + Enhances collaboration: New team members can understand and contribute to the + codebase faster.\n\nRemember: The time invested in unit testing pays off multifold + in the long run. It''s not just good practice\u2014it''s professional responsibility.\nWhat''s + your take on unit testing? Share your experiences below! 
\ud83d\udc47", "cache_control": + {"type": "ephemeral"}}]}], "model": "claude-3-5-sonnet-20240620", "system": + [{"type": "text", "text": "You help generate concise summaries of news articles + and blog posts that user sends you."}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + anthropic-beta: + - prompt-caching-2024-07-31 + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5976' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - AsyncAnthropic/Python 0.36.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.36.2 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.3 + method: POST + uri: https://api.anthropic.com/v1/messages?beta=prompt_caching + response: + body: + string: !!binary | + H4sIAAAAAAAAA2xUTVMjNxD9K106jx1ggdr4RpJDqECF2mJzSEi5ZM0bj7IaSahbBi/Ff99qjU3I + Vk621V/vvX7tF+N7szITb9cnpx//uPw8SHkut6d/XnySj9u0+/rLqemM7DM0C8x2C9OZkoI+WGbP + YqOYzkypRzAr44KtPRYfFhcLTjFCFmcnZ+cnl2cnpjMuRUEUs/rr5dhU8Kzl7WNlfkUB2QJyKTrP + IK7TZIsHUxpIRpCMBZoi3gXwDzmx8OohPsTTJf2eEW9ubiFlT58QYBmrh3hfrENIKVOwNboR/fvE + jmyklBEXnGpxoOA3xZY94VkQex+3LfseAVNr/ORlpJubWxpqdOJTtMHLfknXQrmkne/BGl5whvOD + d6Rl3jHZ2JMoFu7IR8G2WAHP/fDsWXQWQ2rmriVzzTkVYcop12DLPLXYCU+pfOEl3Y+eSVIKZP2k + 38hPigE0peglFe2onXxkvx2FaUiFrq4XOT2hoCebc/DOKgteqohnS7oracpCP1s3avlm38bezdSK + an1r/0kzmHx8VUYu1CbXVZSxpOxdR6PdQTGpdlHQa4E2d4fm7xBf3V2TsyEQZyiy2JNLLAsMA5z4 + HSL4jTLcGP1jBbGkAqYCzikyZoJDwWNFlMM07qigr04HujTlKo0wiZ9AQ0kTMVyKfRNw8iH4w+8l + /YSIwQsf2IEGy4Ly77yOgkrZoB62diDUEzsb7MarOzrKzbG6xbCnyhhqaFjfb2C2QkGGeGV8xN82 + 82FJ15PawUYHPYbP0Qvdo9lGXT6C9BgIUx4t+6/gdi+uePHOBtKb1bqqdTLXkY/EaZAnPbkeO4SU + dVNL+g172nzPHg39pm6ph6CZv5tlPK6L2O583KofjjK41IMeq511gGXfBBysmw3azRl9clVH27mr + NkQclay2CMFuUmkx9QDIVhlVvrKtjaiV74gxgRlRvJ2FziUNYG73+r+c28i3Cw4pbheCMr2psDSv + f3eGJeV1geUUzcog9mupJZpDgJvxHMwq1hA6U9v/5erF+JirrCV9QWSzOu+MXgDWrqBxWv83fnp6 + eXFMKbD9d+GTzqQq71/Ozn98ff0GAAD//wMABs4kC8wFAAA= + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8d4f5530b89c9800-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 19 Oct 2024 08:17:46 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-ratelimit-requests-limit: + - '50' + anthropic-ratelimit-requests-remaining: + - '49' + anthropic-ratelimit-requests-reset: + - '2024-10-19T08:17:55Z' + anthropic-ratelimit-tokens-limit: + - '40000' + anthropic-ratelimit-tokens-remaining: + - '40000' + anthropic-ratelimit-tokens-reset: + - '2024-10-19T08:17:46Z' + request-id: + - req_014FixcQG3LDMh1PeBuuYNx1 + via: + - 1.1 google + status: + code: 200 + message: OK +- request: + body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": [{"type": + "text", "text": "test_anthropic_prompt_caching_async <- IGNORE THIS. ARTICLES + START ON THE NEXT LINE\nOpen-Source Library OpenLLMetry Brings LLM Functionality + to OpenTelemetry\nTraceloop, a YCombinator-backed company, has announced the + release of OpenLLMetry, a new open-source\nlibrary designed to extend OpenTelemetry + with Large Language Model (LLM) functionality. This innovative\ntool aims to + bridge the gap between traditional application monitoring and the rapidly evolving + field\nof AI-powered systems. 
OpenLLMetry builds upon the widely-adopted OpenTelemetry + framework, which provides\na standardized approach to collecting and exporting + telemetry data from cloud-native applications. By\nintegrating LLM-specific + features, OpenLLMetry enables developers to gain deeper insights into\nthe performance + and behavior of AI models within their applications.\n\nKey features of OpenLLMetry + include:\n\nLLM-specific metrics and traces\nSeamless integration with existing + OpenTelemetry setups\nSupport for popular LLM frameworks and platforms\n\nThe + open-source nature of OpenLLMetry is expected to foster community contributions + and rapid adoption\namong developers working with LLMs. As AI continues to transform + the software landscape, tools\nlike OpenLLMetry are poised to play a vital role + in ensuring the reliability and performance of\nnext-generation applications.\n\n========================================================================\n\nMajor + LLM Providers Introduce Prompt Caching, Boosting Speed and Reducing Costs\n\nIn + a significant development for the artificial intelligence industry, leading + Large Language Model (LLM) providers,\nincluding Anthropic, have announced the + implementation of prompt caching. This new feature promises to dramatically\nimprove + the speed and cost-effectiveness of LLM API calls, particularly for applications + with repetitive prompt patterns.\n\nUnderstanding Prompt Caching\nPrompt caching + is a technique that allows LLM providers to store and quickly retrieve responses + for frequently used prompts.\nInstead of processing the same or similar prompts + repeatedly, the system can serve pre-computed responses,\nsignificantly reducing + computation time and resources.\n\nBenefits of Prompt Caching\n1. Improved Response + Times\n\nWith prompt caching, response times for cached prompts can be reduced + from seconds to milliseconds. This dramatic speed\nimprovement enables near-instantaneous + responses in many scenarios, enhancing user experience in AI-powered applications.\n\n2. + Cost Reduction\nBy eliminating the need to reprocess identical or highly similar + prompts, prompt caching can substantially reduce the\ncomputational resources + required. This efficiency translates directly into cost savings for developers + and businesses\nutilizing LLM APIs.\n\n3. Scalability\nThe reduced computational + load allows LLM providers to handle a higher volume of requests with existing + infrastructure,\nimproving the scalability of their services.\n\nUse Cases and + Impact\nPrompt caching is particularly beneficial for applications with repetitive + prompt patterns. 
Some key use cases include:\n\nCustomer service chatbots handling + common queries\nContent moderation systems processing similar types of content\nLanguage + translation services for frequently translated phrases or sentences\nAutomated + coding assistants dealing with standard programming tasks\n\nImplementation + by Major Providers\nWhile the specific implementation details vary among providers, + the general approach involves:\n\nIdentifying frequently used prompts\nStoring + pre-computed responses\nImplementing efficient lookup mechanisms\nBalancing + cache freshness with performance gains\n\nAnthropic, known for its Claude AI + model, has been at the forefront of this technology.\n\nFuture Implications\nThe + introduction of prompt caching by major LLM providers is likely to have far-reaching + effects on the AI industry:\n\nBroader Adoption: Reduced costs and improved + performance could lead to wider adoption of LLM technologies across various + sectors.\nNew Application Paradigms: Developers may create new types of applications + that leverage the near-instantaneous response times of cached prompts.\nEvolution + of Pricing Models: LLM providers might introduce new pricing structures that + reflect the efficiency gains of prompt caching.\n\nAs the technology matures, + we can expect to see further refinements and innovative applications of prompt + caching, potentially\nreshaping the landscape of AI-powered services and applications.\n\n========================================================================\n\n\ud83d\udcca + Why Unit Testing is Non-Negotiable in Software Development \ud83d\udda5\ufe0f\nAs + a software professional, I can''t stress enough how crucial unit testing is + to our craft. Here''s why it''s a must-have practice:\n\n\ud83d\udc1b Catches + bugs early: Identify and fix issues before they snowball into major problems.\n\ud83d\udcb0 + Saves time and money: Less debugging time means faster development and lower + costs.\n\ud83c\udfd7\ufe0f Improves code quality: Writing testable code inherently + leads to better architecture.\n\ud83d\udd04 Facilitates refactoring: Tests give + you confidence to improve your code without breaking functionality.\n\ud83d\udcda + Serves as documentation: Well-written tests explain how your code should behave.\n\ud83e\udd1d + Enhances collaboration: New team members can understand and contribute to the + codebase faster.\n\nRemember: The time invested in unit testing pays off multifold + in the long run. It''s not just good practice\u2014it''s professional responsibility.\nWhat''s + your take on unit testing? Share your experiences below! 
\ud83d\udc47", "cache_control": + {"type": "ephemeral"}}]}], "model": "claude-3-5-sonnet-20240620", "system": + [{"type": "text", "text": "You help generate concise summaries of news articles + and blog posts that user sends you."}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + anthropic-beta: + - prompt-caching-2024-07-31 + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5976' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - AsyncAnthropic/Python 0.36.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.36.2 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.3 + method: POST + uri: https://api.anthropic.com/v1/messages?beta=prompt_caching + response: + body: + string: !!binary | + H4sIAAAAAAAAA2RUXW/jRgz8K8Q+tYDss313Kaq3fqRt0OTugOapTWGsVyNrmxV3b8m14wvy34uV + HCTXPkngkEPOkNKj8Z1pzSj77Wp9I7ub9eXF9Sd8l76sP+T7Py9/xcE0Rk8JNQsidg/TmBxDDVgR + L2pZTWPG2CGY1rhgS4fF28X7hURm6GKz2rxbXWxWpjEusoLVtH89PpMqHmr59GjNb8ggm0EusvMC + kjKONnsIxZ50AOmQUVPUuwB5k6KotHd8x+slfUzg6+sbaD619AFHigm8kFiyAwW/yzafCA8K7jzv + p/RbBIy1gI5eB7q+vqG+sFMf2QavpyX9jANCTOhod6LbbB1CjKkhrwS2uwChMbLXmCun5Y48i98P + KtTHTD9c0WSNkGeyKQXvbGWXJf2OE/WwWjIq6kLpUCdYSILzvXdUJ/NO3mhtK2dyxT5PFPPIePCi + /5cj0JJkWZ3ZLOnG/hPzpC7lePAdcu2oOXbFocbGpOSsGyqRRvJjzQNJArqpb8aU6qrfS/oRjN7r + y9i9FUWmDEmRBaR+hNA3ow/BC1zkTuggdH79tqEQj8jk4piK2tnumbyZZc4DdCTOBrvz8y4+TXsv + weZwoiLoS5hMfu3r7EpGgnr1h2d1QsHfg9xgdRd19lKzZQmzl4J88A6ypMuHBKfoqg9drgzHatjk + nu1imtJrOeP4ujPVi54df7ukqzHFrJYd6uUW9kqKeVGeSWKvx3rn3XxdI1jbO17QT1bdAKFd2Quh + 6qzRP+wBMnk6NR4j40QVuJptEnKxA30u081W4BfrqmdWIZTRWzff50SGXCusUBddqZ2n8St0yUOd + WEhhR3IxBLuL+QzfDqD6uRHGNFjxX2reYJU8H16Ufa1UyOXivJ3XFCLvF4o8vug/jzzJQt9758Hu + tDRPfzdGNKZthpXIpjXgbqslszkDgs8F7GBaLiE0pkz/pvbReE5FtxrvwWLad42pd42ty5iEbL/G + V894hu3+g63XF+8bE4u+Dm423z89/QsAAP//AwBFH3WoOAUAAA== + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8d4f556a8ab89800-SJC + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sat, 19 Oct 2024 08:17:54 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-ratelimit-requests-limit: + - '50' + anthropic-ratelimit-requests-remaining: + - '49' + anthropic-ratelimit-requests-reset: + - '2024-10-19T08:17:55Z' + anthropic-ratelimit-tokens-limit: + - '40000' + anthropic-ratelimit-tokens-remaining: + - '40000' + anthropic-ratelimit-tokens-reset: + - '2024-10-19T08:17:54Z' + request-id: + - req_01SGDzoRng4LTLJfwrvHzNmT + via: + - 1.1 google + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching_async_stream.yaml b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching_async_stream.yaml new file mode 100644 index 0000000000..10c905eb7c --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching_async_stream.yaml @@ -0,0 +1,1168 @@ +interactions: +- request: + body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": [{"type": + "text", "text": "test_anthropic_prompt_caching_async_stream <- IGNORE THIS. 
+ ARTICLES START ON THE NEXT LINE\nOpen-Source Library OpenLLMetry Brings LLM + Functionality to OpenTelemetry\nTraceloop, a YCombinator-backed company, has + announced the release of OpenLLMetry, a new open-source\nlibrary designed to + extend OpenTelemetry with Large Language Model (LLM) functionality. This innovative\ntool + aims to bridge the gap between traditional application monitoring and the rapidly + evolving field\nof AI-powered systems. OpenLLMetry builds upon the widely-adopted + OpenTelemetry framework, which provides\na standardized approach to collecting + and exporting telemetry data from cloud-native applications. By\nintegrating + LLM-specific features, OpenLLMetry enables developers to gain deeper insights + into\nthe performance and behavior of AI models within their applications.\n\nKey + features of OpenLLMetry include:\n\nLLM-specific metrics and traces\nSeamless + integration with existing OpenTelemetry setups\nSupport for popular LLM frameworks + and platforms\n\nThe open-source nature of OpenLLMetry is expected to foster + community contributions and rapid adoption\namong developers working with LLMs. + As AI continues to transform the software landscape, tools\nlike OpenLLMetry + are poised to play a vital role in ensuring the reliability and performance + of\nnext-generation applications.\n\n========================================================================\n\nMajor + LLM Providers Introduce Prompt Caching, Boosting Speed and Reducing Costs\n\nIn + a significant development for the artificial intelligence industry, leading + Large Language Model (LLM) providers,\nincluding Anthropic, have announced the + implementation of prompt caching. This new feature promises to dramatically\nimprove + the speed and cost-effectiveness of LLM API calls, particularly for applications + with repetitive prompt patterns.\n\nUnderstanding Prompt Caching\nPrompt caching + is a technique that allows LLM providers to store and quickly retrieve responses + for frequently used prompts.\nInstead of processing the same or similar prompts + repeatedly, the system can serve pre-computed responses,\nsignificantly reducing + computation time and resources.\n\nBenefits of Prompt Caching\n1. Improved Response + Times\n\nWith prompt caching, response times for cached prompts can be reduced + from seconds to milliseconds. This dramatic speed\nimprovement enables near-instantaneous + responses in many scenarios, enhancing user experience in AI-powered applications.\n\n2. + Cost Reduction\nBy eliminating the need to reprocess identical or highly similar + prompts, prompt caching can substantially reduce the\ncomputational resources + required. This efficiency translates directly into cost savings for developers + and businesses\nutilizing LLM APIs.\n\n3. Scalability\nThe reduced computational + load allows LLM providers to handle a higher volume of requests with existing + infrastructure,\nimproving the scalability of their services.\n\nUse Cases and + Impact\nPrompt caching is particularly beneficial for applications with repetitive + prompt patterns. 
Some key use cases include:\n\nCustomer service chatbots handling + common queries\nContent moderation systems processing similar types of content\nLanguage + translation services for frequently translated phrases or sentences\nAutomated + coding assistants dealing with standard programming tasks\n\nImplementation + by Major Providers\nWhile the specific implementation details vary among providers, + the general approach involves:\n\nIdentifying frequently used prompts\nStoring + pre-computed responses\nImplementing efficient lookup mechanisms\nBalancing + cache freshness with performance gains\n\nAnthropic, known for its Claude AI + model, has been at the forefront of this technology.\n\nFuture Implications\nThe + introduction of prompt caching by major LLM providers is likely to have far-reaching + effects on the AI industry:\n\nBroader Adoption: Reduced costs and improved + performance could lead to wider adoption of LLM technologies across various + sectors.\nNew Application Paradigms: Developers may create new types of applications + that leverage the near-instantaneous response times of cached prompts.\nEvolution + of Pricing Models: LLM providers might introduce new pricing structures that + reflect the efficiency gains of prompt caching.\n\nAs the technology matures, + we can expect to see further refinements and innovative applications of prompt + caching, potentially\nreshaping the landscape of AI-powered services and applications.\n\n========================================================================\n\n\ud83d\udcca + Why Unit Testing is Non-Negotiable in Software Development \ud83d\udda5\ufe0f\nAs + a software professional, I can''t stress enough how crucial unit testing is + to our craft. Here''s why it''s a must-have practice:\n\n\ud83d\udc1b Catches + bugs early: Identify and fix issues before they snowball into major problems.\n\ud83d\udcb0 + Saves time and money: Less debugging time means faster development and lower + costs.\n\ud83c\udfd7\ufe0f Improves code quality: Writing testable code inherently + leads to better architecture.\n\ud83d\udd04 Facilitates refactoring: Tests give + you confidence to improve your code without breaking functionality.\n\ud83d\udcda + Serves as documentation: Well-written tests explain how your code should behave.\n\ud83e\udd1d + Enhances collaboration: New team members can understand and contribute to the + codebase faster.\n\nRemember: The time invested in unit testing pays off multifold + in the long run. It''s not just good practice\u2014it''s professional responsibility.\nWhat''s + your take on unit testing? Share your experiences below! 
\ud83d\udc47", "cache_control": + {"type": "ephemeral"}}]}], "model": "claude-3-5-sonnet-20240620", "stream": + true, "system": [{"type": "text", "text": "You help generate concise summaries + of news articles and blog posts that user sends you."}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + anthropic-beta: + - prompt-caching-2024-07-31 + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5999' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - AsyncAnthropic/Python 0.36.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.36.2 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.3 + method: POST + uri: https://api.anthropic.com/v1/messages?beta=prompt_caching + response: + body: + string: 'event: message_start + + data: {"type":"message_start","message":{"id":"msg_01VBrTFKAYvujMd593dtpRHF","type":"message","role":"assistant","model":"claude-3-5-sonnet-20240620","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":4,"cache_creation_input_tokens":1167,"cache_read_input_tokens":0,"output_tokens":1}} } + + + event: content_block_start + + data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} + } + + + event: ping + + data: {"type": "ping"} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Here"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + are conc"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ise + summ"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"aries + of the"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + three"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + articles"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":":\n\n1"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":". 
+ Open"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"LLMetry:"} + } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + New"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + open"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-source + library"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + extending"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + OpenTelemetry"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + with"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + L"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"LM + functionality"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":". + Key"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + features include L"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"LM-specific + metrics"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + and traces, integration"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + with existing"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + OpenTelemetry"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + setups, and support"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + for popular LLM"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + frameworks."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Aims to provide"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + deeper"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + insights into AI"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + model performance within"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + applications.\n\n2."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Major LLM providers"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + introduce prompt caching to"} } + + + event: 
content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + improve speed and reduce costs"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":". + Benefits"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + include faster"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + response times, lower"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + computational costs, and improve"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + scalability. Particularly"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + useful for applications with repet"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"itive + prom"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"pts + like chat"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"bots + an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + content moderation."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Expecte"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + to impact AI industry"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + through"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + wider"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + adoption and new application paradig"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ms.\n\n3."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Importance of unit testing in"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + software development:"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Catches bugs"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + early"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Saves time an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + money"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Improves code"} } + + + event: content_block_delta + + data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + quality\n- Facilit"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ates + refactoring"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Serves as documentation"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Enhances"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + collaboration"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\nThe"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + post"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + emphas"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"izes + unit testing as a"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + crucial"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + professional"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + responsibility that"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + pays off in"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + the long run."} } + + + event: content_block_stop + + data: {"type":"content_block_stop","index":0 } + + + event: message_delta + + data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":224} } + + + event: message_stop + + data: {"type":"message_stop" } + + + ' + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8d4f55f1bc74173b-SJC + Cache-Control: + - no-cache + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Sat, 19 Oct 2024 08:18:09 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-ratelimit-requests-limit: + - '50' + anthropic-ratelimit-requests-remaining: + - '49' + anthropic-ratelimit-requests-reset: + - '2024-10-19T08:18:55Z' + anthropic-ratelimit-tokens-limit: + - '40000' + anthropic-ratelimit-tokens-remaining: + - '38000' + anthropic-ratelimit-tokens-reset: + - '2024-10-19T08:18:11Z' + request-id: + - req_01D8a9dg61KFzypbJZZVD5qy + via: + - 1.1 google + status: + code: 200 + message: OK +- request: + body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": [{"type": + "text", "text": "test_anthropic_prompt_caching_async_stream <- IGNORE THIS. + ARTICLES START ON THE NEXT LINE\nOpen-Source Library OpenLLMetry Brings LLM + Functionality to OpenTelemetry\nTraceloop, a YCombinator-backed company, has + announced the release of OpenLLMetry, a new open-source\nlibrary designed to + extend OpenTelemetry with Large Language Model (LLM) functionality. 
This innovative\ntool + aims to bridge the gap between traditional application monitoring and the rapidly + evolving field\nof AI-powered systems. OpenLLMetry builds upon the widely-adopted + OpenTelemetry framework, which provides\na standardized approach to collecting + and exporting telemetry data from cloud-native applications. By\nintegrating + LLM-specific features, OpenLLMetry enables developers to gain deeper insights + into\nthe performance and behavior of AI models within their applications.\n\nKey + features of OpenLLMetry include:\n\nLLM-specific metrics and traces\nSeamless + integration with existing OpenTelemetry setups\nSupport for popular LLM frameworks + and platforms\n\nThe open-source nature of OpenLLMetry is expected to foster + community contributions and rapid adoption\namong developers working with LLMs. + As AI continues to transform the software landscape, tools\nlike OpenLLMetry + are poised to play a vital role in ensuring the reliability and performance + of\nnext-generation applications.\n\n========================================================================\n\nMajor + LLM Providers Introduce Prompt Caching, Boosting Speed and Reducing Costs\n\nIn + a significant development for the artificial intelligence industry, leading + Large Language Model (LLM) providers,\nincluding Anthropic, have announced the + implementation of prompt caching. This new feature promises to dramatically\nimprove + the speed and cost-effectiveness of LLM API calls, particularly for applications + with repetitive prompt patterns.\n\nUnderstanding Prompt Caching\nPrompt caching + is a technique that allows LLM providers to store and quickly retrieve responses + for frequently used prompts.\nInstead of processing the same or similar prompts + repeatedly, the system can serve pre-computed responses,\nsignificantly reducing + computation time and resources.\n\nBenefits of Prompt Caching\n1. Improved Response + Times\n\nWith prompt caching, response times for cached prompts can be reduced + from seconds to milliseconds. This dramatic speed\nimprovement enables near-instantaneous + responses in many scenarios, enhancing user experience in AI-powered applications.\n\n2. + Cost Reduction\nBy eliminating the need to reprocess identical or highly similar + prompts, prompt caching can substantially reduce the\ncomputational resources + required. This efficiency translates directly into cost savings for developers + and businesses\nutilizing LLM APIs.\n\n3. Scalability\nThe reduced computational + load allows LLM providers to handle a higher volume of requests with existing + infrastructure,\nimproving the scalability of their services.\n\nUse Cases and + Impact\nPrompt caching is particularly beneficial for applications with repetitive + prompt patterns. 
Some key use cases include:\n\nCustomer service chatbots handling + common queries\nContent moderation systems processing similar types of content\nLanguage + translation services for frequently translated phrases or sentences\nAutomated + coding assistants dealing with standard programming tasks\n\nImplementation + by Major Providers\nWhile the specific implementation details vary among providers, + the general approach involves:\n\nIdentifying frequently used prompts\nStoring + pre-computed responses\nImplementing efficient lookup mechanisms\nBalancing + cache freshness with performance gains\n\nAnthropic, known for its Claude AI + model, has been at the forefront of this technology.\n\nFuture Implications\nThe + introduction of prompt caching by major LLM providers is likely to have far-reaching + effects on the AI industry:\n\nBroader Adoption: Reduced costs and improved + performance could lead to wider adoption of LLM technologies across various + sectors.\nNew Application Paradigms: Developers may create new types of applications + that leverage the near-instantaneous response times of cached prompts.\nEvolution + of Pricing Models: LLM providers might introduce new pricing structures that + reflect the efficiency gains of prompt caching.\n\nAs the technology matures, + we can expect to see further refinements and innovative applications of prompt + caching, potentially\nreshaping the landscape of AI-powered services and applications.\n\n========================================================================\n\n\ud83d\udcca + Why Unit Testing is Non-Negotiable in Software Development \ud83d\udda5\ufe0f\nAs + a software professional, I can''t stress enough how crucial unit testing is + to our craft. Here''s why it''s a must-have practice:\n\n\ud83d\udc1b Catches + bugs early: Identify and fix issues before they snowball into major problems.\n\ud83d\udcb0 + Saves time and money: Less debugging time means faster development and lower + costs.\n\ud83c\udfd7\ufe0f Improves code quality: Writing testable code inherently + leads to better architecture.\n\ud83d\udd04 Facilitates refactoring: Tests give + you confidence to improve your code without breaking functionality.\n\ud83d\udcda + Serves as documentation: Well-written tests explain how your code should behave.\n\ud83e\udd1d + Enhances collaboration: New team members can understand and contribute to the + codebase faster.\n\nRemember: The time invested in unit testing pays off multifold + in the long run. It''s not just good practice\u2014it''s professional responsibility.\nWhat''s + your take on unit testing? Share your experiences below! 
\ud83d\udc47", "cache_control": + {"type": "ephemeral"}}]}], "model": "claude-3-5-sonnet-20240620", "stream": + true, "system": [{"type": "text", "text": "You help generate concise summaries + of news articles and blog posts that user sends you."}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + anthropic-beta: + - prompt-caching-2024-07-31 + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5999' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - AsyncAnthropic/Python 0.36.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.36.2 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.3 + method: POST + uri: https://api.anthropic.com/v1/messages?beta=prompt_caching + response: + body: + string: 'event: message_start + + data: {"type":"message_start","message":{"id":"msg_01BqqtrPfxepxW2xPuZz1m6h","type":"message","role":"assistant","model":"claude-3-5-sonnet-20240620","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":4,"cache_creation_input_tokens":0,"cache_read_input_tokens":1167,"output_tokens":1}} } + + + event: content_block_start + + data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} } + + + event: ping + + data: {"type": "ping"} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Here"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + are conc"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ise + summ"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"aries + of the"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + three"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + articles"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":":\n\n1"} + } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":". 
+ Open"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"LLMetry:"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + New"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + open"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-source + library"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + extends"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + OpenTelemetry"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + with LLM functionality"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + enabling deeper"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + insights into AI"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + model performance in"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + applications. Key"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + features include LLM"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-specific + metrics/"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"traces + an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + integration"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + with existing set"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ups."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\n2. 
+ Major L"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"LM + providers"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + introduce prompt caching,"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + dramatically"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + improving spee"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + and reducing costs for API"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + calls."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Benefits"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + include millis"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"econd + response times,"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + lower"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + computational"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + resources"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + and improve"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + scalability."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Particularly"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + useful"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + for applications with repetitive"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + prompts like"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + chatbots an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + translation"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + services."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\n3. 
+ Unit testing"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + is crucial"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + in software development:"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Catches bugs early, saves"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + time/money, impro"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ves + code quality, facilit"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ates + refactoring,"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + serves as documentation, an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + enhances collaboration. The"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + post"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + emphasizes that"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + investing"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + in unit testing is a"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + professional responsibility with"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + long-term benefits."} } + + + event: content_block_stop + + data: {"type":"content_block_stop","index":0 } + + + event: message_delta + + data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":186} } + + + event: message_stop + + data: {"type":"message_stop" } + + + ' + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8d4f56200a12173b-SJC + Cache-Control: + - no-cache + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Sat, 19 Oct 2024 08:18:17 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-ratelimit-requests-limit: + - '50' + anthropic-ratelimit-requests-remaining: + - '49' + anthropic-ratelimit-requests-reset: + - '2024-10-19T08:18:55Z' + anthropic-ratelimit-tokens-limit: + - '40000' + anthropic-ratelimit-tokens-remaining: + - '38000' + anthropic-ratelimit-tokens-reset: + - '2024-10-19T08:18:19Z' + request-id: + - req_013pSJwGsRyHBayiBD38PjTK + via: + - 1.1 google + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching_stream.yaml b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching_stream.yaml new file mode 100644 index 0000000000..f262adb147 --- /dev/null +++ 
b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_anthropic_prompt_caching_stream.yaml @@ -0,0 +1,1350 @@ +interactions: +- request: + body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": [{"type": + "text", "text": "test_anthropic_prompt_caching_stream <- IGNORE THIS. ARTICLES + START ON THE NEXT LINE\nOpen-Source Library OpenLLMetry Brings LLM Functionality + to OpenTelemetry\nTraceloop, a YCombinator-backed company, has announced the + release of OpenLLMetry, a new open-source\nlibrary designed to extend OpenTelemetry + with Large Language Model (LLM) functionality. This innovative\ntool aims to + bridge the gap between traditional application monitoring and the rapidly evolving + field\nof AI-powered systems. OpenLLMetry builds upon the widely-adopted OpenTelemetry + framework, which provides\na standardized approach to collecting and exporting + telemetry data from cloud-native applications. By\nintegrating LLM-specific + features, OpenLLMetry enables developers to gain deeper insights into\nthe performance + and behavior of AI models within their applications.\n\nKey features of OpenLLMetry + include:\n\nLLM-specific metrics and traces\nSeamless integration with existing + OpenTelemetry setups\nSupport for popular LLM frameworks and platforms\n\nThe + open-source nature of OpenLLMetry is expected to foster community contributions + and rapid adoption\namong developers working with LLMs. As AI continues to transform + the software landscape, tools\nlike OpenLLMetry are poised to play a vital role + in ensuring the reliability and performance of\nnext-generation applications.\n\n========================================================================\n\nMajor + LLM Providers Introduce Prompt Caching, Boosting Speed and Reducing Costs\n\nIn + a significant development for the artificial intelligence industry, leading + Large Language Model (LLM) providers,\nincluding Anthropic, have announced the + implementation of prompt caching. This new feature promises to dramatically\nimprove + the speed and cost-effectiveness of LLM API calls, particularly for applications + with repetitive prompt patterns.\n\nUnderstanding Prompt Caching\nPrompt caching + is a technique that allows LLM providers to store and quickly retrieve responses + for frequently used prompts.\nInstead of processing the same or similar prompts + repeatedly, the system can serve pre-computed responses,\nsignificantly reducing + computation time and resources.\n\nBenefits of Prompt Caching\n1. Improved Response + Times\n\nWith prompt caching, response times for cached prompts can be reduced + from seconds to milliseconds. This dramatic speed\nimprovement enables near-instantaneous + responses in many scenarios, enhancing user experience in AI-powered applications.\n\n2. + Cost Reduction\nBy eliminating the need to reprocess identical or highly similar + prompts, prompt caching can substantially reduce the\ncomputational resources + required. This efficiency translates directly into cost savings for developers + and businesses\nutilizing LLM APIs.\n\n3. Scalability\nThe reduced computational + load allows LLM providers to handle a higher volume of requests with existing + infrastructure,\nimproving the scalability of their services.\n\nUse Cases and + Impact\nPrompt caching is particularly beneficial for applications with repetitive + prompt patterns. 
Some key use cases include:\n\nCustomer service chatbots handling + common queries\nContent moderation systems processing similar types of content\nLanguage + translation services for frequently translated phrases or sentences\nAutomated + coding assistants dealing with standard programming tasks\n\nImplementation + by Major Providers\nWhile the specific implementation details vary among providers, + the general approach involves:\n\nIdentifying frequently used prompts\nStoring + pre-computed responses\nImplementing efficient lookup mechanisms\nBalancing + cache freshness with performance gains\n\nAnthropic, known for its Claude AI + model, has been at the forefront of this technology.\n\nFuture Implications\nThe + introduction of prompt caching by major LLM providers is likely to have far-reaching + effects on the AI industry:\n\nBroader Adoption: Reduced costs and improved + performance could lead to wider adoption of LLM technologies across various + sectors.\nNew Application Paradigms: Developers may create new types of applications + that leverage the near-instantaneous response times of cached prompts.\nEvolution + of Pricing Models: LLM providers might introduce new pricing structures that + reflect the efficiency gains of prompt caching.\n\nAs the technology matures, + we can expect to see further refinements and innovative applications of prompt + caching, potentially\nreshaping the landscape of AI-powered services and applications.\n\n========================================================================\n\n\ud83d\udcca + Why Unit Testing is Non-Negotiable in Software Development \ud83d\udda5\ufe0f\nAs + a software professional, I can''t stress enough how crucial unit testing is + to our craft. Here''s why it''s a must-have practice:\n\n\ud83d\udc1b Catches + bugs early: Identify and fix issues before they snowball into major problems.\n\ud83d\udcb0 + Saves time and money: Less debugging time means faster development and lower + costs.\n\ud83c\udfd7\ufe0f Improves code quality: Writing testable code inherently + leads to better architecture.\n\ud83d\udd04 Facilitates refactoring: Tests give + you confidence to improve your code without breaking functionality.\n\ud83d\udcda + Serves as documentation: Well-written tests explain how your code should behave.\n\ud83e\udd1d + Enhances collaboration: New team members can understand and contribute to the + codebase faster.\n\nRemember: The time invested in unit testing pays off multifold + in the long run. It''s not just good practice\u2014it''s professional responsibility.\nWhat''s + your take on unit testing? Share your experiences below! 
\ud83d\udc47", "cache_control": + {"type": "ephemeral"}}]}], "model": "claude-3-5-sonnet-20240620", "stream": + true, "system": [{"type": "text", "text": "You help generate concise summaries + of news articles and blog posts that user sends you."}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + anthropic-beta: + - prompt-caching-2024-07-31 + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5993' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - Anthropic/Python 0.36.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.36.2 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.3 + method: POST + uri: https://api.anthropic.com/v1/messages?beta=prompt_caching + response: + body: + string: 'event: message_start + + data: {"type":"message_start","message":{"id":"msg_015AgAJmgFQdYXgvvgbuKqPY","type":"message","role":"assistant","model":"claude-3-5-sonnet-20240620","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":4,"cache_creation_input_tokens":1165,"cache_read_input_tokens":0,"output_tokens":1}} } + + + event: content_block_start + + data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} } + + + event: ping + + data: {"type": "ping"} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Here"} + } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + are conc"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ise + summ"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"aries + of the three"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + articles"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"/"} + } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"posts"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":":"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\n1. 
+ Open"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"LLMetry:"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + New"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + open"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-source + library"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + extending"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + OpenTelemetry"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + with"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + L"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"LM + functionality"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":". + Develope"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + by"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Tr"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ace"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"loop"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + it provides L"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"LM-specific + metrics"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + traces,"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + integ"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"rates"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + with existing"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + set"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ups, + and supports popular"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + LLM frameworks."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Aims to improve"} } + + + event: 
content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + monitoring"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + of AI-"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"powered + systems"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":".\n\n2. + Major"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + LLM providers introduce"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + prompt caching:"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Stores"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + responses"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + for frequent"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + prompts"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Benefits"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":": + Faster"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + response"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + times, lower"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + costs, improved scalability"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Useful"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + for chat"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"bots, + content mo"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"deration, + translation"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":", + coding assist"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ants\n- + Implemente"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + by providers like Anthrop"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ic"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + May"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + lead to wider AI"} } + + + event: content_block_delta + + data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + adoption an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + new"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + application types"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\n3."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Importance of unit testing in"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + software development:\n-"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Catches bugs early"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Saves time an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + money\n- Impro"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ves + code quality an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + architecture "} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Facilitates"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + refactoring\n-"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Acts"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + as documentation\n-"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Enhances team"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + collaboration\nThe"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + post"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + emphas"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"izes + unit testing as a"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + crucial"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + professional practice"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + with"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + long"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-term + benefits."} } + + + event: content_block_stop + + data: 
{"type":"content_block_stop","index":0 } + + + event: message_delta + + data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":237} } + + + event: message_stop + + data: {"type":"message_stop" } + + + ' + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8d4f559f0c162364-SJC + Cache-Control: + - no-cache + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Sat, 19 Oct 2024 08:17:56 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-ratelimit-requests-limit: + - '50' + anthropic-ratelimit-requests-remaining: + - '49' + anthropic-ratelimit-requests-reset: + - '2024-10-19T08:18:55Z' + anthropic-ratelimit-tokens-limit: + - '40000' + anthropic-ratelimit-tokens-remaining: + - '38000' + anthropic-ratelimit-tokens-reset: + - '2024-10-19T08:17:58Z' + request-id: + - req_01JNH7PWMF94CthyahdE5BS5 + via: + - 1.1 google + status: + code: 200 + message: OK +- request: + body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": [{"type": + "text", "text": "test_anthropic_prompt_caching_stream <- IGNORE THIS. ARTICLES + START ON THE NEXT LINE\nOpen-Source Library OpenLLMetry Brings LLM Functionality + to OpenTelemetry\nTraceloop, a YCombinator-backed company, has announced the + release of OpenLLMetry, a new open-source\nlibrary designed to extend OpenTelemetry + with Large Language Model (LLM) functionality. This innovative\ntool aims to + bridge the gap between traditional application monitoring and the rapidly evolving + field\nof AI-powered systems. OpenLLMetry builds upon the widely-adopted OpenTelemetry + framework, which provides\na standardized approach to collecting and exporting + telemetry data from cloud-native applications. By\nintegrating LLM-specific + features, OpenLLMetry enables developers to gain deeper insights into\nthe performance + and behavior of AI models within their applications.\n\nKey features of OpenLLMetry + include:\n\nLLM-specific metrics and traces\nSeamless integration with existing + OpenTelemetry setups\nSupport for popular LLM frameworks and platforms\n\nThe + open-source nature of OpenLLMetry is expected to foster community contributions + and rapid adoption\namong developers working with LLMs. As AI continues to transform + the software landscape, tools\nlike OpenLLMetry are poised to play a vital role + in ensuring the reliability and performance of\nnext-generation applications.\n\n========================================================================\n\nMajor + LLM Providers Introduce Prompt Caching, Boosting Speed and Reducing Costs\n\nIn + a significant development for the artificial intelligence industry, leading + Large Language Model (LLM) providers,\nincluding Anthropic, have announced the + implementation of prompt caching. This new feature promises to dramatically\nimprove + the speed and cost-effectiveness of LLM API calls, particularly for applications + with repetitive prompt patterns.\n\nUnderstanding Prompt Caching\nPrompt caching + is a technique that allows LLM providers to store and quickly retrieve responses + for frequently used prompts.\nInstead of processing the same or similar prompts + repeatedly, the system can serve pre-computed responses,\nsignificantly reducing + computation time and resources.\n\nBenefits of Prompt Caching\n1. Improved Response + Times\n\nWith prompt caching, response times for cached prompts can be reduced + from seconds to milliseconds. 
This dramatic speed\nimprovement enables near-instantaneous + responses in many scenarios, enhancing user experience in AI-powered applications.\n\n2. + Cost Reduction\nBy eliminating the need to reprocess identical or highly similar + prompts, prompt caching can substantially reduce the\ncomputational resources + required. This efficiency translates directly into cost savings for developers + and businesses\nutilizing LLM APIs.\n\n3. Scalability\nThe reduced computational + load allows LLM providers to handle a higher volume of requests with existing + infrastructure,\nimproving the scalability of their services.\n\nUse Cases and + Impact\nPrompt caching is particularly beneficial for applications with repetitive + prompt patterns. Some key use cases include:\n\nCustomer service chatbots handling + common queries\nContent moderation systems processing similar types of content\nLanguage + translation services for frequently translated phrases or sentences\nAutomated + coding assistants dealing with standard programming tasks\n\nImplementation + by Major Providers\nWhile the specific implementation details vary among providers, + the general approach involves:\n\nIdentifying frequently used prompts\nStoring + pre-computed responses\nImplementing efficient lookup mechanisms\nBalancing + cache freshness with performance gains\n\nAnthropic, known for its Claude AI + model, has been at the forefront of this technology.\n\nFuture Implications\nThe + introduction of prompt caching by major LLM providers is likely to have far-reaching + effects on the AI industry:\n\nBroader Adoption: Reduced costs and improved + performance could lead to wider adoption of LLM technologies across various + sectors.\nNew Application Paradigms: Developers may create new types of applications + that leverage the near-instantaneous response times of cached prompts.\nEvolution + of Pricing Models: LLM providers might introduce new pricing structures that + reflect the efficiency gains of prompt caching.\n\nAs the technology matures, + we can expect to see further refinements and innovative applications of prompt + caching, potentially\nreshaping the landscape of AI-powered services and applications.\n\n========================================================================\n\n\ud83d\udcca + Why Unit Testing is Non-Negotiable in Software Development \ud83d\udda5\ufe0f\nAs + a software professional, I can''t stress enough how crucial unit testing is + to our craft. Here''s why it''s a must-have practice:\n\n\ud83d\udc1b Catches + bugs early: Identify and fix issues before they snowball into major problems.\n\ud83d\udcb0 + Saves time and money: Less debugging time means faster development and lower + costs.\n\ud83c\udfd7\ufe0f Improves code quality: Writing testable code inherently + leads to better architecture.\n\ud83d\udd04 Facilitates refactoring: Tests give + you confidence to improve your code without breaking functionality.\n\ud83d\udcda + Serves as documentation: Well-written tests explain how your code should behave.\n\ud83e\udd1d + Enhances collaboration: New team members can understand and contribute to the + codebase faster.\n\nRemember: The time invested in unit testing pays off multifold + in the long run. It''s not just good practice\u2014it''s professional responsibility.\nWhat''s + your take on unit testing? Share your experiences below! 
\ud83d\udc47", "cache_control": + {"type": "ephemeral"}}]}], "model": "claude-3-5-sonnet-20240620", "stream": + true, "system": [{"type": "text", "text": "You help generate concise summaries + of news articles and blog posts that user sends you."}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + anthropic-beta: + - prompt-caching-2024-07-31 + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '5993' + content-type: + - application/json + host: + - api.anthropic.com + user-agent: + - Anthropic/Python 0.36.2 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 0.36.2 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.3 + method: POST + uri: https://api.anthropic.com/v1/messages?beta=prompt_caching + response: + body: + string: 'event: message_start + + data: {"type":"message_start","message":{"id":"msg_01HSJ2wYvBf4DbFbwCaiGreG","type":"message","role":"assistant","model":"claude-3-5-sonnet-20240620","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":4,"cache_creation_input_tokens":0,"cache_read_input_tokens":1165,"output_tokens":1}} } + + + event: content_block_start + + data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} } + + + event: ping + + data: {"type": "ping"} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Here"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + are conc"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ise + summ"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"aries + of the"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + three"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + articles"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"/"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"posts"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":":\n\n1. 
+ Open"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"LLMetry:"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + New"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + open-source library"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + extending"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + OpenTelemetry"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + with"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + LLM functionality"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":". + Develope"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + by Tr"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ace"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"loop"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":", + it"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + provides L"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"LM-specific + metrics"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + traces,"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + integ"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"rates"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + with existing set"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ups, + and supports popular"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + LLM frameworks."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Aims"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + to improve monitoring"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + of"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + AI"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-"} 
} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"powered + systems"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":".\n\n2. + Major"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + LLM providers"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + introduce prompt caching:"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n-"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Stores"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + responses"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + for frequent"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + prompts"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Benefits"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":": + Faster"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + response times, lower"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + costs, improved scalability"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Useful"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + for chat"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"bots, + content mo"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"deration, + translation, coding"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + assist"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ants"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Implemente"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + by providers"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + like Anthropic"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + May"}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + lea"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + to wider"} } + + + event: content_block_delta + + data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + AI"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + adoption an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + new application types"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\n3."} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Importance of unit testing in"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + software development:\n-"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Catches"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + bugs early"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Saves time an"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + money\n- Impro"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ves + code quality and architecture"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" "}} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Enables"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + safe"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + refactoring"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n- + Acts"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + as documentation\n-"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Enhances team"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + collaboration"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n-"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + Considere"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + a"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + professional responsibility"} } + + + event: content_block_stop + + data: {"type":"content_block_stop","index":0 } + + + event: message_delta + + data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":226} } + + + event: message_stop + + data: {"type":"message_stop" } + + + ' + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 
8d4f55c6b85a2364-SJC + Cache-Control: + - no-cache + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Sat, 19 Oct 2024 08:18:02 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-ratelimit-requests-limit: + - '50' + anthropic-ratelimit-requests-remaining: + - '49' + anthropic-ratelimit-requests-reset: + - '2024-10-19T08:18:55Z' + anthropic-ratelimit-tokens-limit: + - '40000' + anthropic-ratelimit-tokens-remaining: + - '38000' + anthropic-ratelimit-tokens-reset: + - '2024-10-19T08:18:05Z' + request-id: + - req_01FrTMh1ZwdXvCZsJuwmjFXp + via: + - 1.1 google + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_async_anthropic_message_streaming.yaml b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_async_anthropic_message_streaming.yaml index 3ccd0988ee..309dd4077e 100644 --- a/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_async_anthropic_message_streaming.yaml +++ b/packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_completion/test_async_anthropic_message_streaming.yaml @@ -19,34 +19,35 @@ interactions: host: - api.anthropic.com user-agent: - - AsyncAnthropic/Python 0.21.3 + - AsyncAnthropic/Python 0.36.2 x-stainless-arch: - - other:amd64 + - arm64 x-stainless-async: - async:asyncio x-stainless-lang: - python x-stainless-os: - - Windows + - MacOS x-stainless-package-version: - - 0.21.3 + - 0.36.2 + x-stainless-retry-count: + - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.9.13 + - 3.12.3 method: POST uri: https://api.anthropic.com/v1/messages response: body: string: 'event: message_start - data: {"type":"message_start","message":{"id":"msg_01SMFagMqe8N56JcCyjBTmdj","type":"message","role":"assistant","content":[],"model":"claude-3-haiku-20240307","stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":17,"output_tokens":1}} } + data: {"type":"message_start","message":{"id":"msg_016o6A7zDmgjucf5mWv1rrPD","type":"message","role":"assistant","model":"claude-3-haiku-20240307","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":17,"output_tokens":3}} } event: content_block_start - data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} - } + data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}} event: ping @@ -56,1389 +57,261 @@ interactions: event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Here"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"''s"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - an"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - Open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"T"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"el"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emet"} - } - - - event: content_block_delta - - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - joke"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - for"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - you"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":":"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\nWhy"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - did"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - developer"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - cross"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - road"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"?"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - To"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - get"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - other"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - side"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - distributed"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - trace"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"!"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\nIn"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - world"} } - - - event: content_block_delta - - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - modern"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - cloud"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"native"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - applications"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - monitoring"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - and"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - observ"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ability"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - have"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - become"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - crucial"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - Open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"T"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"el"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emet"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"-"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"source"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - project"} } - - - event: content_block_delta - - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - for"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - distributed"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - tr"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"acing"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - and"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - metrics"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - is"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - here"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - help"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - But"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - sometimes"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - understanding"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - all"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - int"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ric"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ac"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ies"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - this"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - powerful"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - tool"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - can"} } - - - event: content_block_delta - - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - feel"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - like"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - navig"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ating"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - complex"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - maze"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - instru"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"mentation"} - } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - and"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - configuration"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\nThat"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"''s"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - why"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - little"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - bit"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - humor"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - can"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - go"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - long"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - way"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."} } - - - event: content_block_delta - - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - This"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - joke"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - plays"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - on"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - idea"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - that"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - developers"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - are"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - always"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - trying"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - \""}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"trace"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\""} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - flow"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - their"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - applications"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - just"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - like"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - classic"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - joke"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - about"} } - - - event: content_block_delta - 
- data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - why"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - chicken"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - crossed"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - road"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - But"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - in"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - world"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - Open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"T"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"el"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emet"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - developer"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - has"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - cross"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - distributed"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - trace"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"}} - - - event: content_block_delta - - data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - get"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - other"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - side"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - -"}} - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - n"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"od"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - challenges"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - understanding"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - complex"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - interactions"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - between"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Here''s + an"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - micro"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"serv"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ices"} } + OpenTelemet"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry-"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"\n\nSo"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"themed + joke for"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } + you:\n\nWhy"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - next"} } + was"} } 
event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - time"} } + the OpenTel"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - you"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emetry + tra"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"''re"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"cer + so"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - struggling"} } + tire"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - with"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d? + Because it ha"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - your"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + been"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - Open"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"T"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"el"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"emet"} } + tr"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ry"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"acing + all"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - setup"} } + day"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"!\n\nIn"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - remember"} } + the"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - to"}} + worl"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - keep"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + of distribute"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - your"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + tr"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - sense"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"acing + an"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - of"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + observ"}} event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - humor"} } + data: 
{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ability, + OpenT"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"."} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"elemetry + is"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - After"} } + the tracing library"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - all"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":","} } + that"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } + just"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - little"} } + keeps going"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - l"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"aughter"} } + an"}} event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - can"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"d + going,"}} event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - make"} } + collecting"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - even"} } + data from"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - the"} } + all your"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - most"}} + microservices."} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - d"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"aun"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ting"} } + But"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - observ"} } - - - event: content_block_delta - - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ability"} } + even"}} event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - challenge"} } + the"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - a"} } + most di"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - bit"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"ligent + tra"} } event: content_block_delta - data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - more"} } + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"cer + needs"} } event: content_block_delta data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" - manag"} } - - - event: 
event: content_block_stop - data: {"type":"content_block_stop","index":0 } + data: {"type":"content_block_stop","index":0} event: message_delta - data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":246} - } + data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":92} } event: message_stop - data: {"type":"message_stop" } + data: {"type":"message_stop" } ' @@ -1446,7 +319,7 @@ interactions: CF-Cache-Status: - DYNAMIC CF-RAY: - - 8708501079c452a1-LAX + - 8d5c8d7bce36cec9-SJC Cache-Control: - no-cache Connection: @@ -1454,25 +327,27 @@ interactions: Content-Type: - text/event-stream; charset=utf-8 Date: - - Sun, 07 Apr 2024 07:30:31 GMT + - Sun, 20 Oct 2024 22:47:58 GMT Server: - cloudflare Transfer-Encoding: - chunked + X-Robots-Tag: + - none anthropic-ratelimit-requests-limit: - - '5' + - '50' anthropic-ratelimit-requests-remaining: - - '3' + - '49' anthropic-ratelimit-requests-reset: - - '2024-04-07T07:31:00Z' + - '2024-10-20T22:48:55Z' anthropic-ratelimit-tokens-limit: - - '25000' + - '50000' anthropic-ratelimit-tokens-remaining: - - '24000' + - '49000' anthropic-ratelimit-tokens-reset: - - '2024-04-07T07:31:00Z' + - '2024-10-20T22:47:59Z' request-id: - - req_01L1qXZVKya9zQeReuDtbVxU + - req_01F1o1MC5ME8uRqRyfy8NVC3 via: - 1.1 google status: diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/data/1024+tokens.txt b/packages/opentelemetry-instrumentation-anthropic/tests/data/1024+tokens.txt new file mode 100644 index 0000000000..12d9224ebf --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/tests/data/1024+tokens.txt @@ -0,0 +1,90 @@ +Open-Source Library OpenLLMetry Brings LLM Functionality to OpenTelemetry +Traceloop, a YCombinator-backed company, has announced the release of OpenLLMetry, a new open-source +library designed to extend OpenTelemetry with Large Language Model (LLM) functionality. This innovative +tool aims to bridge the gap between traditional application monitoring and the rapidly evolving field +of AI-powered systems. OpenLLMetry builds upon the widely-adopted OpenTelemetry framework, which provides +a standardized approach to collecting and exporting telemetry data from cloud-native applications. By +integrating LLM-specific features, OpenLLMetry enables developers to gain deeper insights into +the performance and behavior of AI models within their applications. + +Key features of OpenLLMetry include: + +LLM-specific metrics and traces +Seamless integration with existing OpenTelemetry setups +Support for popular LLM frameworks and platforms + +The open-source nature of OpenLLMetry is expected to foster community contributions and rapid adoption +among developers working with LLMs. As AI continues to transform the software landscape, tools +like OpenLLMetry are poised to play a vital role in ensuring the reliability and performance of +next-generation applications.
+ +======================================================================== + +Major LLM Providers Introduce Prompt Caching, Boosting Speed and Reducing Costs + +In a significant development for the artificial intelligence industry, leading Large Language Model (LLM) providers, +including Anthropic, have announced the implementation of prompt caching. This new feature promises to dramatically +improve the speed and cost-effectiveness of LLM API calls, particularly for applications with repetitive prompt patterns. + +Understanding Prompt Caching +Prompt caching is a technique that allows LLM providers to store and quickly retrieve responses for frequently used prompts. +Instead of processing the same or similar prompts repeatedly, the system can serve pre-computed responses, +significantly reducing computation time and resources. + +Benefits of Prompt Caching +1. Improved Response Times + +With prompt caching, response times for cached prompts can be reduced from seconds to milliseconds. This dramatic speed +improvement enables near-instantaneous responses in many scenarios, enhancing user experience in AI-powered applications. + +2. Cost Reduction +By eliminating the need to reprocess identical or highly similar prompts, prompt caching can substantially reduce the +computational resources required. This efficiency translates directly into cost savings for developers and businesses +utilizing LLM APIs. + +3. Scalability +The reduced computational load allows LLM providers to handle a higher volume of requests with existing infrastructure, +improving the scalability of their services. + +Use Cases and Impact +Prompt caching is particularly beneficial for applications with repetitive prompt patterns. Some key use cases include: + +Customer service chatbots handling common queries +Content moderation systems processing similar types of content +Language translation services for frequently translated phrases or sentences +Automated coding assistants dealing with standard programming tasks + +Implementation by Major Providers +While the specific implementation details vary among providers, the general approach involves: + +Identifying frequently used prompts +Storing pre-computed responses +Implementing efficient lookup mechanisms +Balancing cache freshness with performance gains + +Anthropic, known for its Claude AI model, has been at the forefront of this technology. + +Future Implications +The introduction of prompt caching by major LLM providers is likely to have far-reaching effects on the AI industry: + +Broader Adoption: Reduced costs and improved performance could lead to wider adoption of LLM technologies across various sectors. +New Application Paradigms: Developers may create new types of applications that leverage the near-instantaneous response times of cached prompts. +Evolution of Pricing Models: LLM providers might introduce new pricing structures that reflect the efficiency gains of prompt caching. + +As the technology matures, we can expect to see further refinements and innovative applications of prompt caching, potentially +reshaping the landscape of AI-powered services and applications. + +======================================================================== + +📊 Why Unit Testing is Non-Negotiable in Software Development 🖥️ +As a software professional, I can't stress enough how crucial unit testing is to our craft. Here's why it's a must-have practice: + +🐛 Catches bugs early: Identify and fix issues before they snowball into major problems. 
+💰 Saves time and money: Less debugging time means faster development and lower costs. +🏗️ Improves code quality: Writing testable code inherently leads to better architecture. +🔄 Facilitates refactoring: Tests give you confidence to improve your code without breaking functionality. +📚 Serves as documentation: Well-written tests explain how your code should behave. +🤝 Enhances collaboration: New team members can understand and contribute to the codebase faster. + +Remember: The time invested in unit testing pays off multifold in the long run. It's not just good practice—it's professional responsibility. +What's your take on unit testing? Share your experiences below! 👇 \ No newline at end of file
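The fixture above describes the provider-side flow behind prompt caching: recognize a previously seen prompt, serve the stored result, and only fall through to the model on a miss. As a rough sketch of that idea (all names here are invented for illustration; an in-memory dict with a TTL stands in for whatever lookup the providers actually run):

from __future__ import annotations

import hashlib
import time


class PromptCache:
    """Toy prompt cache: key responses by a hash of the prompt, expire after a TTL."""

    def __init__(self, ttl_seconds: float = 300.0):
        self._ttl = ttl_seconds
        self._entries: dict[str, tuple[float, str]] = {}

    def _key(self, prompt: str) -> str:
        return hashlib.sha256(prompt.encode("utf-8")).hexdigest()

    def get(self, prompt: str) -> str | None:
        entry = self._entries.get(self._key(prompt))
        if entry is None:
            return None
        stored_at, response = entry
        if time.monotonic() - stored_at > self._ttl:
            return None  # stale entry: treat it as a miss
        return response

    def put(self, prompt: str, response: str) -> None:
        self._entries[self._key(prompt)] = (time.monotonic(), response)


def complete(prompt: str, cache: PromptCache, call_model) -> str:
    """Serve a cached response when possible; otherwise call the model and store the result."""
    cached = cache.get(prompt)
    if cached is not None:
        return cached  # a cache hit, analogous to cache_read_input_tokens > 0 in the tests below
    response = call_model(prompt)
    cache.put(prompt, response)  # a cache write, analogous to cache_creation_input_tokens > 0
    return response

The new tests in this patch exercise exactly this two-phase behaviour through the real API: of two identical requests, the first reports cache_creation_input_tokens and the second reports cache_read_input_tokens.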
anthropic_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.user"] + == f"{HUMAN_PROMPT}\nHello world\n{AI_PROMPT}" + ) + assert anthropic_span.attributes.get(f"{SpanAttributes.LLM_COMPLETIONS}.0.content") + + metrics_data = reader.get_metrics_data() + resource_metrics = metrics_data.resource_metrics + + verify_metrics(resource_metrics, "claude-instant-1.2") + + @pytest.mark.vcr def test_anthropic_message_create(exporter, reader): client = Anthropic() @@ -141,68 +145,7 @@ def test_anthropic_message_create(exporter, reader): metrics_data = reader.get_metrics_data() resource_metrics = metrics_data.resource_metrics - assert len(resource_metrics) > 0 - - found_token_metric = False - found_choice_metric = False - found_duration_metric = False - found_exception_metric = False - - for rm in resource_metrics: - for sm in rm.scope_metrics: - for metric in sm.metrics: - if metric.name == Meters.LLM_TOKEN_USAGE: - found_token_metric = True - for data_point in metric.data.data_points: - assert data_point.attributes[SpanAttributes.LLM_TOKEN_TYPE] in [ - "output", - "input", - ] - assert ( - data_point.attributes[SpanAttributes.LLM_RESPONSE_MODEL] - == "claude-3-opus-20240229" - ) - assert data_point.sum > 0 - - if metric.name == Meters.LLM_GENERATION_CHOICES: - found_choice_metric = True - for data_point in metric.data.data_points: - assert data_point.value >= 1 - assert ( - data_point.attributes[SpanAttributes.LLM_RESPONSE_MODEL] - == "claude-3-opus-20240229" - ) - - if metric.name == Meters.LLM_OPERATION_DURATION: - found_duration_metric = True - assert any( - data_point.count > 0 for data_point in metric.data.data_points - ) - assert any( - data_point.sum > 0 for data_point in metric.data.data_points - ) - assert all( - data_point.attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) - == "claude-3-opus-20240229" - or data_point.attributes.get("error.type") == "TypeError" - for data_point in metric.data.data_points - ) - - if metric.name == Meters.LLM_ANTHROPIC_COMPLETION_EXCEPTIONS: - found_exception_metric = True - for data_point in metric.data.data_points: - assert data_point.value == 1 - assert data_point.attributes["error.type"] == "TypeError" - - assert all( - data_point.attributes.get("gen_ai.system") == "anthropic" - for data_point in metric.data.data_points - ) - - assert found_token_metric is True - assert found_choice_metric is True - assert found_duration_metric is True - assert found_exception_metric is True + verify_metrics(resource_metrics, "claude-3-opus-20240229") @pytest.mark.vcr @@ -336,6 +279,12 @@ def test_anthropic_message_streaming(exporter, reader): model="claude-3-haiku-20240307", stream=True, ) + try: + client.messages.create( + unknown_parameter="unknown", + ) + except Exception: + pass response_content = "" for event in response: @@ -356,7 +305,7 @@ def test_anthropic_message_streaming(exporter, reader): anthropic_span.attributes.get(f"{SpanAttributes.LLM_COMPLETIONS}.0.content") == response_content ) - assert anthropic_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 8 + assert anthropic_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 17 assert ( anthropic_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] + anthropic_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] @@ -365,61 +314,8 @@ def test_anthropic_message_streaming(exporter, reader): metrics_data = reader.get_metrics_data() resource_metrics = metrics_data.resource_metrics - assert len(resource_metrics) > 0 - - found_token_metric = False - found_choice_metric = False - 
found_duration_metric = False - # TODO found_exception_metric = False - - for rm in resource_metrics: - for sm in rm.scope_metrics: - for metric in sm.metrics: - if metric.name == Meters.LLM_TOKEN_USAGE: - found_token_metric = True - for data_point in metric.data.data_points: - assert data_point.attributes[SpanAttributes.LLM_TOKEN_TYPE] in [ - "output", - "input", - ] - assert ( - data_point.attributes[SpanAttributes.LLM_RESPONSE_MODEL] - == "claude-3-haiku-20240307" - ) - assert data_point.sum > 0 - - if metric.name == Meters.LLM_GENERATION_CHOICES: - found_choice_metric = True - for data_point in metric.data.data_points: - assert data_point.value >= 1 - assert ( - data_point.attributes[SpanAttributes.LLM_RESPONSE_MODEL] - == "claude-3-haiku-20240307" - ) - - if metric.name == Meters.LLM_OPERATION_DURATION: - found_duration_metric = True - assert any( - data_point.count > 0 for data_point in metric.data.data_points - ) - assert any( - data_point.sum > 0 for data_point in metric.data.data_points - ) - assert all( - data_point.attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) - == "claude-3-haiku-20240307" - or data_point.attributes.get("error.type") == "TypeError" - for data_point in metric.data.data_points - ) - - assert all( - data_point.attributes.get("gen_ai.system") == "anthropic" - for data_point in metric.data.data_points - ) - assert found_token_metric is True - assert found_choice_metric is True - assert found_duration_metric is True + verify_metrics(resource_metrics, "claude-3-haiku-20240307", ignore_exception_metric=True) @pytest.mark.vcr @@ -466,74 +362,21 @@ async def test_async_anthropic_message_create(exporter, reader): metrics_data = reader.get_metrics_data() resource_metrics = metrics_data.resource_metrics - assert len(resource_metrics) > 0 - - found_token_metric = False - found_choice_metric = False - found_duration_metric = False - found_exception_metric = False - - for rm in resource_metrics: - for sm in rm.scope_metrics: - for metric in sm.metrics: - if metric.name == Meters.LLM_TOKEN_USAGE: - found_token_metric = True - for data_point in metric.data.data_points: - assert data_point.attributes[SpanAttributes.LLM_TOKEN_TYPE] in [ - "output", - "input", - ] - assert ( - data_point.attributes[SpanAttributes.LLM_RESPONSE_MODEL] - == "claude-3-opus-20240229" - ) - assert data_point.sum > 0 - - if metric.name == Meters.LLM_GENERATION_CHOICES: - found_choice_metric = True - for data_point in metric.data.data_points: - assert data_point.value >= 1 - assert ( - data_point.attributes[SpanAttributes.LLM_RESPONSE_MODEL] - == "claude-3-opus-20240229" - ) - - if metric.name == Meters.LLM_OPERATION_DURATION: - found_duration_metric = True - assert any( - data_point.count > 0 for data_point in metric.data.data_points - ) - assert any( - data_point.sum > 0 for data_point in metric.data.data_points - ) - assert all( - data_point.attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) - == "claude-3-opus-20240229" - or data_point.attributes.get("error.type") == "TypeError" - for data_point in metric.data.data_points - ) - - if metric.name == Meters.LLM_ANTHROPIC_COMPLETION_EXCEPTIONS: - found_exception_metric = True - for data_point in metric.data.data_points: - assert data_point.value == 1 - assert data_point.attributes["error.type"] == "TypeError" - - assert all( - data_point.attributes.get("gen_ai.system") == "anthropic" - for data_point in metric.data.data_points - ) - - assert found_token_metric is True - assert found_choice_metric is True - assert found_duration_metric is True - assert 
found_exception_metric is True + verify_metrics(resource_metrics, "claude-3-opus-20240229") @pytest.mark.vcr @pytest.mark.asyncio async def test_async_anthropic_message_streaming(exporter, reader): client = AsyncAnthropic() + + try: + await client.messages.create( + unknown_parameter="unknown", + ) + except Exception: + pass + response = await client.messages.create( max_tokens=1024, messages=[ @@ -564,7 +407,7 @@ async def test_async_anthropic_message_streaming(exporter, reader): anthropic_span.attributes.get(f"{SpanAttributes.LLM_COMPLETIONS}.0.content") == response_content ) - assert anthropic_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 8 + assert anthropic_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 17 assert ( anthropic_span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] + anthropic_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] @@ -573,61 +416,7 @@ async def test_async_anthropic_message_streaming(exporter, reader): metrics_data = reader.get_metrics_data() resource_metrics = metrics_data.resource_metrics - assert len(resource_metrics) > 0 - - found_token_metric = False - found_choice_metric = False - found_duration_metric = False - # TODO found_exception_metric = False - - for rm in resource_metrics: - for sm in rm.scope_metrics: - for metric in sm.metrics: - if metric.name == Meters.LLM_TOKEN_USAGE: - found_token_metric = True - for data_point in metric.data.data_points: - assert data_point.attributes[SpanAttributes.LLM_TOKEN_TYPE] in [ - "output", - "input", - ] - assert ( - data_point.attributes[SpanAttributes.LLM_RESPONSE_MODEL] - == "claude-3-haiku-20240307" - ) - assert data_point.sum > 0 - - if metric.name == Meters.LLM_GENERATION_CHOICES: - found_choice_metric = True - for data_point in metric.data.data_points: - assert data_point.value >= 1 - assert ( - data_point.attributes[SpanAttributes.LLM_RESPONSE_MODEL] - == "claude-3-haiku-20240307" - ) - - if metric.name == Meters.LLM_OPERATION_DURATION: - found_duration_metric = True - assert any( - data_point.count > 0 for data_point in metric.data.data_points - ) - assert any( - data_point.sum > 0 for data_point in metric.data.data_points - ) - assert all( - data_point.attributes.get(SpanAttributes.LLM_RESPONSE_MODEL) - == "claude-3-haiku-20240307" - or data_point.attributes.get("error.type") == "TypeError" - for data_point in metric.data.data_points - ) - - assert all( - data_point.attributes.get("gen_ai.system") == "anthropic" - for data_point in metric.data.data_points - ) - - assert found_token_metric is True - assert found_choice_metric is True - assert found_duration_metric is True + verify_metrics(resource_metrics, "claude-3-haiku-20240307", ignore_exception_metric=True) @pytest.mark.vcr @@ -757,71 +546,338 @@ def test_anthropic_tools(exporter, reader): assert (anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.id"]) == response.content[1].id assert (anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.name"]) == response.content[1].name - response_content = json.dumps(response.content[1].input) - assert (anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.arguments"] == response_content) + response_input = json.dumps(response.content[1].input) + assert (anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.arguments"] == response_input) assert (anthropic_span.attributes["gen_ai.completion.0.tool_calls.1.id"]) == response.content[2].id assert (anthropic_span.attributes["gen_ai.completion.0.tool_calls.1.name"]) == response.content[2].name - 
response_content = json.dumps(response.content[2].input) - assert (anthropic_span.attributes["gen_ai.completion.0.tool_calls.1.arguments"] == response_content) + response_input = json.dumps(response.content[2].input) + assert (anthropic_span.attributes["gen_ai.completion.0.tool_calls.1.arguments"] == response_input) # verify metrics metrics_data = reader.get_metrics_data() resource_metrics = metrics_data.resource_metrics - assert len(resource_metrics) > 0 + verify_metrics(resource_metrics, "claude-3-5-sonnet-20240620") - found_token_metric = False - found_choice_metric = False - found_duration_metric = False - found_exception_metric = False - for rm in resource_metrics: - for sm in rm.scope_metrics: - for metric in sm.metrics: - if metric.name == "gen_ai.client.token.usage": - found_token_metric = True - for data_point in metric.data.data_points: - assert data_point.attributes["gen_ai.token.type"] in [ - "input", - "output", - ] - assert ( - data_point.attributes["gen_ai.response.model"] - == "claude-3-5-sonnet-20240620" - ) - assert data_point.sum > 0 +@pytest.mark.vcr +def test_anthropic_prompt_caching(exporter, reader): + with open(Path(__file__).parent.joinpath("data/1024+tokens.txt"), "r") as f: + # add the unique test name to the prompt to avoid caching leaking to other tests + text = "test_anthropic_prompt_caching <- IGNORE THIS. ARTICLES START ON THE NEXT LINE\n" + f.read() + client = Anthropic() - if metric.name == "gen_ai.client.generation.choices": - found_choice_metric = True - for data_point in metric.data.data_points: - assert data_point.value >= 1 - assert ( - data_point.attributes["gen_ai.response.model"] - == "claude-3-5-sonnet-20240620" - ) + try: + client.messages.create( + unknown_parameter="unknown", + ) + except Exception: + pass - if metric.name == "gen_ai.client.operation.duration": - found_duration_metric = True - assert any( - data_point.count > 0 for data_point in metric.data.data_points - ) - assert any( - data_point.sum > 0 for data_point in metric.data.data_points - ) - assert all( - data_point.attributes.get("gen_ai.response.model") - == "claude-3-5-sonnet-20240620" - or data_point.attributes.get("error.type") == "TypeError" - for data_point in metric.data.data_points - ) + for _ in range(2): + client.beta.prompt_caching.messages.create( + model="claude-3-5-sonnet-20240620", + max_tokens=1024, + system=[ + { + "type": "text", + "text": "You help generate concise summaries of news articles and blog posts that user sends you.", + }, + ], + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": text, + "cache_control": {"type": "ephemeral"}, + }, + ], + }, + ], + ) - if metric.name == "llm.anthropic.completion.exceptions": - found_exception_metric = True - for data_point in metric.data.data_points: - assert data_point.value == 1 - assert data_point.attributes["error.type"] == "TypeError" + spans = exporter.get_finished_spans() + # verify overall shape + assert all(span.name == "anthropic.chat" for span in spans) + assert len(spans) == 2 + cache_creation_span = spans[0] + cache_read_span = spans[1] + + assert cache_creation_span.attributes["gen_ai.prompt.0.role"] == "user" + assert text == cache_creation_span.attributes["gen_ai.prompt.0.content"] + assert cache_read_span.attributes["gen_ai.prompt.0.role"] == "user" + assert text == cache_read_span.attributes["gen_ai.prompt.0.content"] + assert ( + cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] + == 
cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] + ) - assert found_token_metric is True - assert found_choice_metric is True - assert found_duration_metric is True - assert found_exception_metric is True + # first check that cache_creation_span only wrote to the cache and did not read from it + assert cache_creation_span.attributes["gen_ai.usage.cache_read_input_tokens"] == 0 + assert cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] != 0 + + # then check for exact figures for the fixture/cassette + assert cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] == 1163 + assert cache_creation_span.attributes["gen_ai.usage.prompt_tokens"] == 1167 + assert cache_creation_span.attributes["gen_ai.usage.completion_tokens"] == 224 + + # first check that cache_read_span only read from the cache and did not write to it + assert cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] != 0 + assert cache_read_span.attributes["gen_ai.usage.cache_creation_input_tokens"] == 0 + + # then check for exact figures for the fixture/cassette + assert cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] == 1163 + assert cache_read_span.attributes["gen_ai.usage.prompt_tokens"] == 1167 + assert cache_read_span.attributes["gen_ai.usage.completion_tokens"] == 230 + + # verify metrics + metrics_data = reader.get_metrics_data() + resource_metrics = metrics_data.resource_metrics + verify_metrics(resource_metrics, "claude-3-5-sonnet-20240620") + + +@pytest.mark.vcr +@pytest.mark.asyncio +async def test_anthropic_prompt_caching_async(exporter, reader): + + with open(Path(__file__).parent.joinpath("data/1024+tokens.txt"), "r") as f: + # add the unique test name to the prompt to avoid caching leaking to other tests + text = "test_anthropic_prompt_caching_async <- IGNORE THIS.
ARTICLES START ON THE NEXT LINE\n" + f.read() + client = AsyncAnthropic() + + try: + await client.messages.create( + unknown_parameter="unknown", + ) + except Exception: + pass + + for _ in range(2): + await client.beta.prompt_caching.messages.create( + model="claude-3-5-sonnet-20240620", + max_tokens=1024, + system=[ + { + "type": "text", + "text": "You help generate concise summaries of news articles and blog posts that user sends you.", + }, + ], + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": text, + "cache_control": {"type": "ephemeral"}, + }, + ], + }, + ], + ) + + spans = exporter.get_finished_spans() + # verify overall shape + assert all(span.name == "anthropic.chat" for span in spans) + assert len(spans) == 2 + cache_creation_span = spans[0] + cache_read_span = spans[1] + + assert cache_creation_span.attributes["gen_ai.prompt.0.role"] == "user" + assert text == cache_creation_span.attributes["gen_ai.prompt.0.content"] + assert cache_read_span.attributes["gen_ai.prompt.0.role"] == "user" + assert text == cache_read_span.attributes["gen_ai.prompt.0.content"] + assert ( + cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] + == cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] + ) + + # first check that cache_creation_span only wrote to the cache and did not read from it + assert cache_creation_span.attributes["gen_ai.usage.cache_read_input_tokens"] == 0 + assert cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] != 0 + + # then check for exact figures for the fixture/cassette + assert cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] == 1165 + assert cache_creation_span.attributes["gen_ai.usage.prompt_tokens"] == 1169 + assert cache_creation_span.attributes["gen_ai.usage.completion_tokens"] == 249 + + # first check that cache_read_span only read from the cache and did not write to it + assert cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] != 0 + assert cache_read_span.attributes["gen_ai.usage.cache_creation_input_tokens"] == 0 + + # then check for exact figures for the fixture/cassette + assert cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] == 1165 + assert cache_read_span.attributes["gen_ai.usage.prompt_tokens"] == 1169 + assert cache_read_span.attributes["gen_ai.usage.completion_tokens"] == 229 + + # verify metrics + metrics_data = reader.get_metrics_data() + resource_metrics = metrics_data.resource_metrics + verify_metrics(resource_metrics, "claude-3-5-sonnet-20240620") + + +@pytest.mark.vcr +def test_anthropic_prompt_caching_stream(exporter, reader): + with open(Path(__file__).parent.joinpath("data/1024+tokens.txt"), "r") as f: + # add the unique test name to the prompt to avoid caching leaking to other tests + text = "test_anthropic_prompt_caching_stream <- IGNORE THIS.
ARTICLES START ON THE NEXT LINE\n" + f.read() + client = Anthropic() + try: + client.messages.create( + unknown_parameter="unknown", + ) + except Exception: + pass + + for _ in range(2): + response = client.beta.prompt_caching.messages.create( + model="claude-3-5-sonnet-20240620", + max_tokens=1024, + stream=True, + system=[ + { + "type": "text", + "text": "You help generate concise summaries of news articles and blog posts that user sends you.", + }, + ], + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": text, + "cache_control": {"type": "ephemeral"}, + }, + ], + }, + ], + ) + response_content = "" + for event in response: + if event.type == "content_block_delta" and event.delta.type == "text_delta": + response_content += event.delta.text + + spans = exporter.get_finished_spans() + # verify overall shape + assert all(span.name == "anthropic.chat" for span in spans) + assert len(spans) == 2 + cache_creation_span = spans[0] + cache_read_span = spans[1] + + assert cache_creation_span.attributes["gen_ai.prompt.0.role"] == "user" + assert text == cache_creation_span.attributes["gen_ai.prompt.0.content"] + assert cache_read_span.attributes["gen_ai.prompt.0.role"] == "user" + assert text == cache_read_span.attributes["gen_ai.prompt.0.content"] + assert ( + cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] + == cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] + ) + + # first check that cache_creation_span only wrote to the cache and did not read from it + assert cache_creation_span.attributes["gen_ai.usage.cache_read_input_tokens"] == 0 + assert cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] != 0 + + # then check for exact figures for the fixture/cassette + assert cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] == 1165 + assert cache_creation_span.attributes["gen_ai.usage.prompt_tokens"] == 1169 + assert cache_creation_span.attributes["gen_ai.usage.completion_tokens"] == 238 + + # first check that cache_read_span only read from the cache and did not write to it + assert cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] != 0 + assert cache_read_span.attributes["gen_ai.usage.cache_creation_input_tokens"] == 0 + + # then check for exact figures for the fixture/cassette + assert cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] == 1165 + assert cache_read_span.attributes["gen_ai.usage.prompt_tokens"] == 1169 + assert cache_read_span.attributes["gen_ai.usage.completion_tokens"] == 227 + + # verify metrics + metrics_data = reader.get_metrics_data() + resource_metrics = metrics_data.resource_metrics + verify_metrics(resource_metrics, "claude-3-5-sonnet-20240620") + + +@pytest.mark.vcr +@pytest.mark.asyncio +async def test_anthropic_prompt_caching_async_stream(exporter, reader): + with open(Path(__file__).parent.joinpath("data/1024+tokens.txt"), "r") as f: + # add the unique test name to the prompt to avoid caching leaking to other tests + text = "test_anthropic_prompt_caching_async_stream <- IGNORE THIS.
ARTICLES START ON THE NEXT LINE\n" + f.read() + client = AsyncAnthropic() + try: + await client.messages.create( + unknown_parameter="unknown", + ) + except Exception: + pass + + for _ in range(2): + response = await client.beta.prompt_caching.messages.create( + model="claude-3-5-sonnet-20240620", + max_tokens=1024, + stream=True, + system=[ + { + "type": "text", + "text": "You help generate concise summaries of news articles and blog posts that user sends you.", + }, + ], + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": text, + "cache_control": {"type": "ephemeral"}, + }, + ], + }, + ], + ) + response_content = "" + async for event in response: + if event.type == "content_block_delta" and event.delta.type == "text_delta": + response_content += event.delta.text + + spans = exporter.get_finished_spans() + # verify overall shape + assert all(span.name == "anthropic.chat" for span in spans) + assert len(spans) == 2 + cache_creation_span = spans[0] + cache_read_span = spans[1] + + assert cache_creation_span.attributes["gen_ai.prompt.0.role"] == "user" + assert text == cache_creation_span.attributes["gen_ai.prompt.0.content"] + assert cache_read_span.attributes["gen_ai.prompt.0.role"] == "user" + assert text == cache_read_span.attributes["gen_ai.prompt.0.content"] + assert ( + cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] + == cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] + ) + + # first check that cache_creation_span only wrote to the cache and did not read from it + assert cache_creation_span.attributes["gen_ai.usage.cache_read_input_tokens"] == 0 + assert cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] != 0 + + # then check for exact figures for the fixture/cassette + assert cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] == 1167 + assert cache_creation_span.attributes["gen_ai.usage.prompt_tokens"] == 1171 + assert cache_creation_span.attributes["gen_ai.usage.completion_tokens"] == 225 + + # first check that cache_read_span only read from the cache and did not write to it + assert cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] != 0 + assert cache_read_span.attributes["gen_ai.usage.cache_creation_input_tokens"] == 0 + + # then check for exact figures for the fixture/cassette + assert cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] == 1167 + assert cache_read_span.attributes["gen_ai.usage.prompt_tokens"] == 1171 + assert cache_read_span.attributes["gen_ai.usage.completion_tokens"] == 187 + + # verify metrics + metrics_data = reader.get_metrics_data() + resource_metrics = metrics_data.resource_metrics + verify_metrics(resource_metrics, "claude-3-5-sonnet-20240620") diff --git a/packages/opentelemetry-instrumentation-bedrock/poetry.lock b/packages/opentelemetry-instrumentation-bedrock/poetry.lock index 0b1f0e991d..2f6e1b7aef 100644 --- a/packages/opentelemetry-instrumentation-bedrock/poetry.lock +++ b/packages/opentelemetry-instrumentation-bedrock/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
[[package]] name = "annotated-types" @@ -734,13 +734,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1550,4 +1550,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "683c86f3c496c53c343ed795accfbcc97ed862aa8499e2b4adbb7ecca723532b" +content-hash = "19aaa600a9e0ab0335df0b4eabaeac65904d6d99680baa074df2fe1267a53b08" diff --git a/packages/opentelemetry-instrumentation-bedrock/pyproject.toml b/packages/opentelemetry-instrumentation-bedrock/pyproject.toml index 0d358a401d..43ba54d338 100644 --- a/packages/opentelemetry-instrumentation-bedrock/pyproject.toml +++ b/packages/opentelemetry-instrumentation-bedrock/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" anthropic = ">=0.17.0" [tool.poetry.group.dev.dependencies] diff --git a/packages/opentelemetry-instrumentation-chromadb/poetry.lock b/packages/opentelemetry-instrumentation-chromadb/poetry.lock index 7e26bfbe7a..159d27abc3 100644 --- a/packages/opentelemetry-instrumentation-chromadb/poetry.lock +++ b/packages/opentelemetry-instrumentation-chromadb/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -1277,13 +1277,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -2515,4 +2515,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "97d793fec9ce344479e7d1e407a0ebc804a8838223a176e6904ee98eee8dcd33" +content-hash = "cb036c8db0df06024b37140e105d2ca83d84d156387256cdfe6e112092c45739" diff --git a/packages/opentelemetry-instrumentation-chromadb/pyproject.toml b/packages/opentelemetry-instrumentation-chromadb/pyproject.toml index c3c54520c3..37785a3be2 100644 --- a/packages/opentelemetry-instrumentation-chromadb/pyproject.toml +++ b/packages/opentelemetry-instrumentation-chromadb/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-semantic-conventions = "^0.48b0" opentelemetry-instrumentation = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-cohere/poetry.lock b/packages/opentelemetry-instrumentation-cohere/poetry.lock index 55c1b0e67c..d1e4555d11 100644 --- a/packages/opentelemetry-instrumentation-cohere/poetry.lock +++ b/packages/opentelemetry-instrumentation-cohere/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -708,13 +708,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1580,4 +1580,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "84fda4c0d5f07596f74fb2fec50ae192b22fd0304eed8709114c62ef1a432653" +content-hash = "06d65d6d8d8170bcade7aee7c16eeabc054bfef0fb9999c70063094e2ba1c876" diff --git a/packages/opentelemetry-instrumentation-cohere/pyproject.toml b/packages/opentelemetry-instrumentation-cohere/pyproject.toml index 09f0384881..b44f737b69 100644 --- a/packages/opentelemetry-instrumentation-cohere/pyproject.toml +++ b/packages/opentelemetry-instrumentation-cohere/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-google-generativeai/poetry.lock b/packages/opentelemetry-instrumentation-google-generativeai/poetry.lock index 1084d51572..7e15116794 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/poetry.lock +++ b/packages/opentelemetry-instrumentation-google-generativeai/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -652,13 +652,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1402,4 +1402,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "25aed9b30ac083cefdc223c4860be2fcc1f5b04467cd92e62bbdd0d8670404a7" +content-hash = "228a519900d3bb43749de759d0579256bb87355e3077352b3a07c0a946423016" diff --git a/packages/opentelemetry-instrumentation-google-generativeai/pyproject.toml b/packages/opentelemetry-instrumentation-google-generativeai/pyproject.toml index 7d2906753d..855da8635c 100644 --- a/packages/opentelemetry-instrumentation-google-generativeai/pyproject.toml +++ b/packages/opentelemetry-instrumentation-google-generativeai/pyproject.toml @@ -26,7 +26,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-groq/poetry.lock b/packages/opentelemetry-instrumentation-groq/poetry.lock index 851ede9366..d91bdabf8d 100644 --- a/packages/opentelemetry-instrumentation-groq/poetry.lock +++ b/packages/opentelemetry-instrumentation-groq/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -419,13 +419,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1051,4 +1051,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "b0b28d441c12011d91b9a3dd5928ce7c79f71a4a88c58a668dfd0fe8ac4378d6" +content-hash = "fcfff6968badfdbcfc1eaeea41561d8df0888900ded21d3e2cc77a58fecb86db" diff --git a/packages/opentelemetry-instrumentation-groq/pyproject.toml b/packages/opentelemetry-instrumentation-groq/pyproject.toml index a7ab5392b9..3fceb33d3d 100644 --- a/packages/opentelemetry-instrumentation-groq/pyproject.toml +++ b/packages/opentelemetry-instrumentation-groq/pyproject.toml @@ -26,7 +26,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-haystack/poetry.lock b/packages/opentelemetry-instrumentation-haystack/poetry.lock index 8adac7ab74..fbe1e03eb3 100644 --- a/packages/opentelemetry-instrumentation-haystack/poetry.lock +++ b/packages/opentelemetry-instrumentation-haystack/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -761,13 +761,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1579,4 +1579,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "5f524c5a2a7d85c3fe84681806796bea29542725ffe4eb6a690d517ea848995c" +content-hash = "0261e8d4cb19eed6e9978c2ce1874d99592d13f2d36d74923f90ba5e9215dada" diff --git a/packages/opentelemetry-instrumentation-haystack/pyproject.toml b/packages/opentelemetry-instrumentation-haystack/pyproject.toml index 68b5db73e7..aabdc625d0 100644 --- a/packages/opentelemetry-instrumentation-haystack/pyproject.toml +++ b/packages/opentelemetry-instrumentation-haystack/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-lancedb/poetry.lock b/packages/opentelemetry-instrumentation-lancedb/poetry.lock index 96e43e47fa..056ba2ff4c 100644 --- a/packages/opentelemetry-instrumentation-lancedb/poetry.lock +++ b/packages/opentelemetry-instrumentation-lancedb/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -545,13 +545,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1459,4 +1459,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "c41fe87c2600bb420573f2443d680b01df83c5f2904cd7695162b1bbc71cddb2" +content-hash = "50a98e654306911cbf8bc6b079d4cf6f90d4300eb6a239483fb5e7bacc4d29f3" diff --git a/packages/opentelemetry-instrumentation-lancedb/pyproject.toml b/packages/opentelemetry-instrumentation-lancedb/pyproject.toml index e1966c165f..eadf499dcc 100644 --- a/packages/opentelemetry-instrumentation-lancedb/pyproject.toml +++ b/packages/opentelemetry-instrumentation-lancedb/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-langchain/poetry.lock b/packages/opentelemetry-instrumentation-langchain/poetry.lock index 6ab60eb122..dbf44b6176 100644 --- a/packages/opentelemetry-instrumentation-langchain/poetry.lock +++ b/packages/opentelemetry-instrumentation-langchain/poetry.lock @@ -1472,7 +1472,7 @@ wrapt = ">=1.0.0,<2.0.0" [[package]] name = "opentelemetry-instrumentation-bedrock" -version = "0.30.0" +version = "0.33.2" description = "OpenTelemetry Bedrock instrumentation" optional = false python-versions = ">=3.9,<4" @@ -1484,7 +1484,7 @@ anthropic = ">=0.17.0" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.source] type = "directory" @@ -1492,7 +1492,7 @@ url = "../opentelemetry-instrumentation-bedrock" [[package]] name = "opentelemetry-instrumentation-openai" -version = "0.30.0" +version = "0.33.2" description = "OpenTelemetry OpenAI instrumentation" optional = false python-versions = ">=3.9,<4" @@ -1503,7 +1503,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" tiktoken = ">=0.6.0, <1" [package.extras] @@ -1546,13 +1546,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file 
= "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -2724,6 +2724,7 @@ description = "Automatically mock your HTTP interactions to simplify and speed u optional = false python-versions = ">=3.8" files = [ + {file = "vcrpy-6.0.1-py2.py3-none-any.whl", hash = "sha256:621c3fb2d6bd8aa9f87532c688e4575bcbbde0c0afeb5ebdb7e14cac409edfdd"}, {file = "vcrpy-6.0.1.tar.gz", hash = "sha256:9e023fee7f892baa0bbda2f7da7c8ac51165c1c6e38ff8688683a12a4bde9278"}, ] @@ -2945,4 +2946,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "7975b5f1d28287912593a8f45b0a47daa03bbdf1c0209d3eece416eb03d1c998" +content-hash = "5c3428fc5661f6a2247e72b8923af9da7892758992b5c45b0f3448702a9d3bd8" diff --git a/packages/opentelemetry-instrumentation-langchain/pyproject.toml b/packages/opentelemetry-instrumentation-langchain/pyproject.toml index 4303f0108d..af4db9e4ed 100644 --- a/packages/opentelemetry-instrumentation-langchain/pyproject.toml +++ b/packages/opentelemetry-instrumentation-langchain/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-llamaindex/poetry.lock b/packages/opentelemetry-instrumentation-llamaindex/poetry.lock index ad115671ea..70d7e2a961 100644 --- a/packages/opentelemetry-instrumentation-llamaindex/poetry.lock +++ b/packages/opentelemetry-instrumentation-llamaindex/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -2426,7 +2426,7 @@ instruments = ["asgiref (>=3.0,<4.0)"] [[package]] name = "opentelemetry-instrumentation-chromadb" -version = "0.28.2" +version = "0.33.2" description = "OpenTelemetry Chroma DB instrumentation" optional = false python-versions = ">=3.9,<4" @@ -2437,7 +2437,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -2448,7 +2448,7 @@ url = "../opentelemetry-instrumentation-chromadb" [[package]] name = "opentelemetry-instrumentation-cohere" -version = "0.28.2" +version = "0.33.2" description = "OpenTelemetry Cohere instrumentation" optional = false python-versions = ">=3.9,<4" @@ -2459,7 +2459,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -2491,7 +2491,7 @@ instruments = ["fastapi (>=0.58,<1.0)"] [[package]] name = "opentelemetry-instrumentation-openai" -version = "0.28.2" +version = "0.33.2" description = "OpenTelemetry OpenAI instrumentation" optional = false python-versions = ">=3.9,<4" @@ -2502,7 +2502,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" tiktoken = ">=0.6.0, <1" [package.extras] @@ -2559,13 +2559,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -2616,6 +2616,8 @@ files = [ {file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:960db0e31c4e52fa0fc3ecbaea5b2d3b58f379e32a95ae6b0ebeaa25b93dfd34"}, {file = "orjson-3.10.6-cp312-none-win32.whl", hash = "sha256:a6ea7afb5b30b2317e0bee03c8d34c8181bc5a36f2afd4d0952f378972c4efd5"}, {file = "orjson-3.10.6-cp312-none-win_amd64.whl", hash = "sha256:874ce88264b7e655dde4aeaacdc8fd772a7962faadfb41abe63e2a4861abc3dc"}, + {file = "orjson-3.10.6-cp313-none-win32.whl", hash = "sha256:efdf2c5cde290ae6b83095f03119bdc00303d7a03b42b16c54517baa3c4ca3d0"}, + {file = "orjson-3.10.6-cp313-none-win_amd64.whl", hash = "sha256:8e190fe7888e2e4392f52cafb9626113ba135ef53aacc65cd13109eb9746c43e"}, {file = "orjson-3.10.6-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:66680eae4c4e7fc193d91cfc1353ad6d01b4801ae9b5314f17e11ba55e934183"}, {file = 
"orjson-3.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caff75b425db5ef8e8f23af93c80f072f97b4fb3afd4af44482905c9f588da28"}, {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3722fddb821b6036fd2a3c814f6bd9b57a89dc6337b9924ecd614ebce3271394"}, @@ -4499,4 +4501,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "e23f543c73a7cd57ef00415d0edcdbd5ded9a44a979bbe887c5a3e551b9b3e0a" +content-hash = "a21f6ccabcf42c9e4d554f74ee7aa7b5c97e8a5c84394839f2c4b69f0e1e8836" diff --git a/packages/opentelemetry-instrumentation-llamaindex/pyproject.toml b/packages/opentelemetry-instrumentation-llamaindex/pyproject.toml index 490c60d012..8bd7b1e457 100644 --- a/packages/opentelemetry-instrumentation-llamaindex/pyproject.toml +++ b/packages/opentelemetry-instrumentation-llamaindex/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" inflection = "^0.5.1" [tool.poetry.group.dev.dependencies] diff --git a/packages/opentelemetry-instrumentation-marqo/poetry.lock b/packages/opentelemetry-instrumentation-marqo/poetry.lock index f671d701d8..12770b616d 100644 --- a/packages/opentelemetry-instrumentation-marqo/poetry.lock +++ b/packages/opentelemetry-instrumentation-marqo/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "annotated-types" @@ -427,13 +427,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1057,4 +1057,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "243f139dfb6883b50c16a26e2a0c4016bcedc20440b02eb330c7ff03becc3cd6" +content-hash = "8c1272a8cf960cc532a8e8a72cbe00d50431bfdf8db34e51bedbf90a58a75236" diff --git a/packages/opentelemetry-instrumentation-marqo/pyproject.toml b/packages/opentelemetry-instrumentation-marqo/pyproject.toml index 463efea05a..8ccee14165 100644 --- a/packages/opentelemetry-instrumentation-marqo/pyproject.toml +++ b/packages/opentelemetry-instrumentation-marqo/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-semantic-conventions = "^0.48b0" opentelemetry-instrumentation = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff 
--git a/packages/opentelemetry-instrumentation-milvus/poetry.lock b/packages/opentelemetry-instrumentation-milvus/poetry.lock index e013473143..e31d2fa96c 100644 --- a/packages/opentelemetry-instrumentation-milvus/poetry.lock +++ b/packages/opentelemetry-instrumentation-milvus/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "autopep8" @@ -346,13 +346,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -882,4 +882,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "f8b25528ab8767c825971285e275bf0abb47621438d9196763f9f40da00ae43b" +content-hash = "e4f1c2e39b7060d9306ce26aaab971d7699932c4a089c97fb2e1d9922b990f39" diff --git a/packages/opentelemetry-instrumentation-milvus/pyproject.toml b/packages/opentelemetry-instrumentation-milvus/pyproject.toml index 893c7fac0c..cabdbedcbb 100644 --- a/packages/opentelemetry-instrumentation-milvus/pyproject.toml +++ b/packages/opentelemetry-instrumentation-milvus/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-semantic-conventions = "^0.48b0" opentelemetry-instrumentation = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-mistralai/poetry.lock b/packages/opentelemetry-instrumentation-mistralai/poetry.lock index 7f91eb16fc..190c07246c 100644 --- a/packages/opentelemetry-instrumentation-mistralai/poetry.lock +++ b/packages/opentelemetry-instrumentation-mistralai/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -404,13 +404,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -450,6 +450,8 @@ files = [ {file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:960db0e31c4e52fa0fc3ecbaea5b2d3b58f379e32a95ae6b0ebeaa25b93dfd34"}, {file = "orjson-3.10.6-cp312-none-win32.whl", hash = "sha256:a6ea7afb5b30b2317e0bee03c8d34c8181bc5a36f2afd4d0952f378972c4efd5"}, {file = "orjson-3.10.6-cp312-none-win_amd64.whl", hash = "sha256:874ce88264b7e655dde4aeaacdc8fd772a7962faadfb41abe63e2a4861abc3dc"}, + {file = "orjson-3.10.6-cp313-none-win32.whl", hash = "sha256:efdf2c5cde290ae6b83095f03119bdc00303d7a03b42b16c54517baa3c4ca3d0"}, + {file = "orjson-3.10.6-cp313-none-win_amd64.whl", hash = "sha256:8e190fe7888e2e4392f52cafb9626113ba135ef53aacc65cd13109eb9746c43e"}, {file = "orjson-3.10.6-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:66680eae4c4e7fc193d91cfc1353ad6d01b4801ae9b5314f17e11ba55e934183"}, {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caff75b425db5ef8e8f23af93c80f072f97b4fb3afd4af44482905c9f588da28"}, {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3722fddb821b6036fd2a3c814f6bd9b57a89dc6337b9924ecd614ebce3271394"}, @@ -1084,4 +1086,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "428d3a4850c25ed112b83525aad32bf24d1bde0d92cbc9cf5aa12d7f40eb5bbe" +content-hash = "367daa9cd1d5990ee082a5a6e3cbb7a7bba605ecfdc2b73550e6492a18a964cd" diff --git a/packages/opentelemetry-instrumentation-mistralai/pyproject.toml b/packages/opentelemetry-instrumentation-mistralai/pyproject.toml index bd2ba48e79..2141b649fc 100644 --- a/packages/opentelemetry-instrumentation-mistralai/pyproject.toml +++ b/packages/opentelemetry-instrumentation-mistralai/pyproject.toml @@ -26,7 +26,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-ollama/poetry.lock b/packages/opentelemetry-instrumentation-ollama/poetry.lock index 0424960138..4d162c7c09 100644 --- a/packages/opentelemetry-instrumentation-ollama/poetry.lock +++ b/packages/opentelemetry-instrumentation-ollama/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "anyio" @@ -400,13 +400,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -909,4 +909,4 @@ instruments = ["ollama"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "8ce5d5ca184a5c388fbd06b3c33b7814e278f783984b03f37b6a530a2485252f" +content-hash = "f5b23aa3d31b9ca40eece5fc67fbc7aa79319f1e4fe7461e7faf2c08395619d3" diff --git a/packages/opentelemetry-instrumentation-ollama/pyproject.toml b/packages/opentelemetry-instrumentation-ollama/pyproject.toml index 4f362316cd..df1808c0b7 100644 --- a/packages/opentelemetry-instrumentation-ollama/pyproject.toml +++ b/packages/opentelemetry-instrumentation-ollama/pyproject.toml @@ -26,7 +26,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" ollama = "^0.3.2" [tool.poetry.group.dev.dependencies] diff --git a/packages/opentelemetry-instrumentation-openai/poetry.lock b/packages/opentelemetry-instrumentation-openai/poetry.lock index 71fcbe8178..f183c06c71 100644 --- a/packages/opentelemetry-instrumentation-openai/poetry.lock +++ b/packages/opentelemetry-instrumentation-openai/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -670,13 +670,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1781,4 +1781,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "6132e13795caca36dea2c22c4072ac09d1ec72582f4b6068a56e173c9a1ce10b" +content-hash = "99d1404454910dbcbe20e544ba5a37a487e948926e29cd4583937c32b772456b" diff --git a/packages/opentelemetry-instrumentation-openai/pyproject.toml b/packages/opentelemetry-instrumentation-openai/pyproject.toml index a7ed3253c2..7744418999 100644 --- a/packages/opentelemetry-instrumentation-openai/pyproject.toml +++ b/packages/opentelemetry-instrumentation-openai/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" tiktoken = ">=0.6.0, <1" [tool.poetry.group.dev.dependencies] diff --git a/packages/opentelemetry-instrumentation-pinecone/poetry.lock b/packages/opentelemetry-instrumentation-pinecone/poetry.lock index 125e1dc272..f0aebde184 100644 --- a/packages/opentelemetry-instrumentation-pinecone/poetry.lock +++ b/packages/opentelemetry-instrumentation-pinecone/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -547,7 +547,7 @@ wrapt = ">=1.0.0,<2.0.0" [[package]] name = "opentelemetry-instrumentation-openai" -version = "0.28.2" +version = "0.33.2" description = "OpenTelemetry OpenAI instrumentation" optional = false python-versions = ">=3.9,<4" @@ -558,7 +558,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" tiktoken = ">=0.6.0, <1" [package.extras] @@ -601,13 +601,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1601,4 +1601,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "4b4d12b4701b9a681643eafe8ee526652eedd277bb712b695a7ebc0222de6f85" +content-hash = "c202f256983a86fa64d445856320fb0e347be5c3a35b537b0c922bbdd487491e" diff --git a/packages/opentelemetry-instrumentation-pinecone/pyproject.toml b/packages/opentelemetry-instrumentation-pinecone/pyproject.toml index bc09f1a21e..834be25dcf 100644 --- a/packages/opentelemetry-instrumentation-pinecone/pyproject.toml +++ b/packages/opentelemetry-instrumentation-pinecone/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-qdrant/poetry.lock b/packages/opentelemetry-instrumentation-qdrant/poetry.lock index a04c475c91..27eb105e3a 100644 --- a/packages/opentelemetry-instrumentation-qdrant/poetry.lock +++ b/packages/opentelemetry-instrumentation-qdrant/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -499,13 +499,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -989,4 +989,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "3b42fb4c9fca6b4d15448cf13893dd9991ca87b95b89b38f7af0291b307aee6f" +content-hash = "841a8beccdf025ea3b699d4259fbfcf5b205590fd82f6a3ed03b799959bc8cd9" diff --git a/packages/opentelemetry-instrumentation-qdrant/pyproject.toml b/packages/opentelemetry-instrumentation-qdrant/pyproject.toml index 4e85636923..4212c512d5 100644 --- a/packages/opentelemetry-instrumentation-qdrant/pyproject.toml +++ b/packages/opentelemetry-instrumentation-qdrant/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-replicate/poetry.lock b/packages/opentelemetry-instrumentation-replicate/poetry.lock index a4467bfe35..a0326b24d4 100644 --- a/packages/opentelemetry-instrumentation-replicate/poetry.lock +++ b/packages/opentelemetry-instrumentation-replicate/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -388,13 +388,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1010,4 +1010,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "1eba399bb7674f6c3174c509b2a48e81d3444c1728ebb6f9cfe9575b8afb08ea" +content-hash = "91d7d8d08b9b5c5cb81e9d3e7d16a5d61807e039965c40aeb50b7f433428f40c" diff --git a/packages/opentelemetry-instrumentation-replicate/pyproject.toml b/packages/opentelemetry-instrumentation-replicate/pyproject.toml index 760bbaef6a..9f9e5b2462 100644 --- a/packages/opentelemetry-instrumentation-replicate/pyproject.toml +++ b/packages/opentelemetry-instrumentation-replicate/pyproject.toml @@ -23,7 +23,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-sagemaker/poetry.lock b/packages/opentelemetry-instrumentation-sagemaker/poetry.lock index 2022ad151a..550a00af5f 100644 --- a/packages/opentelemetry-instrumentation-sagemaker/poetry.lock +++ b/packages/opentelemetry-instrumentation-sagemaker/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "autopep8" @@ -348,13 +348,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -884,4 +884,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "324fe583ad8df2932ed81088132ab537b3a0ec83051f4c60ec64a49d1d80bc98" +content-hash = "7eeac9e91143763c38cd6fb8128eafca575f12ab57a97cbabf8c489469645df3" diff --git a/packages/opentelemetry-instrumentation-sagemaker/pyproject.toml b/packages/opentelemetry-instrumentation-sagemaker/pyproject.toml index f8a5df1c7d..325d97111a 100644 --- a/packages/opentelemetry-instrumentation-sagemaker/pyproject.toml +++ b/packages/opentelemetry-instrumentation-sagemaker/pyproject.toml @@ -25,7 +25,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.26.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-together/poetry.lock b/packages/opentelemetry-instrumentation-together/poetry.lock index c0ca12e03d..40c0fcb0db 100644 --- a/packages/opentelemetry-instrumentation-together/poetry.lock +++ b/packages/opentelemetry-instrumentation-together/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -786,13 +786,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1658,4 +1658,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "9082b396374acbf5f88fe09beb114f409a8aba145a4ef43a1d06f5566842c824" +content-hash = "547ff3f77da49fd506202e7ec11e78399243dcb4c813e7d6e8ffe13d0d95cf3d" diff --git a/packages/opentelemetry-instrumentation-together/pyproject.toml b/packages/opentelemetry-instrumentation-together/pyproject.toml index d0f3cbc5dc..eb36f8b9f9 100644 --- a/packages/opentelemetry-instrumentation-together/pyproject.toml +++ b/packages/opentelemetry-instrumentation-together/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-transformers/poetry.lock b/packages/opentelemetry-instrumentation-transformers/poetry.lock index 8efd6f77a9..63fac5fad2 100644 --- a/packages/opentelemetry-instrumentation-transformers/poetry.lock +++ b/packages/opentelemetry-instrumentation-transformers/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "autopep8" @@ -162,13 +162,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -398,4 +398,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "65121b539ce1d4d088cc5afe90b1b8aab0bd846ba2e222702add5ce74670cff0" +content-hash = "afc430c1a16e93866bb1a71aa4c9899503e0a1869c8aa5922acf792dd333cec0" diff --git a/packages/opentelemetry-instrumentation-transformers/pyproject.toml b/packages/opentelemetry-instrumentation-transformers/pyproject.toml index 8f319eeee5..a09046bdbc 100644 --- a/packages/opentelemetry-instrumentation-transformers/pyproject.toml +++ b/packages/opentelemetry-instrumentation-transformers/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-vertexai/poetry.lock b/packages/opentelemetry-instrumentation-vertexai/poetry.lock index 035df5a7a9..f52ae4572f 100644 --- a/packages/opentelemetry-instrumentation-vertexai/poetry.lock +++ b/packages/opentelemetry-instrumentation-vertexai/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -882,13 +882,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1664,4 +1664,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "7861387245eaaac964dc15a050efd09cc1432cf600033c3727c0b3793912c189" +content-hash = "e34a44a381a8c8dbf888d371687377cd18cd50ba8ea21e61d6457b5158cd5224" diff --git a/packages/opentelemetry-instrumentation-vertexai/pyproject.toml b/packages/opentelemetry-instrumentation-vertexai/pyproject.toml index cd622a757c..db2fe67b1f 100644 --- a/packages/opentelemetry-instrumentation-vertexai/pyproject.toml +++ b/packages/opentelemetry-instrumentation-vertexai/pyproject.toml @@ -28,7 +28,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-watsonx/poetry.lock b/packages/opentelemetry-instrumentation-watsonx/poetry.lock index 0b70a20034..7612c5fd5c 100644 --- a/packages/opentelemetry-instrumentation-watsonx/poetry.lock +++ b/packages/opentelemetry-instrumentation-watsonx/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "autopep8" @@ -608,13 +608,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1229,4 +1229,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "022eea38ea32360d5b7d96a8096a164f6200a66775e6064747edb6dd0bd8be69" +content-hash = "575e9f6663f44eb79c833049998c4cff00b577630bcd83491bafec3f0de1c174" diff --git a/packages/opentelemetry-instrumentation-watsonx/pyproject.toml b/packages/opentelemetry-instrumentation-watsonx/pyproject.toml index 8b957bf9fa..c2fb6b5b01 100644 --- a/packages/opentelemetry-instrumentation-watsonx/pyproject.toml +++ b/packages/opentelemetry-instrumentation-watsonx/pyproject.toml @@ -15,7 +15,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-instrumentation-weaviate/poetry.lock b/packages/opentelemetry-instrumentation-weaviate/poetry.lock index b87026e959..ae6a9d39ff 100644 --- a/packages/opentelemetry-instrumentation-weaviate/poetry.lock +++ b/packages/opentelemetry-instrumentation-weaviate/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -750,7 +750,7 @@ wrapt = ">=1.0.0,<2.0.0" [[package]] name = "opentelemetry-instrumentation-openai" -version = "0.28.2" +version = "0.33.2" description = "OpenTelemetry OpenAI instrumentation" optional = false python-versions = ">=3.9,<4" @@ -761,7 +761,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" tiktoken = ">=0.6.0, <1" [package.extras] @@ -804,13 +804,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -1667,4 +1667,4 @@ instruments = [] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "cbe45639981564ed03cd43f99b69a27b15c67e9c4ec2cdfb1a135638f299d4a2" +content-hash = "32c29e9dcfc38821cd16d181cb3460f168949ac89caee854f7fc10b9bac9710c" diff --git a/packages/opentelemetry-instrumentation-weaviate/pyproject.toml b/packages/opentelemetry-instrumentation-weaviate/pyproject.toml index 669bec36b3..da828d60c1 100644 --- a/packages/opentelemetry-instrumentation-weaviate/pyproject.toml +++ b/packages/opentelemetry-instrumentation-weaviate/pyproject.toml @@ -27,7 +27,7 @@ python = ">=3.9,<4" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [tool.poetry.group.dev.dependencies] autopep8 = "^2.2.0" diff --git a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py index 10d074ff2f..8658077273 100644 --- a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py +++ b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py @@ -34,7 +34,7 @@ class SpanAttributes: # Semantic Conventions for LLM requests, this needs to be removed after # OpenTelemetry Semantic Conventions support Gen AI. # Issue at https://github.com/open-telemetry/opentelemetry-python/issues/3868 - # Refer to https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/llm-spans.md + # Refer to https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-spans.md # for more detail for LLM spans from OpenTelemetry Community. 
LLM_SYSTEM = "gen_ai.system" LLM_REQUEST_MODEL = "gen_ai.request.model" @@ -46,9 +46,10 @@ class SpanAttributes: LLM_RESPONSE_MODEL = "gen_ai.response.model" LLM_USAGE_COMPLETION_TOKENS = "gen_ai.usage.completion_tokens" LLM_USAGE_PROMPT_TOKENS = "gen_ai.usage.prompt_tokens" + LLM_USAGE_CACHE_CREATION_INPUT_TOKENS = "gen_ai.usage.cache_creation_input_tokens" + LLM_USAGE_CACHE_READ_INPUT_TOKENS = "gen_ai.usage.cache_read_input_tokens" LLM_TOKEN_TYPE = "gen_ai.token.type" # To be added - # LLM_RESPONSE_FINISH_REASON = "gen_ai.response.finish_reasons" # LLM_RESPONSE_ID = "gen_ai.response.id" # LLM diff --git a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py index 3d26edf777..df12433297 100644 --- a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py +++ b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py @@ -1 +1 @@ -__version__ = "0.4.1" +__version__ = "0.4.2" diff --git a/packages/opentelemetry-semantic-conventions-ai/pyproject.toml b/packages/opentelemetry-semantic-conventions-ai/pyproject.toml index e9388c419e..047ca9bfec 100644 --- a/packages/opentelemetry-semantic-conventions-ai/pyproject.toml +++ b/packages/opentelemetry-semantic-conventions-ai/pyproject.toml @@ -8,7 +8,7 @@ show_missing = true [tool.poetry] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" authors = [ "Gal Kleinman ", diff --git a/packages/traceloop-sdk/poetry.lock b/packages/traceloop-sdk/poetry.lock index d1681e1454..1b817f1941 100644 --- a/packages/traceloop-sdk/poetry.lock +++ b/packages/traceloop-sdk/poetry.lock @@ -1489,7 +1489,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1511,7 +1511,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1534,7 +1534,7 @@ anthropic = ">=0.17.0" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.source] type = "directory" @@ -1553,7 +1553,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1575,7 +1575,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1597,7 +1597,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = 
[] @@ -1619,7 +1619,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1641,7 +1641,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1663,7 +1663,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1685,7 +1685,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1708,7 +1708,7 @@ inflection = "^0.5.1" opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1745,7 +1745,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1767,7 +1767,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1789,7 +1789,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1811,7 +1811,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = ["ollama (>=0.3.2,<0.4.0)"] @@ -1833,7 +1833,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" tiktoken = ">=0.6.0, <1" [package.extras] @@ -1856,7 +1856,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1878,7 +1878,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1900,7 +1900,7 @@ develop = true opentelemetry-api 
= "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -1942,7 +1942,7 @@ develop = true opentelemetry-api = "^1.26.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.source] type = "directory" @@ -1998,7 +1998,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -2020,7 +2020,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.source] type = "directory" @@ -2060,7 +2060,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -2082,7 +2082,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -2104,7 +2104,7 @@ develop = true opentelemetry-api = "^1.27.0" opentelemetry-instrumentation = "^0.48b0" opentelemetry-semantic-conventions = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" [package.extras] instruments = [] @@ -2160,13 +2160,13 @@ opentelemetry-api = "1.27.0" [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.1" +version = "0.4.2" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"}, - {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, ] [[package]] @@ -3485,4 +3485,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "38ca90d48eb8fefebc864dcb62ad31b7f018bb3439beeeed1ccafe06ba1d1336" +content-hash = "6ee78682ca73201a791cc3617997af267895556f6e9bbe939091402c56923e18" diff --git a/packages/traceloop-sdk/pyproject.toml b/packages/traceloop-sdk/pyproject.toml index e185b0b42f..708c1e0654 100644 --- a/packages/traceloop-sdk/pyproject.toml +++ b/packages/traceloop-sdk/pyproject.toml @@ -34,7 +34,7 @@ opentelemetry-instrumentation-requests = "^0.48b0" opentelemetry-instrumentation-sqlalchemy = "^0.48b0" opentelemetry-instrumentation-urllib3 = "^0.48b0" 
opentelemetry-instrumentation-threading = "^0.48b0" -opentelemetry-semantic-conventions-ai = "0.4.1" +opentelemetry-semantic-conventions-ai = "0.4.2" opentelemetry-instrumentation-mistralai = {path="../opentelemetry-instrumentation-mistralai", develop=true} opentelemetry-instrumentation-openai = {path="../opentelemetry-instrumentation-openai", develop=true} opentelemetry-instrumentation-ollama = {path="../opentelemetry-instrumentation-ollama", develop=true}
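
For context on the semconv_ai change above: the two new constants extend the existing gen_ai.usage.* token accounting to prompt caching. Below is a minimal sketch, not part of this diff, of how an instrumentation might record them on a span. It assumes only the public OpenTelemetry tracing API and the SpanAttributes constants added in this change; the span name and token counts are hypothetical placeholders.

# Illustrative sketch only -- not part of this diff.
from opentelemetry import trace
from opentelemetry.semconv_ai import SpanAttributes

tracer = trace.get_tracer(__name__)

# Hypothetical usage numbers, e.g. as reported by a prompt-caching
# LLM response; the span name "example.chat" is a placeholder.
with tracer.start_as_current_span("example.chat") as span:
    span.set_attribute(SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS, 1024)
    span.set_attribute(SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, 256)

Because the attribute values are plain strings ("gen_ai.usage.cache_creation_input_tokens" and "gen_ai.usage.cache_read_input_tokens"), any backend that already indexes gen_ai.usage.* attributes should pick them up without further changes.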