diff --git a/src/langtrace_python_sdk/instrumentation/gemini/patch.py b/src/langtrace_python_sdk/instrumentation/gemini/patch.py
index a707dae9..53b0319f 100644
--- a/src/langtrace_python_sdk/instrumentation/gemini/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/gemini/patch.py
@@ -137,7 +137,7 @@ def set_response_attributes(
     if hasattr(result, "text"):
         set_event_completion(span, [{"role": "assistant", "content": result.text}])
 
-    if hasattr(result, "usage_metadata"):
+    if hasattr(result, "usage_metadata") and result.usage_metadata is not None:
         usage = result.usage_metadata
         input_tokens = usage.prompt_token_count
         output_tokens = usage.candidates_token_count
@@ -152,7 +152,7 @@ def build_streaming_response(span, response):
         item_to_yield = item
         complete_response += str(item.text)
         yield item_to_yield
-        if hasattr(item, "usage_metadata"):
+        if hasattr(item, "usage_metadata") and item.usage_metadata is not None:
             usage = item.usage_metadata
             input_tokens = usage.prompt_token_count
             output_tokens = usage.candidates_token_count
@@ -171,7 +171,7 @@ async def abuild_streaming_response(span, response):
         item_to_yield = item
         complete_response += str(item.text)
         yield item_to_yield
-        if hasattr(item, "usage_metadata"):
+        if hasattr(item, "usage_metadata") and item.usage_metadata is not None:
             usage = item.usage_metadata
             input_tokens = usage.prompt_token_count
             output_tokens = usage.candidates_token_count
diff --git a/src/langtrace_python_sdk/instrumentation/langchain_core/patch.py b/src/langtrace_python_sdk/instrumentation/langchain_core/patch.py
index ce93c4c5..7b6e0821 100644
--- a/src/langtrace_python_sdk/instrumentation/langchain_core/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/langchain_core/patch.py
@@ -98,7 +98,7 @@ def traced_method(wrapped, instance, args, kwargs):
         result = wrapped(*args, **kwargs)
         if trace_output:
             span.set_attribute("langchain.outputs", to_json_string(result))
-            if hasattr(result, "usage_metadata"):
+            if hasattr(result, "usage_metadata") and result.usage_metadata is not None:
                 span.set_attribute(
                     SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
                     result.usage_metadata["input_tokens"],
diff --git a/src/langtrace_python_sdk/instrumentation/vertexai/patch.py b/src/langtrace_python_sdk/instrumentation/vertexai/patch.py
index 10c0de70..8ca55cfe 100644
--- a/src/langtrace_python_sdk/instrumentation/vertexai/patch.py
+++ b/src/langtrace_python_sdk/instrumentation/vertexai/patch.py
@@ -77,7 +77,7 @@ def set_response_attributes(span: Span, result):
     if hasattr(result, "text"):
         set_event_completion(span, [{"role": "assistant", "content": result.text}])
 
-    if hasattr(result, "usage_metadata"):
+    if hasattr(result, "usage_metadata") and result.usage_metadata is not None:
         usage = result.usage_metadata
         input_tokens = usage.prompt_token_count
         output_tokens = usage.candidates_token_count
diff --git a/src/langtrace_python_sdk/utils/llm.py b/src/langtrace_python_sdk/utils/llm.py
index c16d0b9d..b9355124 100644
--- a/src/langtrace_python_sdk/utils/llm.py
+++ b/src/langtrace_python_sdk/utils/llm.py
@@ -421,7 +421,7 @@ def set_usage_attributes(self, chunk):
             self.completion_tokens = chunk.usage.completion_tokens
 
         # VertexAI
-        if hasattr(chunk, "usage_metadata"):
+        if hasattr(chunk, "usage_metadata") and chunk.usage_metadata is not None:
             self.completion_tokens = chunk.usage_metadata.candidates_token_count
             self.prompt_tokens = chunk.usage_metadata.prompt_token_count
diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py
index 2c316257..ef7fb174 100644
--- a/src/langtrace_python_sdk/version.py
+++ b/src/langtrace_python_sdk/version.py
@@ -1 +1 @@
-__version__ = "3.3.13"
+__version__ = "3.3.14"
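Why the extra `is not None` check matters: these SDKs can hand back objects whose `usage_metadata` attribute exists but is set to `None` (for example, a streaming chunk emitted before the final usage report arrives). `hasattr` alone passes in that case, and the subsequent `usage.prompt_token_count` access raises `AttributeError`. Below is a minimal sketch of that failure mode, assuming the attribute-style `usage_metadata` used in the Gemini/VertexAI paths above; `FakeChunk` is a hypothetical stand-in, not a class from langtrace_python_sdk or any of the instrumented SDKs.

```python
# Minimal sketch of the failure mode the added "is not None" guard prevents.
# FakeChunk is a hypothetical stand-in for an SDK response object.

class FakeChunk:
    def __init__(self):
        # Attribute is present but holds no usage data yet.
        self.usage_metadata = None

chunk = FakeChunk()

# Old guard: hasattr() is True even though the value is None, so the
# token-count access below raises AttributeError.
if hasattr(chunk, "usage_metadata"):
    try:
        prompt_tokens = chunk.usage_metadata.prompt_token_count
    except AttributeError as exc:
        print(f"old guard fails: {exc}")

# Patched guard: the block is skipped entirely when the field is None.
if hasattr(chunk, "usage_metadata") and chunk.usage_metadata is not None:
    prompt_tokens = chunk.usage_metadata.prompt_token_count  # not reached here
```

The langchain_core path reads `usage_metadata` as a mapping (`result.usage_metadata["input_tokens"]`), where a `None` value would raise `TypeError` rather than `AttributeError`, but the same combined guard covers both shapes.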