From 0be24cbb3b7498c4ad645f5e99a436aa430b9d80 Mon Sep 17 00:00:00 2001
From: Kevin Guevara
Date: Thu, 16 Jan 2025 15:14:10 -0500
Subject: [PATCH 1/2] fix: use asubmit_impact in async wrappers

---
 scope3ai/tracers/anthropic/chat.py        |  6 +++---
 scope3ai/tracers/cohere/chat.py           |  4 ++--
 scope3ai/tracers/cohere/chat_v2.py        |  4 ++--
 scope3ai/tracers/litellm/chat.py          |  4 ++--
 scope3ai/tracers/mistralai/chat.py        |  4 ++--
 scope3ai/tracers/openai/chat.py           |  2 +-
 scope3ai/tracers/openai/speech_to_text.py | 22 +++++++++++++------
 scope3ai/tracers/openai/text_to_image.py  | 22 +++++++++++++------
 scope3ai/tracers/openai/text_to_speech.py | 25 ++++++++++++++--------
 scope3ai/tracers/openai/translation.py    | 26 +++++++++++++++--------
 10 files changed, 75 insertions(+), 44 deletions(-)

diff --git a/scope3ai/tracers/anthropic/chat.py b/scope3ai/tracers/anthropic/chat.py
index 674f8af..426ff99 100644
--- a/scope3ai/tracers/anthropic/chat.py
+++ b/scope3ai/tracers/anthropic/chat.py
@@ -108,7 +108,7 @@ async def __stream_text__(self) -> AsyncIterator[str]:  # type: ignore[misc]
             request_duration_ms=requests_latency * 1000,
             managed_service_id=PROVIDER,
         )
-        self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row)
+        self.scope3ai = await Scope3AI.get_instance().asubmit_impact(scope3_row)
 
     def __init__(self, parent) -> None:  # noqa: ANN001
         super().__init__(
@@ -212,7 +212,7 @@ async def __stream__(self) -> AsyncIterator[_T]:
             request_duration_ms=request_latency * 1000,
             managed_service_id=PROVIDER,
         )
-        self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row)
+        self.scope3ai = await Scope3AI.get_instance().asubmit_impact(scope3_row)
 
     def __init__(self, parent) -> None:  # noqa: ANN001
         super().__init__(
@@ -265,7 +265,7 @@ async def _anthropic_async_chat_wrapper(
         request_duration_ms=request_latency * 1000,
         managed_service_id=PROVIDER,
     )
-    scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
+    scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
     if scope3ai_ctx is not None:
         return Message(**response.model_dump(), scope3ai=scope3ai_ctx)
     else:
diff --git a/scope3ai/tracers/cohere/chat.py b/scope3ai/tracers/cohere/chat.py
index a0f8d63..6e3e9c8 100644
--- a/scope3ai/tracers/cohere/chat.py
+++ b/scope3ai/tracers/cohere/chat.py
@@ -70,7 +70,7 @@ async def cohere_async_chat_wrapper(
         request_duration_ms=request_latency * 1000,
         managed_service_id=PROVIDER,
     )
-    scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
+    scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
     return NonStreamedChatResponse(**response.dict(), scope3ai=scope3ai_ctx)
 
 
@@ -122,7 +122,7 @@ async def cohere_async_stream_chat_wrapper(
             request_duration_ms=request_latency * 1000,
             managed_service_id=PROVIDER,
         )
-        scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
+        scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
         yield StreamEndStreamedChatResponse(**event.dict(), scope3ai=scope3ai_ctx)
     else:
         yield event
diff --git a/scope3ai/tracers/cohere/chat_v2.py b/scope3ai/tracers/cohere/chat_v2.py
index 17a8144..b7950c8 100644
--- a/scope3ai/tracers/cohere/chat_v2.py
+++ b/scope3ai/tracers/cohere/chat_v2.py
@@ -73,7 +73,7 @@ async def cohere_async_chat_v2_wrapper(
         request_duration_ms=request_latency * 1000,
         managed_service_id=PROVIDER,
     )
-    scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
+    scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
     return ChatResponse(**response.dict(), scope3ai=scope3ai_ctx)
 
 
@@ -125,5 +125,5 @@ async def cohere_async_stream_chat_v2_wrapper(
         request_duration_ms=request_latency * 1000,
         managed_service_id=PROVIDER,
     )
-    scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
+    scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
     yield Scope3AIStreamedChatResponseV2(type="scope3ai", scope3ai=scope3ai_ctx)
diff --git a/scope3ai/tracers/litellm/chat.py b/scope3ai/tracers/litellm/chat.py
index 3995829..d1457f0 100644
--- a/scope3ai/tracers/litellm/chat.py
+++ b/scope3ai/tracers/litellm/chat.py
@@ -114,7 +114,7 @@ async def litellm_async_chat_wrapper_base(
         request_duration_ms=float(request_latency) * 1000,
         managed_service_id=PROVIDER,
     )
-    scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
+    scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
     if scope3ai_ctx is not None:
         return ChatCompletion(**response.model_dump(), scope3ai=scope3ai_ctx)
     else:
@@ -143,7 +143,7 @@ async def litellm_async_chat_wrapper_stream(  # type: ignore[misc]
             request_duration_ms=float(request_latency) * 1000,
             managed_service_id=PROVIDER,
         )
-        scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
+        scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
         if scope3ai_ctx is not None:
             yield ChatCompletionChunk(**chunk.model_dump(), scope3ai=scope3ai_ctx)
         else:
diff --git a/scope3ai/tracers/mistralai/chat.py b/scope3ai/tracers/mistralai/chat.py
index 8c19651..fa70c9a 100644
--- a/scope3ai/tracers/mistralai/chat.py
+++ b/scope3ai/tracers/mistralai/chat.py
@@ -92,7 +92,7 @@ async def mistralai_v1_async_chat_wrapper(
         request_duration_ms=request_latency * 1000,
         managed_service_id=PROVIDER,
     )
-    scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
+    scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
     chat = ChatCompletionResponse(**response.model_dump())
     chat.scope3ai = scope3ai_ctx
     return chat
@@ -116,7 +116,7 @@ async def _generator(
             request_duration_ms=request_latency * 1000,
             managed_service_id=PROVIDER,
         )
-        scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
+        scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
         chunk.data = CompletionChunk(**chunk.data.model_dump(), scope3ai=scope3ai_ctx)
         yield chunk
 
diff --git a/scope3ai/tracers/openai/chat.py b/scope3ai/tracers/openai/chat.py
index 2730741..100c53a 100644
--- a/scope3ai/tracers/openai/chat.py
+++ b/scope3ai/tracers/openai/chat.py
@@ -208,7 +208,7 @@ async def openai_async_chat_wrapper_stream(
                 managed_service_id=PROVIDER,
             )
 
-            scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
+            scope3_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
             yield ChatCompletionChunk(**chunk.model_dump(), scope3ai=scope3_ctx)
         else:
             yield chunk
diff --git a/scope3ai/tracers/openai/speech_to_text.py b/scope3ai/tracers/openai/speech_to_text.py
index db9b5ab..4350121 100644
--- a/scope3ai/tracers/openai/speech_to_text.py
+++ b/scope3ai/tracers/openai/speech_to_text.py
@@ -32,9 +32,9 @@ class TranscriptionVerbose(_TranscriptionVerbose):
     scope3ai: Optional[Scope3AIContext] = None
 
 
-def _openai_speech_to_text_wrapper(
+def _openai_speech_to_text_get_impact_row(
     response: Any, request_latency: float, kwargs: dict
-) -> Union[Transcription, TranscriptionVerbose, str]:
+) -> (Union[Transcription, TranscriptionVerbose, str], ImpactRow):
     model = kwargs["model"]
     encoder = tiktoken.get_encoding("cl100k_base")
 
@@ -58,7 +58,6 @@
         task=Task.speech_to_text,
         **options,
     )
-    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
 
     if isinstance(response, _Transcription):
         result = Transcription.model_construct(**response.model_dump())
@@ -69,8 +68,7 @@
     else:
         logger.error(f"Unexpected response type: {type(response)}")
-        return response
-    result.scope3ai = scope3_ctx
-    return result
+        return response, scope3_row
+    return result, scope3_row
 
 
 def openai_speech_to_text_wrapper(
@@ -79,7 +77,12 @@
     timer_start = time.perf_counter()
     response = wrapped(*args, **kwargs)
     request_latency = (time.perf_counter() - timer_start) * 1000
-    return _openai_speech_to_text_wrapper(response, request_latency, kwargs)
+    result, impact_row = _openai_speech_to_text_get_impact_row(
+        response, request_latency, kwargs
+    )
+    scope3_ctx = Scope3AI.get_instance().submit_impact(impact_row)
+    result.scope3ai = scope3_ctx
+    return result
 
 
 async def openai_async_speech_to_text_wrapper(
@@ -88,4 +91,9 @@
     timer_start = time.perf_counter()
     response = await wrapped(*args, **kwargs)
     request_latency = (time.perf_counter() - timer_start) * 1000
-    return _openai_speech_to_text_wrapper(response, request_latency, kwargs)
+    result, impact_row = _openai_speech_to_text_get_impact_row(
+        response, request_latency, kwargs
+    )
+    scope3_ctx = await Scope3AI.get_instance().asubmit_impact(impact_row)
+    result.scope3ai = scope3_ctx
+    return result
diff --git a/scope3ai/tracers/openai/text_to_image.py b/scope3ai/tracers/openai/text_to_image.py
index 132b791..249b82c 100644
--- a/scope3ai/tracers/openai/text_to_image.py
+++ b/scope3ai/tracers/openai/text_to_image.py
@@ -18,9 +18,9 @@ class ImageResponse(_ImageResponse):
     scope3ai: Optional[Scope3AIContext] = None
 
 
-def _openai_image_wrapper(
+def _openai_image_get_impact_row(
     response: _ImageResponse, request_latency: float, **kwargs: Any
-) -> ImageResponse:
+) -> (ImageResponse, ImpactRow):
     model = kwargs.get("model", DEFAULT_MODEL)
     size = RootImage(root=kwargs.get("size", DEFAULT_SIZE))
     n = kwargs.get("n", DEFAULT_N)
@@ -33,10 +33,8 @@
         managed_service_id=PROVIDER,
     )
 
-    scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
     result = ImageResponse.model_construct(**response.model_dump())
-    result.scope3ai = scope3ai_ctx
-    return result
+    return result, scope3_row
 
 
 def openai_image_wrapper(
@@ -45,7 +43,12 @@
     timer_start = time.perf_counter()
     response = wrapped(*args, **kwargs)
     request_latency = time.perf_counter() - timer_start
-    return _openai_image_wrapper(response, request_latency, **kwargs)
+    result, impact_row = _openai_image_get_impact_row(
+        response, request_latency, **kwargs
+    )
+    scope3_ctx = Scope3AI.get_instance().submit_impact(impact_row)
+    result.scope3ai = scope3_ctx
+    return result
 
 
 async def openai_async_image_wrapper(
@@ -54,4 +57,9 @@
     timer_start = time.perf_counter()
     response = await wrapped(*args, **kwargs)
     request_latency = time.perf_counter() - timer_start
-    return _openai_image_wrapper(response, request_latency, **kwargs)
+    result, impact_row = _openai_image_get_impact_row(
+        response, request_latency, **kwargs
+    )
+    scope3_ctx = await Scope3AI.get_instance().asubmit_impact(impact_row)
+    result.scope3ai = scope3_ctx
+    return result
diff --git a/scope3ai/tracers/openai/text_to_speech.py b/scope3ai/tracers/openai/text_to_speech.py
index f099bbc..2d07572 100644
--- a/scope3ai/tracers/openai/text_to_speech.py
+++ b/scope3ai/tracers/openai/text_to_speech.py
@@ -20,11 +20,11 @@
 class HttpxBinaryResponseContent(_legacy_response.HttpxBinaryResponseContent):
     scope3ai: Optional[Scope3AIContext] = None
 
 
-def _openai_text_to_speech_submit(
+def _openai_text_to_speech_get_impact_row(
     response: _legacy_response.HttpxBinaryResponseContent,
     request_latency: float,
     kwargs: Any,
-) -> HttpxBinaryResponseContent:
+) -> (HttpxBinaryResponseContent, ImpactRow):
     # try getting duration
     response_format = kwargs["response_format"]
     duration = _get_audio_duration(response_format, response.content)
@@ -49,13 +49,10 @@ def _openai_text_to_speech_submit(
         task=Task.text_to_speech,
     )
 
-    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
-
     wrapped_response = HttpxBinaryResponseContent(
         response=response.response,
     )
-    wrapped_response.scope3ai = scope3_ctx
-    return wrapped_response
+    return wrapped_response, scope3_row
 
 
 def openai_text_to_speech_wrapper(
@@ -64,7 +61,12 @@
     timer_start = time.perf_counter()
     response = wrapped(*args, **kwargs)
     request_latency = (time.perf_counter() - timer_start) * 1000
-    return _openai_text_to_speech_submit(response, request_latency, kwargs)
+    result, impact_row = _openai_text_to_speech_get_impact_row(
+        response, request_latency, kwargs
+    )
+    scope3_ctx = Scope3AI.get_instance().submit_impact(impact_row)
+    result.scope3ai = scope3_ctx
+    return result
 
 
 async def openai_async_text_to_speech_wrapper(
@@ -72,5 +74,10 @@
 ) -> HttpxBinaryResponseContent:
     timer_start = time.perf_counter()
     response = await wrapped(*args, **kwargs)
-    request_latency = time.perf_counter() - timer_start
-    return _openai_text_to_speech_submit(response, request_latency, kwargs)
+    request_latency = (time.perf_counter() - timer_start) * 1000
+    result, impact_row = _openai_text_to_speech_get_impact_row(
+        response, request_latency, kwargs
+    )
+    scope3_ctx = await Scope3AI.get_instance().asubmit_impact(impact_row)
+    result.scope3ai = scope3_ctx
+    return result
diff --git a/scope3ai/tracers/openai/translation.py b/scope3ai/tracers/openai/translation.py
index 17ee53b..d484a26 100644
--- a/scope3ai/tracers/openai/translation.py
+++ b/scope3ai/tracers/openai/translation.py
@@ -1,6 +1,6 @@
 import logging
 import time
-from typing import Any, Callable, Optional, Union
+from typing import Any, Callable, Optional, Union, Tuple
 
 import tiktoken
 from openai.resources.audio.translations import AsyncTranslations, Translations
@@ -32,9 +32,9 @@ class TranslationVerbose(_TranslationVerbose):
     scope3ai: Optional[Scope3AIContext] = None
 
 
-def _openai_translation_wrapper(
+def _openai_translation_get_impact_row(
     response: Any, request_latency: float, kwargs: dict
-) -> Union[Translation, TranslationVerbose, AnnotatedStr]:
+) -> Tuple[Union[Translation, TranslationVerbose, AnnotatedStr], ImpactRow]:
     model = kwargs["model"]
     encoder = tiktoken.get_encoding("cl100k_base")
 
@@ -58,7 +58,6 @@
         task=Task.translation,
         **options,
     )
-    scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
 
     if isinstance(response, _Translation):
         result = Translation.model_construct(**response.model_dump())
@@ -68,9 +67,8 @@
         result = AnnotatedStr(str)
     else:
         logger.error(f"Unexpected response type: {type(response)}")
-        return response
-    result.scope3ai = scope3_ctx
-    return result
+        return response, scope3_row
+    return result, scope3_row
 
 
 def openai_translation_wrapper(
@@ -79,7 +77,12 @@
     timer_start = time.perf_counter()
     response = wrapped(*args, **kwargs)
     request_latency = (time.perf_counter() - timer_start) * 1000
-    return _openai_translation_wrapper(response, request_latency, kwargs)
+    result, impact_row = _openai_translation_get_impact_row(
+        response, request_latency, kwargs
+    )
+    scope3_ctx = Scope3AI.get_instance().submit_impact(impact_row)
+    result.scope3ai = scope3_ctx
+    return result
 
 
 async def openai_async_translation_wrapper(
@@ -88,4 +91,9 @@
     timer_start = time.perf_counter()
     response = await wrapped(*args, **kwargs)
     request_latency = (time.perf_counter() - timer_start) * 1000
-    return _openai_translation_wrapper(response, request_latency, kwargs)
+    result, impact_row = _openai_translation_get_impact_row(
+        response, request_latency, kwargs
+    )
+    scope3_ctx = await Scope3AI.get_instance().asubmit_impact(impact_row)
+    result.scope3ai = scope3_ctx
+    return result

From dd0110dd47a5105382f6aa9a3fb9641afd8dbc56 Mon Sep 17 00:00:00 2001
From: Kevin Guevara
Date: Thu, 16 Jan 2025 15:58:02 -0500
Subject: [PATCH 2/2] fix: fix tracer tests

---
 scope3ai/tracers/mistralai/chat.py      |  6 +-
 scope3ai/tracers/openai/chat.py         |  4 +-
 tests/test_anthropic_tracer.py          | 70 ++++++++++++++++++-----
 tests/test_cohere_tracer.py             | 40 ++++++++++---
 tests/test_cohere_tracer_v2.py          | 40 ++++++++++---
 tests/test_liteml_tracer.py             | 40 ++++++++++---
 tests/test_mistralai.py                 | 76 +++++++++++++++++++++++++
 tests/test_mistralai_v1.py              | 50 ----------------
 tests/test_openai_image_tracer.py       | 56 ++++++++++++++++--
 tests/test_openai_multimodal.py         | 59 +++++++++++++------
 tests/test_openai_stt_tracer.py         | 27 ++++++++-
 tests/test_openai_translation_tracer.py | 31 +++++++++-
 tests/test_openai_tts_tracer.py         | 18 +++++-
 13 files changed, 392 insertions(+), 125 deletions(-)
 create mode 100644 tests/test_mistralai.py
 delete mode 100644 tests/test_mistralai_v1.py

diff --git a/scope3ai/tracers/mistralai/chat.py b/scope3ai/tracers/mistralai/chat.py
index fa70c9a..f7ada72 100644
--- a/scope3ai/tracers/mistralai/chat.py
+++ b/scope3ai/tracers/mistralai/chat.py
@@ -65,7 +65,7 @@ def mistralai_v1_chat_wrapper_stream(
             input_tokens=token_count,
             output_tokens=chunk.data.usage.completion_tokens
             if chunk.data.usage
-            else None,
+            else 0,
             request_duration_ms=request_latency * 1000,
             managed_service_id=PROVIDER,
         )
@@ -110,9 +110,7 @@ async def _generator(
         scope3_row = ImpactRow(
             model_id=model_name,
             input_tokens=token_count,
-            output_tokens=chunk.data.usage.completion_tokens
-            if chunk.data.usage
-            else None,
+            output_tokens=chunk.data.usage.completion_tokens if chunk.data.usage else 0,
             request_duration_ms=request_latency * 1000,
             managed_service_id=PROVIDER,
         )
diff --git a/scope3ai/tracers/openai/chat.py b/scope3ai/tracers/openai/chat.py
index 100c53a..28e6fb2 100644
--- a/scope3ai/tracers/openai/chat.py
+++ b/scope3ai/tracers/openai/chat.py
@@ -60,7 +60,8 @@ def _openai_aggregate_multimodal_audio(content: dict, row: ImpactRow) -> None:
 
     # decode the base64 data
     audio_data = base64.b64decode(b64data)
-    duration = _get_audio_duration(format, audio_data)
+    # TODO: accept audio duration as float in AiApi
+    duration = int(_get_audio_duration(format, audio_data))
 
     if row.input_audio_seconds is None:
         row.input_audio_seconds = duration
@@ -107,7 +108,6 @@ def _openai_chat_wrapper(
 
     messages = kwargs.get("messages", [])
     for message in messages:
         _openai_aggregate_multimodal(message, scope3_row)
-
     scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
     return ChatCompletion(**response.model_dump(), scope3ai=scope3ai_ctx)
diff --git a/tests/test_anthropic_tracer.py b/tests/test_anthropic_tracer.py
index d40e19c..157aba0 100644
--- a/tests/test_anthropic_tracer.py
+++ b/tests/test_anthropic_tracer.py
@@ -3,7 +3,7 @@
 
 
 @pytest.mark.vcr
-def test_anthropic_chat(tracer_init):
+def test_anthropic_chat(tracer_with_sync_init):
     client = Anthropic()
     response = client.messages.create(
         max_tokens=100,
@@ -14,12 +14,18 @@ def test_anthropic_chat(tracer_init):
     assert getattr(response, "scope3ai") is not None
     assert response.scope3ai.request.input_tokens == 10
     assert response.scope3ai.request.output_tokens == 37
-    assert response.scope3ai.impact is None
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
-async def test_anthropic_async_chat(tracer_init):
+async def test_anthropic_async_chat(tracer_with_sync_init):
     client = AsyncAnthropic()
     response = await client.messages.create(
         max_tokens=100,
@@ -30,11 +36,17 @@ async def test_anthropic_async_chat(tracer_init):
     assert getattr(response, "scope3ai") is not None
     assert response.scope3ai.request.input_tokens == 10
     assert response.scope3ai.request.output_tokens == 43
-    assert response.scope3ai.impact is None
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
-def test_anthropic_stream_chat(tracer_init):
+def test_anthropic_stream_chat(tracer_with_sync_init):
     client = Anthropic()
 
     text_response = ""
@@ -49,14 +61,20 @@ def test_anthropic_stream_chat(tracer_init):
     assert getattr(stream, "scope3ai") is not None
     assert stream.scope3ai.request.input_tokens == 10
     assert stream.scope3ai.request.output_tokens == 45
-    assert stream.scope3ai.impact is None
+    assert stream.scope3ai.impact is not None
+    assert stream.scope3ai.impact.total_impact is not None
+    assert stream.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert stream.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert stream.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert stream.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert stream.scope3ai.impact.total_impact.embodied_water_ml > 0
 
     assert len(text_response) > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
-async def test_anthropic_async_stream_chat(tracer_init):
+async def test_anthropic_async_stream_chat(tracer_with_sync_init):
     client = AsyncAnthropic()
 
     text_response = ""
@@ -71,13 +89,19 @@ async def test_anthropic_async_stream_chat(tracer_init):
     assert getattr(stream, "scope3ai") is not None
     assert stream.scope3ai.request.input_tokens == 10
     assert stream.scope3ai.request.output_tokens == 44
-    assert stream.scope3ai.impact is None
+    assert stream.scope3ai.impact is not None
+    assert stream.scope3ai.impact.total_impact is not None
+    assert stream.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert stream.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert stream.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert stream.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert stream.scope3ai.impact.total_impact.embodied_water_ml > 0
 
     assert len(text_response) > 0
 
 
 @pytest.mark.vcr
-def test_anthropic_stream_chat_from_create_context(tracer_init):
+def test_anthropic_stream_chat_from_create_context(tracer_with_sync_init):
     client = Anthropic()
 
     with client.messages.create(
@@ -94,11 +118,17 @@ def test_anthropic_stream_chat_from_create_context(tracer_init):
     assert getattr(stream, "scope3ai") is not None
     assert stream.scope3ai.request.input_tokens == 10
     assert stream.scope3ai.request.output_tokens == 31
-    assert stream.scope3ai.impact is None
+    assert stream.scope3ai.impact is not None
+    assert stream.scope3ai.impact.total_impact is not None
+    assert stream.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert stream.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert stream.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert stream.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert stream.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
-def test_anthropic_stream_chat_from_create_linear(tracer_init):
+def test_anthropic_stream_chat_from_create_linear(tracer_with_sync_init):
     client = Anthropic()
 
     stream = client.messages.create(
@@ -115,12 +145,18 @@ def test_anthropic_stream_chat_from_create_linear(tracer_init):
     assert getattr(stream, "scope3ai") is not None
     assert stream.scope3ai.request.input_tokens == 10
     assert stream.scope3ai.request.output_tokens == 37
-    assert stream.scope3ai.impact is None
+    assert stream.scope3ai.impact is not None
+    assert stream.scope3ai.impact.total_impact is not None
+    assert stream.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert stream.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert stream.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert stream.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert stream.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
-async def test_anthropic_stream_async_chat_from_create_linear(tracer_init):
+async def test_anthropic_stream_async_chat_from_create_linear(tracer_with_sync_init):
     client = AsyncAnthropic()
 
     stream = await client.messages.create(
@@ -137,4 +173,10 @@ async def test_anthropic_stream_async_chat_from_create_linear(tracer_init):
     assert getattr(stream, "scope3ai") is not None
     assert stream.scope3ai.request.input_tokens == 10
     assert stream.scope3ai.request.output_tokens == 31
-    assert stream.scope3ai.impact is None
+    assert stream.scope3ai.impact is not None
+    assert stream.scope3ai.impact.total_impact is not None
+    assert stream.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert stream.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert stream.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert stream.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert stream.scope3ai.impact.total_impact.embodied_water_ml > 0
diff --git a/tests/test_cohere_tracer.py b/tests/test_cohere_tracer.py
index 6e4942b..a731237 100644
--- a/tests/test_cohere_tracer.py
+++ b/tests/test_cohere_tracer.py
@@ -3,30 +3,42 @@
 
 
 @pytest.mark.vcr
-def test_cohere_chat(tracer_init):
+def test_cohere_chat(tracer_with_sync_init):
     client = Client()
     response = client.chat(message="Hello!", max_tokens=100)
     assert len(response.text) > 0
     assert getattr(response, "scope3ai") is not None
getattr(response, "scope3ai") is not None assert response.scope3ai.request.input_tokens == 203 assert response.scope3ai.request.output_tokens == 9 - assert response.scope3ai.impact is None + assert response.scope3ai.impact is not None + assert response.scope3ai.impact.total_impact is not None + assert response.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.usage_water_ml > 0 + assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.embodied_water_ml > 0 @pytest.mark.vcr @pytest.mark.asyncio -async def test_cohere_async_chat(tracer_init): +async def test_cohere_async_chat(tracer_with_sync_init): client = AsyncClient() response = await client.chat(message="Hello!", max_tokens=100) assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None assert response.scope3ai.request.input_tokens == 203 assert response.scope3ai.request.output_tokens == 9 - assert response.scope3ai.impact is None + assert response.scope3ai.impact is not None + assert response.scope3ai.impact.total_impact is not None + assert response.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.usage_water_ml > 0 + assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.embodied_water_ml > 0 @pytest.mark.vcr -def test_cohere_stream_chat(tracer_init): +def test_cohere_stream_chat(tracer_with_sync_init): client = Client() stream = client.chat_stream(message="Tell me a short story", max_tokens=100) for event in stream: @@ -36,12 +48,18 @@ def test_cohere_stream_chat(tracer_init): assert getattr(event, "scope3ai") is not None assert event.scope3ai.request.input_tokens == 206 assert event.scope3ai.request.output_tokens == 100 - assert event.scope3ai.impact is None + assert event.scope3ai.impact is not None + assert event.scope3ai.impact.total_impact is not None + assert event.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert event.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 + assert event.scope3ai.impact.total_impact.usage_water_ml > 0 + assert event.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 + assert event.scope3ai.impact.total_impact.embodied_water_ml > 0 @pytest.mark.vcr @pytest.mark.asyncio -async def test_cohere_async_stream_chat(tracer_init): +async def test_cohere_async_stream_chat(tracer_with_sync_init): client = AsyncClient() stream = client.chat_stream(message="Tell me a short story", max_tokens=100) async for event in stream: @@ -51,4 +69,10 @@ async def test_cohere_async_stream_chat(tracer_init): assert getattr(event, "scope3ai") is not None assert event.scope3ai.request.input_tokens == 206 assert event.scope3ai.request.output_tokens == 100 - assert event.scope3ai.impact is None + assert event.scope3ai.impact is not None + assert event.scope3ai.impact.total_impact is not None + assert event.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert event.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 + assert event.scope3ai.impact.total_impact.usage_water_ml > 0 + assert event.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 + assert event.scope3ai.impact.total_impact.embodied_water_ml > 0 diff --git a/tests/test_cohere_tracer_v2.py b/tests/test_cohere_tracer_v2.py index 10eb255..95ab7f1 100644 
--- a/tests/test_cohere_tracer_v2.py
+++ b/tests/test_cohere_tracer_v2.py
@@ -3,7 +3,7 @@
 
 
 @pytest.mark.vcr
-def test_cohere_chat_v2(tracer_init):
+def test_cohere_chat_v2(tracer_with_sync_init):
     client = ClientV2()
     response = client.chat(
         model="command-r-plus-08-2024",
@@ -13,12 +13,18 @@ def test_cohere_chat_v2(tracer_init):
     assert getattr(response, "scope3ai") is not None
     assert response.scope3ai.request.input_tokens == 204
     assert response.scope3ai.request.output_tokens == 9
-    assert response.scope3ai.impact is None
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
-async def test_cohere_async_chat_v2(tracer_init):
+async def test_cohere_async_chat_v2(tracer_with_sync_init):
     client = AsyncClientV2()
     response = await client.chat(
         model="command-r-plus-08-2024",
@@ -28,11 +34,17 @@ async def test_cohere_async_chat_v2(tracer_init):
     assert getattr(response, "scope3ai") is not None
     assert response.scope3ai.request.input_tokens == 204
     assert response.scope3ai.request.output_tokens == 9
-    assert response.scope3ai.impact is None
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
-def test_cohere_stream_chat_v2(tracer_init):
+def test_cohere_stream_chat_v2(tracer_with_sync_init):
     client = ClientV2()
     stream = client.chat_stream(
         model="command-r-plus-08-2024",
@@ -47,7 +59,13 @@ def test_cohere_stream_chat_v2(tracer_init):
             assert getattr(event, "scope3ai") is not None
             assert event.scope3ai.request.input_tokens == 206
             assert event.scope3ai.request.output_tokens == 100
-            assert event.scope3ai.impact is None
+            assert event.scope3ai.impact is not None
+            assert event.scope3ai.impact.total_impact is not None
+            assert event.scope3ai.impact.total_impact.usage_energy_wh > 0
+            assert event.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+            assert event.scope3ai.impact.total_impact.usage_water_ml > 0
+            assert event.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+            assert event.scope3ai.impact.total_impact.embodied_water_ml > 0
             event_received = True
 
     assert event_received is True
 
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
-async def test_cohere_async_stream_chat_v2(tracer_init):
+async def test_cohere_async_stream_chat_v2(tracer_with_sync_init):
     client = AsyncClientV2()
     stream = client.chat_stream(
         model="command-r-plus-08-2024",
@@ -70,7 +88,13 @@ async def test_cohere_async_stream_chat_v2(tracer_init):
             assert getattr(event, "scope3ai") is not None
             assert event.scope3ai.request.input_tokens == 206
             assert event.scope3ai.request.output_tokens == 100
-            assert event.scope3ai.impact is None
+            assert event.scope3ai.impact is not None
+            assert event.scope3ai.impact.total_impact is not None
+            assert event.scope3ai.impact.total_impact.usage_energy_wh > 0
+            assert event.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+            assert event.scope3ai.impact.total_impact.usage_water_ml > 0
+            assert event.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+            assert event.scope3ai.impact.total_impact.embodied_water_ml > 0
             event_received = True
 
     assert event_received is True
diff --git a/tests/test_liteml_tracer.py b/tests/test_liteml_tracer.py
index 5ae1938..fcb5af3 100644
--- a/tests/test_liteml_tracer.py
+++ b/tests/test_liteml_tracer.py
@@ -3,7 +3,7 @@
 
 
 @pytest.mark.vcr
-def test_litellm_chat(tracer_init):
+def test_litellm_chat(tracer_with_sync_init):
     response = litellm.completion(
         model="huggingface/meta-llama/Meta-Llama-3-8B-Instruct",
         messages=[{"role": "user", "content": "Hello World!"}],
@@ -12,12 +12,18 @@ def test_litellm_chat(tracer_init):
     assert getattr(response, "scope3ai") is not None
     assert response.scope3ai.request.input_tokens == 44
     assert response.scope3ai.request.output_tokens == 69
-    assert response.scope3ai.impact is None
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
-async def test_litellm_async_chat(tracer_init):
+async def test_litellm_async_chat(tracer_with_sync_init):
     response = await litellm.acompletion(
         messages=[{"role": "user", "content": "Hello World!"}],
         model="command-r",
@@ -25,11 +31,17 @@ async def test_litellm_async_chat(tracer_init):
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
     assert response.scope3ai.request.input_tokens == 3
-    assert response.scope3ai.impact is None
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
-def test_litellm_stream_chat(tracer_init):
+def test_litellm_stream_chat(tracer_with_sync_init):
     stream = litellm.completion(
         messages=[{"role": "user", "content": "Hello World!"}],
         model="claude-3-5-sonnet-20240620",
@@ -37,12 +49,18 @@ def test_litellm_stream_chat(tracer_init):
     )
     for chunk in stream:
         assert getattr(chunk, "scope3ai") is not None
-        assert chunk.scope3ai.impact is None
+        assert chunk.scope3ai.impact is not None
+        assert chunk.scope3ai.impact.total_impact is not None
+        assert chunk.scope3ai.impact.total_impact.usage_energy_wh > 0
+        assert chunk.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+        assert chunk.scope3ai.impact.total_impact.usage_water_ml > 0
+        assert chunk.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+        assert chunk.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
-async def test_litellm_async_stream_chat(tracer_init):
+async def test_litellm_async_stream_chat(tracer_with_sync_init):
     stream = await litellm.acompletion(
         messages=[{"role": "user", "content": "Hello World!"}],
"content": "Hello World!"}], model="claude-3-5-sonnet-20240620", @@ -50,4 +68,10 @@ async def test_litellm_async_stream_chat(tracer_init): ) async for chunk in stream: assert getattr(chunk, "scope3ai") is not None - assert chunk.scope3ai.impact is None + assert chunk.scope3ai.impact is not None + assert chunk.scope3ai.impact.total_impact is not None + assert chunk.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert chunk.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 + assert chunk.scope3ai.impact.total_impact.usage_water_ml > 0 + assert chunk.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 + assert chunk.scope3ai.impact.total_impact.embodied_water_ml > 0 diff --git a/tests/test_mistralai.py b/tests/test_mistralai.py new file mode 100644 index 0000000..3a45c5f --- /dev/null +++ b/tests/test_mistralai.py @@ -0,0 +1,76 @@ +import pytest +from mistralai import Mistral + + +@pytest.mark.vcr +def test_mistralai_chat(tracer_with_sync_init): + client = Mistral() + response = client.chat.complete( + messages=[{"role": "user", "content": "Hello World!"}], model="mistral-tiny" + ) + assert len(response.choices) > 0 + assert getattr(response, "scope3ai") is not None + assert response.scope3ai.request.input_tokens == 6 + assert response.scope3ai.request.output_tokens == 18 + assert response.scope3ai.impact is not None + assert response.scope3ai.impact.total_impact is not None + assert response.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.usage_water_ml > 0 + assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.embodied_water_ml > 0 + + +@pytest.mark.vcr +@pytest.mark.asyncio +async def test_mistralai_async_chat(tracer_with_sync_init): + client = Mistral() + response = await client.chat.complete_async( + messages=[{"role": "user", "content": "Hello World!"}], model="mistral-tiny" + ) + assert len(response.choices) > 0 + assert getattr(response, "scope3ai") is not None + assert response.scope3ai.request.input_tokens == 6 + assert response.scope3ai.request.output_tokens == 18 + assert response.scope3ai.impact is not None + assert response.scope3ai.impact.total_impact is not None + assert response.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.usage_water_ml > 0 + assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.embodied_water_ml > 0 + + +@pytest.mark.vcr +def test_mistralai_stream_chat(tracer_with_sync_init): + client = Mistral() + stream = client.chat.stream( + messages=[{"role": "user", "content": "Hello World!"}], model="mistral-tiny" + ) + for chunk in stream: + assert getattr(chunk.data, "scope3ai") is not None + assert chunk.data.scope3ai.impact is not None + assert chunk.data.scope3ai.impact.total_impact is not None + assert chunk.data.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert chunk.data.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 + assert chunk.data.scope3ai.impact.total_impact.usage_water_ml > 0 + assert chunk.data.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 + assert chunk.data.scope3ai.impact.total_impact.embodied_water_ml > 0 + + +@pytest.mark.vcr +@pytest.mark.asyncio +async def test_mistralai_async_stream_chat(tracer_with_sync_init): + client = 
+    stream = await client.chat.stream_async(
+        messages=[{"role": "user", "content": "Hello World!"}], model="mistral-tiny"
+    )
+    async for chunk in stream:
+        assert getattr(chunk.data, "scope3ai") is not None
+        assert chunk.data.scope3ai.impact is not None
+        assert chunk.data.scope3ai.impact.total_impact is not None
+        assert chunk.data.scope3ai.impact.total_impact.usage_energy_wh > 0
+        assert chunk.data.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+        assert chunk.data.scope3ai.impact.total_impact.usage_water_ml > 0
+        assert chunk.data.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+        assert chunk.data.scope3ai.impact.total_impact.embodied_water_ml > 0
diff --git a/tests/test_mistralai_v1.py b/tests/test_mistralai_v1.py
deleted file mode 100644
index a8d1457..0000000
--- a/tests/test_mistralai_v1.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import pytest
-from mistralai import Mistral
-
-
-@pytest.mark.vcr
-def test_mistralai_chat(tracer_init):
-    client = Mistral()
-    response = client.chat.complete(
-        messages=[{"role": "user", "content": "Hello World!"}], model="mistral-tiny"
-    )
-    assert len(response.choices) > 0
-    assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.input_tokens == 6
-    assert response.scope3ai.request.output_tokens == 18
-    assert response.scope3ai.impact is None
-
-
-@pytest.mark.vcr
-@pytest.mark.asyncio
-async def test_mistralai_async_chat(tracer_init):
-    client = Mistral()
-    response = await client.chat.complete_async(
-        messages=[{"role": "user", "content": "Hello World!"}], model="mistral-tiny"
-    )
-    assert len(response.choices) > 0
-    assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.input_tokens == 6
-    assert response.scope3ai.request.output_tokens == 18
-    assert response.scope3ai.impact is None
-
-
-@pytest.mark.vcr
-def test_mistralai_stream_chat(tracer_init):
-    client = Mistral()
-    stream = client.chat.stream(
-        messages=[{"role": "user", "content": "Hello World!"}], model="mistral-tiny"
-    )
-    for chunk in stream:
-        assert getattr(chunk.data, "scope3ai") is not None
-
-
-@pytest.mark.vcr
-@pytest.mark.asyncio
-async def test_mistralai_async_stream_chat(tracer_init):
-    client = Mistral()
-    stream = await client.chat.stream_async(
-        messages=[{"role": "user", "content": "Hello World!"}], model="mistral-tiny"
-    )
-    async for chunk in stream:
-        assert getattr(chunk.data, "scope3ai") is not None
diff --git a/tests/test_openai_image_tracer.py b/tests/test_openai_image_tracer.py
index 9bf7833..a030516 100644
--- a/tests/test_openai_image_tracer.py
+++ b/tests/test_openai_image_tracer.py
@@ -9,7 +9,7 @@
 @pytest.mark.parametrize("image_size", ["256x256", "512x512", None])
 @pytest.mark.parametrize("n", [1, 2])
 @pytest.mark.parametrize("model", ["dall-e-2", None])
-def test_openai_image_wrapper(tracer_init, image_size, n, model):
+def test_openai_image_wrapper(tracer_with_sync_init, image_size, n, model):
     from openai import OpenAI
 
     kwargs = {}
@@ -29,12 +29,19 @@ def test_openai_image_wrapper(tracer_init, image_size, n, model):
     assert response.scope3ai is not None
     assert response.scope3ai.request.output_images is not None
     assert len(response.scope3ai.request.output_images) == n
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.parametrize("n", [1, 2])
 @pytest.mark.parametrize("model", ["dall-e-2", None])
-def test_openai_image_create_variation_wrapper(tracer_init, n, model):
+def test_openai_image_create_variation_wrapper(tracer_with_sync_init, n, model):
     from openai import OpenAI
 
     kwargs = {}
@@ -52,12 +59,19 @@ def test_openai_image_create_variation_wrapper(tracer_init, n, model):
     assert response.scope3ai is not None
     assert response.scope3ai.request.output_images is not None
     assert len(response.scope3ai.request.output_images) == n
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.parametrize("n", [1, 2])
 @pytest.mark.parametrize("model", ["dall-e-2", None])
-def test_openai_image_edit_wrapper(tracer_init, n, model):
+def test_openai_image_edit_wrapper(tracer_with_sync_init, n, model):
     from openai import OpenAI
 
     kwargs = {}
@@ -77,13 +91,20 @@ def test_openai_image_edit_wrapper(tracer_init, n, model):
     assert response.scope3ai is not None
     assert response.scope3ai.request.output_images is not None
     assert len(response.scope3ai.request.output_images) == n
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
 @pytest.mark.parametrize("n", [1, 2])
 @pytest.mark.parametrize("model", ["dall-e-2", None])
-async def test_openai_image_generate_wrapper_async(tracer_init, n, model):
+async def test_openai_image_generate_wrapper_async(tracer_with_sync_init, n, model):
     from openai import AsyncOpenAI
 
     kwargs = {}
@@ -101,13 +122,22 @@ async def test_openai_image_generate_wrapper_async(tracer_init, n, model):
     assert response.scope3ai is not None
     assert response.scope3ai.request.output_images is not None
     assert len(response.scope3ai.request.output_images) == n
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
 @pytest.mark.parametrize("n", [1, 2])
 @pytest.mark.parametrize("model", ["dall-e-2", None])
-async def test_openai_image_create_variation_wrapper_async(tracer_init, n, model):
+async def test_openai_image_create_variation_wrapper_async(
+    tracer_with_sync_init, n, model
+):
     from openai import AsyncOpenAI
 
     kwargs = {}
@@ -125,13 +155,20 @@ async def test_openai_image_create_variation_wrapper_async(tracer_init, n, model
     assert response.scope3ai is not None
     assert response.scope3ai.request.output_images is not None
     assert len(response.scope3ai.request.output_images) == n
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
 @pytest.mark.parametrize("n", [1, 2])
 @pytest.mark.parametrize("model", ["dall-e-2", None])
-async def test_openai_image_edit_wrapper_async(tracer_init, n, model):
+async def test_openai_image_edit_wrapper_async(tracer_with_sync_init, n, model):
     from openai import AsyncOpenAI
 
     kwargs = {}
@@ -151,3 +188,10 @@ async def test_openai_image_edit_wrapper_async(tracer_init, n, model):
     assert response.scope3ai is not None
     assert response.scope3ai.request.output_images is not None
     assert len(response.scope3ai.request.output_images) == n
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
diff --git a/tests/test_openai_multimodal.py b/tests/test_openai_multimodal.py
index 0af7e8c..a00d08b 100644
--- a/tests/test_openai_multimodal.py
+++ b/tests/test_openai_multimodal.py
@@ -27,7 +27,7 @@ def load_image_b64(path: Path) -> str:
 
 
 @pytest.mark.vcr
-def test_openai_multimodal_vision(tracer_init):
+def test_openai_multimodal_vision(tracer_with_sync_init):
     from openai import OpenAI
     from scope3ai.api.typesgen import Image
 
@@ -55,15 +55,20 @@ def test_openai_multimodal_vision(tracer_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.input_tokens == 872
     assert response.scope3ai.request.output_tokens == 57
     assert response.scope3ai.request.input_images == [Image(root="1024x1024")]
-    assert response.scope3ai.impact is None
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
-def test_openai_multimodal_vision_2_images(tracer_init):
+def test_openai_multimodal_vision_2_images(tracer_with_sync_init):
     from openai import OpenAI
     from scope3ai.api.typesgen import Image
 
@@ -97,18 +102,23 @@ def test_openai_multimodal_vision_2_images(tracer_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.input_tokens == 34016
     assert response.scope3ai.request.output_tokens == 47
     assert response.scope3ai.request.input_images == [
         Image(root="512x512"),
         Image(root="1024x1024"),
     ]
-    assert response.scope3ai.impact is None
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
-def test_openai_multimodal_audio(tracer_init):
+def test_openai_multimodal_audio(tracer_with_sync_init):
     from openai import OpenAI
 
     client = OpenAI()
@@ -135,15 +145,20 @@ def test_openai_multimodal_audio(tracer_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.input_tokens == 29
     assert response.scope3ai.request.output_tokens == 15
-    assert response.scope3ai.request.input_audio_seconds > 1
-    assert response.scope3ai.impact is None
+    assert response.scope3ai.request.input_audio_seconds >= 1
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
-def test_openai_multimodal_audio_2(tracer_init):
+def test_openai_multimodal_audio_2(tracer_with_sync_init):
     from openai import OpenAI
 
     client = OpenAI()
@@ -177,11 +192,16 @@ def test_openai_multimodal_audio_2(tracer_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.input_tokens == 46
     assert response.scope3ai.request.output_tokens == 17
-    assert response.scope3ai.request.input_audio_seconds > 2
-    assert response.scope3ai.impact is None
+    assert response.scope3ai.request.input_audio_seconds >= 1
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 # XXX openai does not support audio and image yet.
 # the api on the audio is not supporting image_url as content
 # and the model gpt-4o does not support input_audio
 # not even o1 support audio and image and text.
 # @pytest.mark.vcr
-# def test_openai_multimodal_audio_and_image(tracer_init):
+# def test_openai_multimodal_audio_and_image(tracer_with_sync_init):
 #     from openai import OpenAI
 #
 #     client = OpenAI()
@@ -222,9 +242,14 @@
 #     )
 #     assert len(response.choices) > 0
 #     assert getattr(response, "scope3ai") is not None
-#     assert response.scope3ai.request.input_tokens == 29
 #     assert response.scope3ai.request.output_tokens == 15
 #     assert response.scope3ai.request.input_images == "512x512"
 #     assert response.scope3ai.request.input_audio_seconds > 0
-#     assert response.scope3ai.impact is None
+#     assert response.scope3ai.impact is not None
+#     assert response.scope3ai.impact.total_impact is not None
+#     assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+#     assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+#     assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+#     assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+#     assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
diff --git a/tests/test_openai_stt_tracer.py b/tests/test_openai_stt_tracer.py
index df0d022..d1a3845 100644
--- a/tests/test_openai_stt_tracer.py
+++ b/tests/test_openai_stt_tracer.py
@@ -10,7 +10,7 @@
     "response_format", ["json", "text", "srt", "verbose_json", "vtt"]
 )
 @pytest.mark.parametrize("model", ["whisper-1"])
-def test_openai_stt_wrapper(tracer_init, response_format, model):
+def test_openai_stt_wrapper(tracer_with_sync_init, response_format, model):
     from openai import OpenAI
 
     client = OpenAI()
@@ -23,6 +23,13 @@ def test_openai_stt_wrapper(tracer_init, response_format, model):
     assert response.scope3ai is not None
     assert response.scope3ai.request.output_tokens > 0
     assert response.scope3ai.request.request_duration_ms > 0
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
@@ -31,7 +38,7 @@
     "response_format", ["json", "text", "srt", "verbose_json", "vtt"]
 )
 @pytest.mark.parametrize("model", ["whisper-1"])
-async def test_openai_stt_wrapper_async(tracer_init, response_format, model):
+async def test_openai_stt_wrapper_async(tracer_with_sync_init, response_format, model):
     from openai import AsyncOpenAI
 
     client = AsyncOpenAI()
@@ -44,6 +51,13 @@ async def test_openai_stt_wrapper_async(tracer_init, response_format, model):
     assert response.scope3ai is not None
     assert response.scope3ai.request.output_tokens > 0
     assert response.scope3ai.request.request_duration_ms > 0
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
@@ -58,7 +72,7 @@
 @pytest.mark.parametrize(
     "pathtype",
     [
         "hello_there.mp3",
         "hello_there.mp3.wav",
     ],
 )
@pytest.mark.parametrize("model", ["whisper-1"]) -def test_openai_stt_wrapper_file_duration(tracer_init, model, pathtype): +def test_openai_stt_wrapper_file_duration(tracer_with_sync_init, model, pathtype): from openai import OpenAI client = OpenAI() @@ -88,3 +102,10 @@ def test_openai_stt_wrapper_file_duration(tracer_init, model, pathtype): assert response.scope3ai.request.output_tokens > 0 assert response.scope3ai.request.request_duration_ms > 0 assert response.scope3ai.request.input_audio_seconds > 0 + assert response.scope3ai.impact is not None + assert response.scope3ai.impact.total_impact is not None + assert response.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.usage_water_ml > 0 + assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.embodied_water_ml > 0 diff --git a/tests/test_openai_translation_tracer.py b/tests/test_openai_translation_tracer.py index 679f45e..6a369be 100644 --- a/tests/test_openai_translation_tracer.py +++ b/tests/test_openai_translation_tracer.py @@ -10,7 +10,7 @@ "response_format", ["json", "text", "srt", "verbose_json", "vtt"] ) @pytest.mark.parametrize("model", ["whisper-1"]) -def test_openai_translation_wrapper(tracer_init, response_format, model): +def test_openai_translation_wrapper(tracer_with_sync_init, response_format, model): from openai import OpenAI client = OpenAI() @@ -23,6 +23,13 @@ def test_openai_translation_wrapper(tracer_init, response_format, model): assert response.scope3ai is not None assert response.scope3ai.request.output_tokens > 0 assert response.scope3ai.request.request_duration_ms > 0 + assert response.scope3ai.impact is not None + assert response.scope3ai.impact.total_impact is not None + assert response.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.usage_water_ml > 0 + assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.embodied_water_ml > 0 @pytest.mark.vcr @@ -31,7 +38,9 @@ def test_openai_translation_wrapper(tracer_init, response_format, model): "response_format", ["json", "text", "srt", "verbose_json", "vtt"] ) @pytest.mark.parametrize("model", ["whisper-1"]) -async def test_openai_translation_wrapper_async(tracer_init, response_format, model): +async def test_openai_translation_wrapper_async( + tracer_with_sync_init, response_format, model +): from openai import AsyncOpenAI client = AsyncOpenAI() @@ -44,6 +53,13 @@ async def test_openai_translation_wrapper_async(tracer_init, response_format, mo assert response.scope3ai is not None assert response.scope3ai.request.output_tokens > 0 assert response.scope3ai.request.request_duration_ms > 0 + assert response.scope3ai.impact is not None + assert response.scope3ai.impact.total_impact is not None + assert response.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.usage_water_ml > 0 + assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 + assert response.scope3ai.impact.total_impact.embodied_water_ml > 0 @pytest.mark.vcr @@ -58,7 +74,9 @@ async def test_openai_translation_wrapper_async(tracer_init, response_format, mo ], ) @pytest.mark.parametrize("model", ["whisper-1"]) 
-def test_openai_translation_wrapper_file_duration(tracer_init, model, pathtype):
+def test_openai_translation_wrapper_file_duration(
+    tracer_with_sync_init, model, pathtype
+):
     from openai import OpenAI
 
     client = OpenAI()
@@ -88,3 +106,10 @@ def test_openai_translation_wrapper_file_duration(tracer_init, model, pathtype):
     assert response.scope3ai.request.output_tokens > 0
     assert response.scope3ai.request.request_duration_ms > 0
     assert response.scope3ai.request.input_audio_seconds > 0
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
diff --git a/tests/test_openai_tts_tracer.py b/tests/test_openai_tts_tracer.py
index 1c88727..63fc9a2 100644
--- a/tests/test_openai_tts_tracer.py
+++ b/tests/test_openai_tts_tracer.py
@@ -6,7 +6,7 @@
 @pytest.mark.vcr
 @pytest.mark.parametrize("audio_format", ["mp3", "opus", "aac", "wav"])  # pcm, flac
 @pytest.mark.parametrize("model", ["tts-1", "tts-1-hd"])
-def test_openai_tts_wrapper(tracer_init, audio_format, model):
+def test_openai_tts_wrapper(tracer_with_sync_init, audio_format, model):
     from openai import OpenAI
 
     client = OpenAI()
@@ -22,13 +22,20 @@ def test_openai_tts_wrapper(tracer_init, audio_format, model):
     assert response.scope3ai.request.output_audio_seconds is not None
     assert response.scope3ai.request.output_audio_seconds > 0.5
     assert response.scope3ai.request.output_audio_seconds < 3
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0
 
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
 @pytest.mark.parametrize("audio_format", ["mp3", "opus", "aac", "wav"])  # pcm, flac
 @pytest.mark.parametrize("model", ["tts-1", "tts-1-hd"])
-async def test_openai_tts_wrapper_async(tracer_init, audio_format, model):
+async def test_openai_tts_wrapper_async(tracer_with_sync_init, audio_format, model):
     from openai import AsyncOpenAI
 
     client = AsyncOpenAI()
@@ -44,3 +51,10 @@ async def test_openai_tts_wrapper_async(tracer_init, audio_format, model):
     assert response.scope3ai.request.output_audio_seconds is not None
     assert response.scope3ai.request.output_audio_seconds > 0.5
     assert response.scope3ai.request.output_audio_seconds < 3
+    assert response.scope3ai.impact is not None
+    assert response.scope3ai.impact.total_impact is not None
+    assert response.scope3ai.impact.total_impact.usage_energy_wh > 0
+    assert response.scope3ai.impact.total_impact.usage_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.usage_water_ml > 0
+    assert response.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0
+    assert response.scope3ai.impact.total_impact.embodied_water_ml > 0