diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
index ea77e5512b..aa9b36e167 100644
--- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py
+++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -331,10 +331,7 @@ async def _prepare_request(
         ctx.state.run_step += 1
 
         model_settings = merge_model_settings(ctx.deps.model_settings, None)
-        with ctx.deps.tracer.start_as_current_span(
-            'preparing model request params', attributes=dict(run_step=ctx.state.run_step)
-        ):
-            model_request_parameters = await _prepare_request_parameters(ctx)
+        model_request_parameters = await _prepare_request_parameters(ctx)
         return model_settings, model_request_parameters
 
     def _finish_handling(
diff --git a/tests/models/test_fallback.py b/tests/models/test_fallback.py
index b0381e315f..cbf20f94b8 100644
--- a/tests/models/test_fallback.py
+++ b/tests/models/test_fallback.py
@@ -5,7 +5,6 @@
 from datetime import timezone
 
 import pytest
-from dirty_equals import IsJson
 from inline_snapshot import snapshot
 
 from pydantic_ai import Agent, ModelHTTPError
@@ -117,50 +116,22 @@ def test_first_failed_instrumented(capfire: CaptureLogfire) -> None:
     assert capfire.exporter.exported_spans_as_dict() == snapshot(
         [
             {
-                'name': 'preparing model request params',
+                'name': 'chat function:success_response:',
                 'context': {'trace_id': 1, 'span_id': 3, 'is_remote': False},
                 'parent': {'trace_id': 1, 'span_id': 1, 'is_remote': False},
                 'start_time': 2000000000,
                 'end_time': 3000000000,
-                'attributes': {
-                    'run_step': 1,
-                    'logfire.span_type': 'span',
-                    'logfire.msg': 'preparing model request params',
-                },
-            },
-            {
-                'name': 'chat function:success_response:',
-                'context': {'trace_id': 1, 'span_id': 5, 'is_remote': False},
-                'parent': {'trace_id': 1, 'span_id': 1, 'is_remote': False},
-                'start_time': 4000000000,
-                'end_time': 5000000000,
                 'attributes': {
                     'gen_ai.operation.name': 'chat',
                     'model_request_parameters': '{"function_tools": [], "allow_text_result": true, "result_tools": []}',
                     'logfire.span_type': 'span',
                     'logfire.msg': 'chat fallback:function:failure_response:,function:success_response:',
-                    'gen_ai.usage.input_tokens': 51,
-                    'gen_ai.usage.output_tokens': 1,
                     'gen_ai.system': 'function',
                     'gen_ai.request.model': 'function:success_response:',
+                    'gen_ai.usage.input_tokens': 51,
+                    'gen_ai.usage.output_tokens': 1,
                     'gen_ai.response.model': 'function:success_response:',
-                    'events': IsJson(
-                        [
-                            {
-                                'content': 'hello',
-                                'role': 'user',
-                                'gen_ai.system': 'function',
-                                'gen_ai.message.index': 0,
-                                'event.name': 'gen_ai.user.message',
-                            },
-                            {
-                                'index': 0,
-                                'message': {'role': 'assistant', 'content': 'success'},
-                                'gen_ai.system': 'function',
-                                'event.name': 'gen_ai.choice',
-                            },
-                        ]
-                    ),
+                    'events': '[{"content": "hello", "role": "user", "gen_ai.system": "function", "gen_ai.message.index": 0, "event.name": "gen_ai.user.message"}, {"index": 0, "message": {"role": "assistant", "content": "success"}, "gen_ai.system": "function", "event.name": "gen_ai.choice"}]',
                     'logfire.json_schema': '{"type": "object", "properties": {"events": {"type": "array"}, "model_request_parameters": {"type": "object"}}}',
                 },
             },
@@ -169,7 +140,7 @@ def test_first_failed_instrumented(capfire: CaptureLogfire) -> None:
                 'context': {'trace_id': 1, 'span_id': 1, 'is_remote': False},
                 'parent': None,
                 'start_time': 1000000000,
-                'end_time': 6000000000,
+                'end_time': 4000000000,
                 'attributes': {
                     'model_name': 'fallback:function:failure_response:,function:success_response:',
                     'agent_name': 'agent',
@@ -215,23 +186,11 @@ async def test_first_failed_instrumented_stream(capfire: CaptureLogfire) -> None
     assert capfire.exporter.exported_spans_as_dict() == snapshot(
         [
             {
-                'name': 'preparing model request params',
+                'name': 'chat function::success_response_stream',
                 'context': {'trace_id': 1, 'span_id': 3, 'is_remote': False},
                 'parent': {'trace_id': 1, 'span_id': 1, 'is_remote': False},
                 'start_time': 2000000000,
                 'end_time': 3000000000,
-                'attributes': {
-                    'run_step': 1,
-                    'logfire.span_type': 'span',
-                    'logfire.msg': 'preparing model request params',
-                },
-            },
-            {
-                'name': 'chat function::success_response_stream',
-                'context': {'trace_id': 1, 'span_id': 5, 'is_remote': False},
-                'parent': {'trace_id': 1, 'span_id': 1, 'is_remote': False},
-                'start_time': 4000000000,
-                'end_time': 5000000000,
                 'attributes': {
                     'gen_ai.operation.name': 'chat',
                     'model_request_parameters': '{"function_tools": [], "allow_text_result": true, "result_tools": []}',
@@ -251,7 +210,7 @@ async def test_first_failed_instrumented_stream(capfire: CaptureLogfire) -> None
                 'context': {'trace_id': 1, 'span_id': 1, 'is_remote': False},
                 'parent': None,
                 'start_time': 1000000000,
-                'end_time': 6000000000,
+                'end_time': 4000000000,
                 'attributes': {
                     'model_name': 'fallback:function::failure_response_stream,function::success_response_stream',
                     'agent_name': 'agent',
diff --git a/tests/test_logfire.py b/tests/test_logfire.py
index b01c22c93d..1deedec903 100644
--- a/tests/test_logfire.py
+++ b/tests/test_logfire.py
@@ -91,17 +91,15 @@ async def my_ret(x: int) -> str:
                 'id': 0,
                 'message': 'my_agent run',
                 'children': [
-                    {'id': 1, 'message': 'preparing model request params'},
-                    {'id': 2, 'message': 'chat test'},
+                    {'id': 1, 'message': 'chat test'},
                     {
-                        'id': 3,
+                        'id': 2,
                         'message': 'running 1 tool',
                         'children': [
-                            {'id': 4, 'message': 'running tool: my_ret'},
+                            {'id': 3, 'message': 'running tool: my_ret'},
                         ],
                     },
-                    {'id': 5, 'message': 'preparing model request params'},
-                    {'id': 6, 'message': 'chat test'},
+                    {'id': 4, 'message': 'chat test'},
                 ],
             }
         ]
@@ -166,14 +164,7 @@ async def my_ret(x: int) -> str:
             ),
         }
     )
-    assert summary.attributes[1] == snapshot(
-        {
-            'run_step': 1,
-            'logfire.span_type': 'span',
-            'logfire.msg': 'preparing model request params',
-        }
-    )
-    chat_span_attributes = summary.attributes[2]
+    chat_span_attributes = summary.attributes[1]
     if instrument is True or instrument.event_mode == 'attributes':
         attribute_mode_attributes = {k: chat_span_attributes.pop(k) for k in ['events']}
         assert attribute_mode_attributes == snapshot(