diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 05f8c4046f..fd53f26121 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -685,30 +685,53 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: finally: try: if instrumentation_settings and run_span.is_recording(): - run_span.set_attributes(self._run_span_end_attributes(state, usage, instrumentation_settings)) + run_span.set_attributes( + self._run_span_end_attributes( + instrumentation_settings, usage, state.message_history, graph_deps.new_message_index + ) + ) finally: run_span.end() def _run_span_end_attributes( - self, state: _agent_graph.GraphAgentState, usage: _usage.RunUsage, settings: InstrumentationSettings + self, + settings: InstrumentationSettings, + usage: _usage.RunUsage, + message_history: list[_messages.ModelMessage], + new_message_index: int, ): - literal_instructions, _ = self._get_instructions() - if settings.version == 1: attrs = { 'all_messages_events': json.dumps( - [ - InstrumentedModel.event_to_dict(e) - for e in settings.messages_to_otel_events(state.message_history) - ] + [InstrumentedModel.event_to_dict(e) for e in settings.messages_to_otel_events(message_history)] ) } else: - attrs = { - 'pydantic_ai.all_messages': json.dumps(settings.messages_to_otel_messages(list(state.message_history))), - **settings.system_instructions_attributes(literal_instructions), + # Store the last instructions here for convenience + last_instructions = InstrumentedModel._get_instructions(message_history) # pyright: ignore[reportPrivateUsage] + attrs: dict[str, Any] = { + 'pydantic_ai.all_messages': json.dumps(settings.messages_to_otel_messages(list(message_history))), + **settings.system_instructions_attributes(last_instructions), } + # If this agent run was provided with existing history, store an attribute indicating the point at which the + # new messages begin. + if new_message_index > 0: + attrs['pydantic_ai.new_message_index'] = new_message_index + + # If the instructions for this agent run were not always the same, store an attribute that indicates that. + # This can signal to an observability UI that different steps in the agent run had different instructions. + # Note: We purposely only look at "new" messages because they are the only ones produced by this agent run. 
+ if any( + ( + isinstance(m, _messages.ModelRequest) + and m.instructions is not None + and m.instructions != last_instructions + ) + for m in message_history[new_message_index:] + ): + attrs['pydantic_ai.variable_instructions'] = True + return { **usage.opentelemetry_attributes(), **attrs, @@ -716,7 +739,7 @@ def _run_span_end_attributes( { 'type': 'object', 'properties': { - **{attr: {'type': 'array'} for attr in attrs.keys()}, + **{k: {'type': 'array'} if isinstance(v, str) else {} for k, v in attrs.items()}, 'final_result': {'type': 'object'}, }, } diff --git a/tests/test_logfire.py b/tests/test_logfire.py index f434cdbaeb..ec0d35001d 100644 --- a/tests/test_logfire.py +++ b/tests/test_logfire.py @@ -10,7 +10,7 @@ from pydantic import BaseModel from typing_extensions import NotRequired, TypedDict -from pydantic_ai import Agent, ModelMessage, ModelResponse, TextPart, ToolCallPart +from pydantic_ai import Agent, ModelMessage, ModelRequest, ModelResponse, TextPart, ToolCallPart, UserPromptPart from pydantic_ai._utils import get_traceparent from pydantic_ai.exceptions import ModelRetry, UnexpectedModelBehavior from pydantic_ai.models.function import AgentInfo, FunctionModel @@ -2119,3 +2119,774 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: }, ] ) + + +@pytest.mark.skipif(not logfire_installed, reason='logfire not installed') +@pytest.mark.parametrize( + 'instrument', + [InstrumentationSettings(version=1), InstrumentationSettings(version=2), InstrumentationSettings(version=3)], +) +def test_static_function_instructions_in_agent_run_span( + get_logfire_summary: Callable[[], LogfireSummary], instrument: InstrumentationSettings +) -> None: + @dataclass + class MyOutput: + content: str + + my_agent = Agent(model=TestModel(), instrument=instrument) + + @my_agent.instructions + def instructions(): + return 'Here are some instructions' + + result = my_agent.run_sync('Hello', output_type=MyOutput) + assert result.output == MyOutput(content='a') + + summary = get_logfire_summary() + chat_span_attributes = summary.attributes[1] + if instrument.version == 1: + assert summary.attributes[0] == snapshot( + { + 'model_name': 'test', + 'agent_name': 'my_agent', + 'gen_ai.agent.name': 'my_agent', + 'logfire.msg': 'my_agent run', + 'logfire.span_type': 'span', + 'gen_ai.usage.input_tokens': 51, + 'gen_ai.usage.output_tokens': 5, + 'all_messages_events': IsJson( + snapshot( + [ + { + 'content': 'Here are some instructions', + 'role': 'system', + 'event.name': 'gen_ai.system.message', + }, + { + 'content': 'Hello', + 'role': 'user', + 'gen_ai.message.index': 0, + 'event.name': 'gen_ai.user.message', + }, + { + 'role': 'assistant', + 'tool_calls': [ + { + 'id': IsStr(), + 'type': 'function', + 'function': {'name': 'final_result', 'arguments': {'content': 'a'}}, + } + ], + 'gen_ai.message.index': 1, + 'event.name': 'gen_ai.assistant.message', + }, + { + 'content': 'Final result processed.', + 'role': 'tool', + 'id': IsStr(), + 'name': 'final_result', + 'gen_ai.message.index': 2, + 'event.name': 'gen_ai.tool.message', + }, + ] + ) + ), + 'final_result': '{"content": "a"}', + 'logfire.json_schema': IsJson( + snapshot( + { + 'type': 'object', + 'properties': { + 'all_messages_events': {'type': 'array'}, + 'final_result': {'type': 'object'}, + }, + } + ) + ), + } + ) + + assert chat_span_attributes['events'] == IsJson( + snapshot( + [ + { + 'content': 'Here are some instructions', + 'role': 'system', + 'gen_ai.system': 'test', + 'event.name': 'gen_ai.system.message', + }, + { + 
'event.name': 'gen_ai.user.message', + 'content': 'Hello', + 'role': 'user', + 'gen_ai.message.index': 0, + 'gen_ai.system': 'test', + }, + { + 'event.name': 'gen_ai.choice', + 'index': 0, + 'message': { + 'role': 'assistant', + 'tool_calls': [ + { + 'id': IsStr(), + 'type': 'function', + 'function': {'name': 'final_result', 'arguments': {'content': 'a'}}, + } + ], + }, + 'gen_ai.system': 'test', + }, + ] + ) + ) + else: + if instrument.version == 2: + assert summary.traces == snapshot( + [ + { + 'id': 0, + 'name': 'agent run', + 'message': 'my_agent run', + 'children': [{'id': 1, 'name': 'chat test', 'message': 'chat test'}], + } + ] + ) + else: + assert summary.traces == snapshot( + [ + { + 'id': 0, + 'name': 'invoke_agent my_agent', + 'message': 'my_agent run', + 'children': [{'id': 1, 'name': 'chat test', 'message': 'chat test'}], + } + ] + ) + + assert summary.attributes[0] == snapshot( + { + 'model_name': 'test', + 'agent_name': 'my_agent', + 'gen_ai.agent.name': 'my_agent', + 'logfire.msg': 'my_agent run', + 'logfire.span_type': 'span', + 'final_result': '{"content": "a"}', + 'gen_ai.usage.input_tokens': 51, + 'gen_ai.usage.output_tokens': 5, + 'pydantic_ai.all_messages': IsJson( + snapshot( + [ + {'role': 'user', 'parts': [{'type': 'text', 'content': 'Hello'}]}, + { + 'role': 'assistant', + 'parts': [ + { + 'type': 'tool_call', + 'id': IsStr(), + 'name': 'final_result', + 'arguments': {'content': 'a'}, + } + ], + }, + { + 'role': 'user', + 'parts': [ + { + 'type': 'tool_call_response', + 'id': IsStr(), + 'name': 'final_result', + 'result': 'Final result processed.', + } + ], + }, + ] + ) + ), + 'gen_ai.system_instructions': '[{"type": "text", "content": "Here are some instructions"}]', + 'logfire.json_schema': IsJson( + snapshot( + { + 'type': 'object', + 'properties': { + 'pydantic_ai.all_messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, + 'final_result': {'type': 'object'}, + }, + } + ) + ), + } + ) + + assert chat_span_attributes['gen_ai.input.messages'] == IsJson( + snapshot([{'role': 'user', 'parts': [{'type': 'text', 'content': 'Hello'}]}]) + ) + assert chat_span_attributes['gen_ai.output.messages'] == IsJson( + snapshot( + [ + { + 'role': 'assistant', + 'parts': [ + { + 'type': 'tool_call', + 'id': IsStr(), + 'name': 'final_result', + 'arguments': {'content': 'a'}, + } + ], + } + ] + ) + ) + + +@pytest.mark.skipif(not logfire_installed, reason='logfire not installed') +@pytest.mark.parametrize( + 'instrument', + [InstrumentationSettings(version=1), InstrumentationSettings(version=2), InstrumentationSettings(version=3)], +) +def test_dynamic_function_instructions_in_agent_run_span( + get_logfire_summary: Callable[[], LogfireSummary], instrument: InstrumentationSettings +) -> None: + @dataclass + class MyOutput: + content: str + + my_agent = Agent(model=TestModel(), instrument=instrument) + + @my_agent.instructions + def instructions(ctx: RunContext[None]): + return f'This is step {ctx.run_step + 1}' + + @my_agent.tool_plain + def my_tool() -> str: + return 'This is a tool call' + + result = my_agent.run_sync('Hello', output_type=MyOutput) + assert result.output == MyOutput(content='a') + + summary = get_logfire_summary() + chat_span_attributes = summary.attributes[1] + if instrument.version == 1: + assert summary.attributes[0] == snapshot( + { + 'model_name': 'test', + 'agent_name': 'my_agent', + 'gen_ai.agent.name': 'my_agent', + 'logfire.msg': 'my_agent run', + 'logfire.span_type': 'span', + 'gen_ai.usage.input_tokens': 107, + 
'gen_ai.usage.output_tokens': 9, + 'all_messages_events': IsJson( + snapshot( + [ + { + 'content': 'This is step 2', + 'role': 'system', + 'event.name': 'gen_ai.system.message', + }, + { + 'content': 'Hello', + 'role': 'user', + 'gen_ai.message.index': 0, + 'event.name': 'gen_ai.user.message', + }, + { + 'role': 'assistant', + 'tool_calls': [ + { + 'id': 'pyd_ai_tool_call_id__my_tool', + 'type': 'function', + 'function': {'name': 'my_tool', 'arguments': {}}, + } + ], + 'gen_ai.message.index': 1, + 'event.name': 'gen_ai.assistant.message', + }, + { + 'content': 'This is a tool call', + 'role': 'tool', + 'id': 'pyd_ai_tool_call_id__my_tool', + 'name': 'my_tool', + 'gen_ai.message.index': 2, + 'event.name': 'gen_ai.tool.message', + }, + { + 'role': 'assistant', + 'tool_calls': [ + { + 'id': 'pyd_ai_tool_call_id__final_result', + 'type': 'function', + 'function': {'name': 'final_result', 'arguments': {'content': 'a'}}, + } + ], + 'gen_ai.message.index': 3, + 'event.name': 'gen_ai.assistant.message', + }, + { + 'content': 'Final result processed.', + 'role': 'tool', + 'id': 'pyd_ai_tool_call_id__final_result', + 'name': 'final_result', + 'gen_ai.message.index': 4, + 'event.name': 'gen_ai.tool.message', + }, + ] + ) + ), + 'final_result': '{"content": "a"}', + 'logfire.json_schema': IsJson( + snapshot( + { + 'type': 'object', + 'properties': { + 'all_messages_events': {'type': 'array'}, + 'final_result': {'type': 'object'}, + }, + } + ) + ), + } + ) + + assert chat_span_attributes['events'] == IsJson( + snapshot( + [ + { + 'content': 'This is step 1', + 'role': 'system', + 'gen_ai.system': 'test', + 'event.name': 'gen_ai.system.message', + }, + { + 'event.name': 'gen_ai.user.message', + 'content': 'Hello', + 'role': 'user', + 'gen_ai.message.index': 0, + 'gen_ai.system': 'test', + }, + { + 'event.name': 'gen_ai.choice', + 'index': 0, + 'message': { + 'role': 'assistant', + 'tool_calls': [ + { + 'id': IsStr(), + 'type': 'function', + 'function': {'name': 'my_tool', 'arguments': {}}, + } + ], + }, + 'gen_ai.system': 'test', + }, + ] + ) + ) + else: + if instrument.version == 2: + assert summary.traces == snapshot( + [ + { + 'id': 0, + 'name': 'agent run', + 'message': 'my_agent run', + 'children': [ + {'id': 1, 'name': 'chat test', 'message': 'chat test'}, + { + 'id': 2, + 'name': 'running tools', + 'message': 'running 1 tool', + 'children': [{'id': 3, 'name': 'running tool', 'message': 'running tool: my_tool'}], + }, + {'id': 4, 'name': 'chat test', 'message': 'chat test'}, + ], + } + ] + ) + else: + assert summary.traces == snapshot( + [ + { + 'id': 0, + 'name': 'invoke_agent my_agent', + 'message': 'my_agent run', + 'children': [ + {'id': 1, 'name': 'chat test', 'message': 'chat test'}, + { + 'id': 2, + 'name': 'running tools', + 'message': 'running 1 tool', + 'children': [ + {'id': 3, 'name': 'execute_tool my_tool', 'message': 'running tool: my_tool'} + ], + }, + {'id': 4, 'name': 'chat test', 'message': 'chat test'}, + ], + } + ] + ) + + assert summary.attributes[0] == snapshot( + { + 'model_name': 'test', + 'agent_name': 'my_agent', + 'gen_ai.agent.name': 'my_agent', + 'logfire.msg': 'my_agent run', + 'logfire.span_type': 'span', + 'final_result': '{"content": "a"}', + 'gen_ai.usage.input_tokens': 107, + 'gen_ai.usage.output_tokens': 9, + 'pydantic_ai.all_messages': IsJson( + snapshot( + [ + {'role': 'user', 'parts': [{'type': 'text', 'content': 'Hello'}]}, + { + 'role': 'assistant', + 'parts': [ + { + 'type': 'tool_call', + 'id': 'pyd_ai_tool_call_id__my_tool', + 'name': 'my_tool', + 
'arguments': {}, + } + ], + }, + { + 'role': 'user', + 'parts': [ + { + 'type': 'tool_call_response', + 'id': 'pyd_ai_tool_call_id__my_tool', + 'name': 'my_tool', + 'result': 'This is a tool call', + } + ], + }, + { + 'role': 'assistant', + 'parts': [ + { + 'type': 'tool_call', + 'id': IsStr(), + 'name': 'final_result', + 'arguments': {'content': 'a'}, + } + ], + }, + { + 'role': 'user', + 'parts': [ + { + 'type': 'tool_call_response', + 'id': IsStr(), + 'name': 'final_result', + 'result': 'Final result processed.', + } + ], + }, + ] + ) + ), + 'gen_ai.system_instructions': '[{"type": "text", "content": "This is step 2"}]', + 'pydantic_ai.variable_instructions': True, + 'logfire.json_schema': IsJson( + snapshot( + { + 'type': 'object', + 'properties': { + 'pydantic_ai.all_messages': {'type': 'array'}, + 'gen_ai.system_instructions': {'type': 'array'}, + 'pydantic_ai.variable_instructions': {}, + 'final_result': {'type': 'object'}, + }, + } + ) + ), + } + ) + + assert chat_span_attributes['gen_ai.input.messages'] == IsJson( + snapshot([{'role': 'user', 'parts': [{'type': 'text', 'content': 'Hello'}]}]) + ) + assert chat_span_attributes['gen_ai.output.messages'] == IsJson( + snapshot( + [ + { + 'role': 'assistant', + 'parts': [ + { + 'type': 'tool_call', + 'id': IsStr(), + 'name': 'my_tool', + 'arguments': {}, + } + ], + } + ] + ) + ) + + +@pytest.mark.skipif(not logfire_installed, reason='logfire not installed') +@pytest.mark.parametrize( + 'instrument', + [InstrumentationSettings(version=1), InstrumentationSettings(version=2), InstrumentationSettings(version=3)], +) +def test_function_instructions_with_history_in_agent_run_span( + get_logfire_summary: Callable[[], LogfireSummary], instrument: InstrumentationSettings +) -> None: + @dataclass + class MyOutput: + content: str + + my_agent = Agent(model=TestModel(), instrument=instrument) + + @my_agent.instructions + def instructions(ctx: RunContext[None]): + return 'Instructions for the current agent run' + + result = my_agent.run_sync( + 'Hello', + message_history=[ + ModelRequest(parts=[UserPromptPart(content='Hi')], instructions='Instructions from a previous agent run'), + ModelResponse(parts=[TextPart(content='Hello')]), + ], + output_type=MyOutput, + ) + assert result.output == MyOutput(content='a') + + summary = get_logfire_summary() + chat_span_attributes = summary.attributes[1] + if instrument.version == 1: + assert summary.attributes[0] == snapshot( + { + 'model_name': 'test', + 'agent_name': 'my_agent', + 'gen_ai.agent.name': 'my_agent', + 'logfire.msg': 'my_agent run', + 'logfire.span_type': 'span', + 'gen_ai.usage.input_tokens': 52, + 'gen_ai.usage.output_tokens': 6, + 'all_messages_events': IsJson( + snapshot( + [ + { + 'content': 'Instructions for the current agent run', + 'role': 'system', + 'event.name': 'gen_ai.system.message', + }, + { + 'content': 'Hi', + 'role': 'user', + 'gen_ai.message.index': 0, + 'event.name': 'gen_ai.user.message', + }, + { + 'role': 'assistant', + 'content': 'Hello', + 'gen_ai.message.index': 1, + 'event.name': 'gen_ai.assistant.message', + }, + { + 'content': 'Hello', + 'role': 'user', + 'gen_ai.message.index': 2, + 'event.name': 'gen_ai.user.message', + }, + { + 'role': 'assistant', + 'tool_calls': [ + { + 'id': 'pyd_ai_tool_call_id__final_result', + 'type': 'function', + 'function': {'name': 'final_result', 'arguments': {'content': 'a'}}, + } + ], + 'gen_ai.message.index': 3, + 'event.name': 'gen_ai.assistant.message', + }, + { + 'content': 'Final result processed.', + 'role': 'tool', + 'id': 
'pyd_ai_tool_call_id__final_result', + 'name': 'final_result', + 'gen_ai.message.index': 4, + 'event.name': 'gen_ai.tool.message', + }, + ] + ) + ), + 'final_result': '{"content": "a"}', + 'logfire.json_schema': IsJson( + snapshot( + { + 'type': 'object', + 'properties': { + 'all_messages_events': {'type': 'array'}, + 'final_result': {'type': 'object'}, + }, + } + ) + ), + } + ) + + assert chat_span_attributes['events'] == IsJson( + snapshot( + [ + { + 'content': 'Instructions for the current agent run', + 'role': 'system', + 'gen_ai.system': 'test', + 'event.name': 'gen_ai.system.message', + }, + { + 'content': 'Hi', + 'role': 'user', + 'gen_ai.system': 'test', + 'gen_ai.message.index': 0, + 'event.name': 'gen_ai.user.message', + }, + { + 'role': 'assistant', + 'content': 'Hello', + 'gen_ai.system': 'test', + 'gen_ai.message.index': 1, + 'event.name': 'gen_ai.assistant.message', + }, + { + 'content': 'Hello', + 'role': 'user', + 'gen_ai.system': 'test', + 'gen_ai.message.index': 2, + 'event.name': 'gen_ai.user.message', + }, + { + 'event.name': 'gen_ai.choice', + 'index': 0, + 'message': { + 'role': 'assistant', + 'tool_calls': [ + { + 'id': IsStr(), + 'type': 'function', + 'function': {'name': 'final_result', 'arguments': {'content': 'a'}}, + } + ], + }, + 'gen_ai.system': 'test', + }, + ] + ) + ) + else: + if instrument.version == 2: + assert summary.traces == snapshot( + [ + { + 'id': 0, + 'name': 'agent run', + 'message': 'my_agent run', + 'children': [{'id': 1, 'name': 'chat test', 'message': 'chat test'}], + } + ] + ) + else: + assert summary.traces == snapshot( + [ + { + 'id': 0, + 'name': 'invoke_agent my_agent', + 'message': 'my_agent run', + 'children': [{'id': 1, 'name': 'chat test', 'message': 'chat test'}], + } + ] + ) + + assert summary.attributes[0] == snapshot( + { + 'model_name': 'test', + 'agent_name': 'my_agent', + 'gen_ai.agent.name': 'my_agent', + 'logfire.msg': 'my_agent run', + 'logfire.span_type': 'span', + 'final_result': '{"content": "a"}', + 'gen_ai.usage.input_tokens': 52, + 'gen_ai.usage.output_tokens': 6, + 'pydantic_ai.all_messages': IsJson( + snapshot( + [ + {'role': 'user', 'parts': [{'type': 'text', 'content': 'Hi'}]}, + {'role': 'assistant', 'parts': [{'type': 'text', 'content': 'Hello'}]}, + {'role': 'user', 'parts': [{'type': 'text', 'content': 'Hello'}]}, + { + 'role': 'assistant', + 'parts': [ + { + 'type': 'tool_call', + 'id': IsStr(), + 'name': 'final_result', + 'arguments': {'content': 'a'}, + } + ], + }, + { + 'role': 'user', + 'parts': [ + { + 'type': 'tool_call_response', + 'id': IsStr(), + 'name': 'final_result', + 'result': 'Final result processed.', + } + ], + }, + ] + ) + ), + 'pydantic_ai.new_message_index': 2, + 'gen_ai.system_instructions': '[{"type": "text", "content": "Instructions for the current agent run"}]', + 'logfire.json_schema': IsJson( + snapshot( + { + 'type': 'object', + 'properties': { + 'pydantic_ai.all_messages': {'type': 'array'}, + 'pydantic_ai.new_message_index': {}, + 'gen_ai.system_instructions': {'type': 'array'}, + 'final_result': {'type': 'object'}, + }, + } + ) + ), + } + ) + + assert chat_span_attributes['gen_ai.input.messages'] == IsJson( + snapshot( + [ + {'role': 'user', 'parts': [{'type': 'text', 'content': 'Hi'}]}, + {'role': 'assistant', 'parts': [{'type': 'text', 'content': 'Hello'}]}, + {'role': 'user', 'parts': [{'type': 'text', 'content': 'Hello'}]}, + ] + ) + ) + assert chat_span_attributes['gen_ai.output.messages'] == IsJson( + snapshot( + [ + { + 'role': 'assistant', + 'parts': [ + { + 'type': 
'tool_call', + 'id': IsStr(), + 'name': 'final_result', + 'arguments': {'content': 'a'}, + } + ], + } + ] + ) + )
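
For reviewers who want to poke at the new run-span attributes without wiring up instrumentation, here is a minimal standalone sketch of the checks behind `pydantic_ai.new_message_index` and `pydantic_ai.variable_instructions`. It mirrors the logic added to `_run_span_end_attributes` above but lives outside the library: `summarize_history_attributes` is an illustrative helper, not a pydantic_ai API, and the hand-built history follows the shape used in `test_function_instructions_with_history_in_agent_run_span`.

# Standalone sketch (not part of the diff): reproduces the new run-span attribute
# logic from `_run_span_end_attributes` on a hand-built message history.
# `summarize_history_attributes` is an illustrative helper, not a pydantic_ai API.
from typing import Any

from pydantic_ai import ModelMessage, ModelRequest, ModelResponse, TextPart, UserPromptPart


def summarize_history_attributes(
    message_history: list[ModelMessage],
    new_message_index: int,
    last_instructions: str | None,
) -> dict[str, Any]:
    attrs: dict[str, Any] = {}
    # Existing history was passed in, so record where the new messages begin.
    if new_message_index > 0:
        attrs['pydantic_ai.new_message_index'] = new_message_index
    # Only the "new" messages are inspected, since only they were produced by this run.
    if any(
        isinstance(m, ModelRequest) and m.instructions is not None and m.instructions != last_instructions
        for m in message_history[new_message_index:]
    ):
        attrs['pydantic_ai.variable_instructions'] = True
    return attrs


history: list[ModelMessage] = [
    # Two messages carried over from a previous run...
    ModelRequest(parts=[UserPromptPart(content='Hi')], instructions='Instructions from a previous agent run'),
    ModelResponse(parts=[TextPart(content='Hello')]),
    # ...and messages produced by this run, whose instructions changed between steps.
    ModelRequest(parts=[UserPromptPart(content='Hello')], instructions='This is step 1'),
    ModelResponse(parts=[TextPart(content='Working on it')]),
    ModelRequest(parts=[UserPromptPart(content='Continue')], instructions='This is step 2'),
]

# `last_instructions` is whatever the most recent request carried ('This is step 2' here).
print(summarize_history_attributes(history, new_message_index=2, last_instructions='This is step 2'))
# {'pydantic_ai.new_message_index': 2, 'pydantic_ai.variable_instructions': True}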
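
The other behavioural tweak is the `logfire.json_schema` properties comprehension: JSON-encoded string attributes keep the `{'type': 'array'}` hint, while plain values such as `pydantic_ai.new_message_index` and `pydantic_ai.variable_instructions` get an empty schema. A quick sketch of that comprehension in isolation, with stand-in `attrs` values rather than real span data:

import json

# Stand-in attrs resembling what `_run_span_end_attributes` builds for version >= 2.
attrs: dict[str, object] = {
    'pydantic_ai.all_messages': json.dumps([{'role': 'user', 'parts': []}]),
    'pydantic_ai.new_message_index': 2,
    'pydantic_ai.variable_instructions': True,
}

properties = {
    **{k: {'type': 'array'} if isinstance(v, str) else {} for k, v in attrs.items()},
    'final_result': {'type': 'object'},
}
print(properties)
# {'pydantic_ai.all_messages': {'type': 'array'}, 'pydantic_ai.new_message_index': {},
#  'pydantic_ai.variable_instructions': {}, 'final_result': {'type': 'object'}}

This matches the `logfire.json_schema` snapshots asserted in the new tests above.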