diff --git a/docs/deferred-tools.md b/docs/deferred-tools.md
index 4a50027cf7..31e14149c0 100644
--- a/docs/deferred-tools.md
+++ b/docs/deferred-tools.md
@@ -93,10 +93,20 @@
 for call in requests.approvals:
     results.approvals[call.tool_call_id] = result
 
-result = agent.run_sync(message_history=messages, deferred_tool_results=results)
+result = agent.run_sync(
+    'Now create a backup of README.md',  # (2)!
+    message_history=messages,
+    deferred_tool_results=results,
+)
 print(result.output)
 """
-I successfully updated `README.md` and cleared `.env`, but was not able to delete `__init__.py`.
+Here's what I've done:
+- Attempted to delete __init__.py, but deletion is not allowed.
+- Updated README.md with: Hello, world!
+- Cleared .env (set to empty).
+- Created a backup at README.md.bak containing: Hello, world!
+
+If you want a different backup name or format (e.g., timestamped like README_2025-11-24.bak), let me know.
 """
 print(result.all_messages())
 """
@@ -158,16 +168,44 @@ print(result.all_messages())
                 tool_call_id='delete_file',
                 timestamp=datetime.datetime(...),
             ),
+            UserPromptPart(
+                content='Now create a backup of README.md',
+                timestamp=datetime.datetime(...),
+            ),
+        ],
+        run_id='...',
+    ),
+    ModelResponse(
+        parts=[
+            ToolCallPart(
+                tool_name='update_file',
+                args={'path': 'README.md.bak', 'content': 'Hello, world!'},
+                tool_call_id='update_file_backup',
+            )
+        ],
+        usage=RequestUsage(input_tokens=86, output_tokens=31),
+        model_name='gpt-5',
+        timestamp=datetime.datetime(...),
+        run_id='...',
+    ),
+    ModelRequest(
+        parts=[
+            ToolReturnPart(
+                tool_name='update_file',
+                content="File 'README.md.bak' updated: 'Hello, world!'",
+                tool_call_id='update_file_backup',
+                timestamp=datetime.datetime(...),
+            )
         ],
         run_id='...',
     ),
     ModelResponse(
         parts=[
             TextPart(
-                content='I successfully updated `README.md` and cleared `.env`, but was not able to delete `__init__.py`.'
+                content="Here's what I've done:\n- Attempted to delete __init__.py, but deletion is not allowed.\n- Updated README.md with: Hello, world!\n- Cleared .env (set to empty).\n- Created a backup at README.md.bak containing: Hello, world!\n\nIf you want a different backup name or format (e.g., timestamped like README_2025-11-24.bak), let me know."
             )
         ],
-        usage=RequestUsage(input_tokens=79, output_tokens=39),
+        usage=RequestUsage(input_tokens=93, output_tokens=89),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
         run_id='...',
@@ -177,6 +215,7 @@ print(result.all_messages())
 ```
 
 1. The optional `metadata` parameter can attach arbitrary context to deferred tool calls, accessible in `DeferredToolRequests.metadata` keyed by `tool_call_id`.
+2. This second agent run continues from where the first run left off, providing the tool approval results and optionally a new `user_prompt` to give the model additional instructions alongside the deferred results.
 
 _(This example is complete, it can be run "as is")_
 
diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
index c973befc70..186a386e4a 100644
--- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py
+++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -307,10 +307,6 @@ async def _handle_deferred_tool_results(  # noqa: C901
             raise exceptions.UserError(
                 'Tool call results were provided, but the message history does not contain any unprocessed tool calls.'
             )
-        if self.user_prompt is not None:
-            raise exceptions.UserError(
-                'Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
-            )
 
         tool_call_results: dict[str, DeferredToolResult | Literal['skip']] | None = None
         tool_call_results = {}
@@ -338,7 +334,9 @@
                     tool_call_results[part.tool_call_id] = 'skip'
 
         # Skip ModelRequestNode and go directly to CallToolsNode
-        return CallToolsNode[DepsT, NodeRunEndT](last_model_response, tool_call_results=tool_call_results)
+        return CallToolsNode[DepsT, NodeRunEndT](
+            last_model_response, tool_call_results=tool_call_results, user_prompt=self.user_prompt
+        )
 
     async def _reevaluate_dynamic_prompts(
         self, messages: list[_messages.ModelMessage], run_context: RunContext[DepsT]
@@ -543,6 +541,13 @@
 class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
     model_response: _messages.ModelResponse
     tool_call_results: dict[str, DeferredToolResult | Literal['skip']] | None = None
+    user_prompt: str | Sequence[_messages.UserContent] | None = None
+    """Optional user prompt to include alongside tool call results.
+
+    This prompt is only sent to the model when the `model_response` contains tool calls.
+    If the `model_response` has final output instead, this user prompt is ignored.
+    The user prompt will be appended after all tool return parts in the next model request.
+    """
 
     _events_iterator: AsyncIterator[_messages.HandleResponseEvent] | None = field(default=None, init=False, repr=False)
     _next_node: ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]] | None = field(
@@ -723,6 +728,10 @@ async def _handle_tool_calls(
             final_result = output_final_result[0]
             self._next_node = self._handle_final_result(ctx, final_result, output_parts)
         else:
+            # Add user prompt if provided, after all tool return parts
+            if self.user_prompt is not None:
+                output_parts.append(_messages.UserPromptPart(self.user_prompt))
+
             instructions = await ctx.deps.get_instructions(run_context)
             self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
                 _messages.ModelRequest(parts=output_parts, instructions=instructions)
diff --git a/tests/test_agent.py b/tests/test_agent.py
index 8cc6b8b38c..c912334434 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -5583,15 +5583,6 @@ async def test_run_with_deferred_tool_results_errors():
             deferred_tool_results=DeferredToolResults(approvals={'create_file': True}),
         )
 
-    with pytest.raises(
-        UserError, match='Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
-    ):
-        await agent.run(
-            'Hello again',
-            message_history=message_history,
-            deferred_tool_results=DeferredToolResults(approvals={'create_file': True}),
-        )
-
     message_history: list[ModelMessage] = [
         ModelRequest(parts=[UserPromptPart(content='Hello')]),
         ModelResponse(
@@ -5628,6 +5619,103 @@
     )
 
 
+async def test_user_prompt_with_deferred_tool_results():
+    """Test that user_prompt can be provided alongside deferred_tool_results."""
+    from pydantic_ai.exceptions import ApprovalRequired
+
+    def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
+        # First call: model requests tool approval
+        if len(messages) == 1:
+            return ModelResponse(
+                parts=[
+                    ToolCallPart(
+                        tool_name='update_file', tool_call_id='update_file_1', args={'path': '.env', 'content': ''}
+                    ),
+                ]
+            )
+        # Second call: model responds to tool results and user prompt
+        else:
+            # Verify we received both tool results and user prompt
+            last_request = messages[-1]
+            assert isinstance(last_request, ModelRequest)
+            has_tool_return = any(isinstance(p, ToolReturnPart) for p in last_request.parts)
+            has_user_prompt = any(isinstance(p, UserPromptPart) for p in last_request.parts)
+            assert has_tool_return, 'Expected tool return part in request'
+            assert has_user_prompt, 'Expected user prompt part in request'
+
+            # Get user prompt content
+            user_prompt_content = next(p.content for p in last_request.parts if isinstance(p, UserPromptPart))
+            return ModelResponse(parts=[TextPart(f'Approved and {user_prompt_content}')])
+
+    agent = Agent(FunctionModel(llm), output_type=[str, DeferredToolRequests])
+
+    @agent.tool
+    def update_file(ctx: RunContext, path: str, content: str) -> str:
+        if path == '.env' and not ctx.tool_call_approved:
+            raise ApprovalRequired
+        return f'File {path!r} updated'
+
+    # First run: get deferred tool requests
+    result = await agent.run('Update .env file')
+    assert isinstance(result.output, DeferredToolRequests)
+    assert len(result.output.approvals) == 1
+
+    messages = result.all_messages()
+    # Snapshot the message history after first run to show the state before deferred tool results
+    assert messages == snapshot(
+        [
+            ModelRequest(
+                parts=[UserPromptPart(content='Update .env file', timestamp=IsDatetime())],
+                run_id=IsStr(),
+            ),
+            ModelResponse(
+                parts=[
+                    ToolCallPart(
+                        tool_name='update_file', tool_call_id='update_file_1', args={'path': '.env', 'content': ''}
+                    )
+                ],
+                usage=RequestUsage(input_tokens=53, output_tokens=6),
+                model_name='function:llm:',
+                timestamp=IsDatetime(),
+                run_id=IsStr(),
+            ),
+        ]
+    )
+
+    # Second run: provide approvals AND user prompt
+    results = DeferredToolResults(approvals={result.output.approvals[0].tool_call_id: True})
+    result2 = await agent.run('continue with the operation', message_history=messages, deferred_tool_results=results)
+
+    assert isinstance(result2.output, str)
+    assert 'continue with the operation' in result2.output
+
+    # Snapshot the new messages to show how tool results and user prompt are combined
+    new_messages = result2.new_messages()
+    assert new_messages == snapshot(
+        [
+            ModelRequest(
+                parts=[
+                    ToolReturnPart(
+                        tool_name='update_file',
+                        content="File '.env' updated",
+                        tool_call_id='update_file_1',
+                        timestamp=IsDatetime(),
+                    ),
+                    UserPromptPart(content='continue with the operation', timestamp=IsDatetime()),
+                ],
+                run_id=IsStr(),
+            ),
+            ModelResponse(
+                parts=[TextPart(content='Approved and continue with the operation')],
+                usage=RequestUsage(input_tokens=61, output_tokens=12),
+                model_name='function:llm:',
+                timestamp=IsDatetime(),
+                run_id=IsStr(),
+            ),
+        ]
+    )
+
+
 def test_tool_requires_approval_error():
     agent = Agent('test')
 
diff --git a/tests/test_examples.py b/tests/test_examples.py
index 407816b60a..ff87b7a7fa 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -874,6 +874,30 @@ async def model_logic(  # noqa: C901
                 )
             ]
         )
+    elif isinstance(m, UserPromptPart) and m.content == 'Now create a backup of README.md':
+        return ModelResponse(
+            parts=[
+                ToolCallPart(
+                    tool_name='update_file',
+                    args={'path': 'README.md.bak', 'content': 'Hello, world!'},
+                    tool_call_id='update_file_backup',
+                )
+            ],
+        )
+    elif isinstance(m, ToolReturnPart) and m.tool_name == 'update_file' and 'README.md.bak' in m.content:
+        return ModelResponse(
+            parts=[
+                TextPart(
+                    "Here's what I've done:\n"
+                    '- Attempted to delete __init__.py, but deletion is not allowed.\n'
+                    '- Updated README.md with: Hello, world!\n'
+                    '- Cleared .env (set to empty).\n'
+                    '- Created a backup at README.md.bak containing: Hello, world!\n'
+                    '\n'
+                    'If you want a different backup name or format (e.g., timestamped like README_2025-11-24.bak), let me know.'
+                )
+            ],
+        )
     elif isinstance(m, ToolReturnPart) and m.tool_name == 'calculate_answer':
         return ModelResponse(
             parts=[TextPart('The answer to the ultimate question of life, the universe, and everything is 42.')]
         )
diff --git a/tests/test_tools.py b/tests/test_tools.py
index f65105b4e6..bcdf537994 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -1396,6 +1396,59 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
 
     assert result.output == snapshot('Done!')
 
+def test_approval_required_with_user_prompt():
+    """Test that user_prompt can be provided alongside deferred_tool_results for approval."""
+
+    def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
+        if len(messages) == 1:
+            # First call: request approval
+            return ModelResponse(
+                parts=[
+                    ToolCallPart('my_tool', {'x': 1}, tool_call_id='my_tool'),
+                ]
+            )
+        else:
+            # Second call: respond to both tool result and user prompt
+            last_request = messages[-1]
+            assert isinstance(last_request, ModelRequest)
+
+            # Verify we received both tool return and user prompt
+            has_tool_return = any(isinstance(p, ToolReturnPart) for p in last_request.parts)
+            has_user_prompt = any(isinstance(p, UserPromptPart) for p in last_request.parts)
+            assert has_tool_return, 'Expected tool return in request'
+            assert has_user_prompt, 'Expected user prompt in request'
+
+            # Get user prompt content
+            user_prompt = next(p.content for p in last_request.parts if isinstance(p, UserPromptPart))
+            return ModelResponse(parts=[TextPart(f'Tool executed and {user_prompt}')])
+
+    agent = Agent(FunctionModel(llm), output_type=[str, DeferredToolRequests])
+
+    @agent.tool
+    def my_tool(ctx: RunContext[None], x: int) -> int:
+        if not ctx.tool_call_approved:
+            raise ApprovalRequired
+        return x * 42
+
+    # First run: get approval request
+    result = agent.run_sync('Hello')
+    messages = result.all_messages()
+    assert isinstance(result.output, DeferredToolRequests)
+    assert len(result.output.approvals) == 1
+
+    # Second run: provide approval AND user prompt
+    result = agent.run_sync(
+        user_prompt='continue with extra instructions',
+        message_history=messages,
+        deferred_tool_results=DeferredToolResults(approvals={'my_tool': True}),
+    )
+
+    # Verify the response includes both tool result and user prompt
+    assert isinstance(result.output, str)
+    assert 'continue with extra instructions' in result.output
+    assert 'Tool executed' in result.output
+
+
 def test_call_deferred_with_metadata():
     """Test that CallDeferred exception can carry metadata."""
    agent = Agent(TestModel(), output_type=[str, DeferredToolRequests])
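
For review context, here's a minimal end-to-end sketch of the flow this patch enables, condensed from the docs example and the tests above. The model string `'openai:gpt-5'` and the prompts are illustrative assumptions; the imports, `output_type=[str, DeferredToolRequests]`, `ctx.tool_call_approved`, and `ApprovalRequired` are used as in the diff.

```python
from pydantic_ai import Agent, DeferredToolRequests, DeferredToolResults, RunContext
from pydantic_ai.exceptions import ApprovalRequired

# Model string is illustrative; any model works here.
agent = Agent('openai:gpt-5', output_type=[str, DeferredToolRequests])


@agent.tool
def update_file(ctx: RunContext, path: str, content: str) -> str:
    # Writes to sensitive paths defer until the call is explicitly approved.
    if path == '.env' and not ctx.tool_call_approved:
        raise ApprovalRequired
    return f'File {path!r} updated'


# First run: the tool defers, so the output is a DeferredToolRequests.
result = agent.run_sync('Clear the .env file')
assert isinstance(result.output, DeferredToolRequests)

# Approve every deferred call by tool_call_id.
results = DeferredToolResults(
    approvals={call.tool_call_id: True for call in result.output.approvals}
)

# Second run: before this patch, passing a user prompt here raised
# 'Cannot provide a new user prompt when the message history contains
# unprocessed tool calls.'; now it is appended after the tool return
# parts in the next model request.
result = agent.run_sync(
    'Now summarize what you changed',
    message_history=result.all_messages(),
    deferred_tool_results=results,
)
print(result.output)
```

Per the `CallToolsNode.user_prompt` docstring above, the prompt is only sent when the resumed response contains tool calls; if the response turns out to be final output, the prompt is ignored.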