47 changes: 43 additions & 4 deletions docs/deferred-tools.md
@@ -93,10 +93,20 @@ for call in requests.approvals:

     results.approvals[call.tool_call_id] = result

-result = agent.run_sync(message_history=messages, deferred_tool_results=results)
+result = agent.run_sync(
+    'Now create a backup of README.md',  # (2)!
+    message_history=messages,
+    deferred_tool_results=results,
+)
 print(result.output)
 """
-I successfully updated `README.md` and cleared `.env`, but was not able to delete `__init__.py`.
+Here's what I've done:
+- Attempted to delete __init__.py, but deletion is not allowed.
+- Updated README.md with: Hello, world!
+- Cleared .env (set to empty).
+- Created a backup at README.md.bak containing: Hello, world!
+
+If you want a different backup name or format (e.g., timestamped like README_2025-11-24.bak), let me know.
 """
 print(result.all_messages())
 """
@@ -158,16 +168,44 @@ print(result.all_messages())
                 tool_call_id='delete_file',
                 timestamp=datetime.datetime(...),
             ),
+            UserPromptPart(
+                content='Now create a backup of README.md',
+                timestamp=datetime.datetime(...),
+            ),
         ],
         run_id='...',
     ),
+    ModelResponse(
+        parts=[
+            ToolCallPart(
+                tool_name='update_file',
+                args={'path': 'README.md.bak', 'content': 'Hello, world!'},
+                tool_call_id='update_file_backup',
+            )
+        ],
+        usage=RequestUsage(input_tokens=86, output_tokens=31),
+        model_name='gpt-5',
+        timestamp=datetime.datetime(...),
+        run_id='...',
+    ),
+    ModelRequest(
+        parts=[
+            ToolReturnPart(
+                tool_name='update_file',
+                content="File 'README.md.bak' updated: 'Hello, world!'",
+                tool_call_id='update_file_backup',
+                timestamp=datetime.datetime(...),
+            )
+        ],
+        run_id='...',
+    ),
     ModelResponse(
         parts=[
             TextPart(
-                content='I successfully updated `README.md` and cleared `.env`, but was not able to delete `__init__.py`.'
+                content="Here's what I've done:\n- Attempted to delete __init__.py, but deletion is not allowed.\n- Updated README.md with: Hello, world!\n- Cleared .env (set to empty).\n- Created a backup at README.md.bak containing: Hello, world!\n\nIf you want a different backup name or format (e.g., timestamped like README_2025-11-24.bak), let me know."
             )
         ],
-        usage=RequestUsage(input_tokens=79, output_tokens=39),
+        usage=RequestUsage(input_tokens=93, output_tokens=89),
         model_name='gpt-5',
         timestamp=datetime.datetime(...),
         run_id='...',
@@ -177,6 +215,7 @@ print(result.all_messages())
 ```

 1. The optional `metadata` parameter can attach arbitrary context to deferred tool calls, accessible in `DeferredToolRequests.metadata` keyed by `tool_call_id`.
+2. This second agent run continues from where the first run left off, providing the tool approval results and optionally a new `user_prompt` to give the model additional instructions alongside the deferred results.

 _(This example is complete, it can be run "as is")_
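For footnote 1, here is a minimal sketch of attaching and reading that metadata, assuming (as `test_call_deferred_with_metadata` below suggests) that `CallDeferred` accepts a `metadata` dict; the tool, prompt, and model names are illustrative, not from the PR:

```python
from pydantic_ai import Agent, DeferredToolRequests
from pydantic_ai.exceptions import CallDeferred

agent = Agent('openai:gpt-5', output_type=[str, DeferredToolRequests])


@agent.tool_plain
def delete_file(path: str) -> str:
    # Hand this call off to an external system, attaching context for the approver.
    raise CallDeferred(metadata={'path': path, 'reason': 'destructive operation'})


result = agent.run_sync('Delete README.md')
if isinstance(result.output, DeferredToolRequests):
    for call in result.output.calls:
        # Metadata is keyed by tool_call_id, per footnote 1.
        print(call.tool_name, result.output.metadata.get(call.tool_call_id))
```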

13 changes: 8 additions & 5 deletions pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -307,10 +307,6 @@ async def _handle_deferred_tool_results(  # noqa: C901
             raise exceptions.UserError(
                 'Tool call results were provided, but the message history does not contain any unprocessed tool calls.'
             )
-        if self.user_prompt is not None:
-            raise exceptions.UserError(
-                'Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
-            )

         tool_call_results: dict[str, DeferredToolResult | Literal['skip']] | None = None
         tool_call_results = {}

Collaborator (on the removed `if self.user_prompt is not None:` check):

I wonder if we can also remove the error here, now that we have CallToolsNode.user_prompt:

```python
elif isinstance(last_message, _messages.ModelResponse):
    if self.user_prompt is None:
        run_context = build_run_context(ctx)
        instructions = await ctx.deps.get_instructions(run_context)
        if not instructions:
            # If there's no new prompt or instructions, skip ModelRequestNode and go directly to CallToolsNode
            return CallToolsNode[DepsT, NodeRunEndT](last_message)
    elif last_message.tool_calls:
        raise exceptions.UserError(
            'Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
        )
```
@@ -338,7 +334,9 @@ async def _handle_deferred_tool_results(  # noqa: C901
                     tool_call_results[part.tool_call_id] = 'skip'

         # Skip ModelRequestNode and go directly to CallToolsNode
-        return CallToolsNode[DepsT, NodeRunEndT](last_model_response, tool_call_results=tool_call_results)
+        return CallToolsNode[DepsT, NodeRunEndT](
+            last_model_response, tool_call_results=tool_call_results, user_prompt=self.user_prompt
+        )

     async def _reevaluate_dynamic_prompts(
         self, messages: list[_messages.ModelMessage], run_context: RunContext[DepsT]
@@ -543,6 +541,7 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):

     model_response: _messages.ModelResponse
     tool_call_results: dict[str, DeferredToolResult | Literal['skip']] | None = None
+    user_prompt: str | Sequence[_messages.UserContent] | None = None
Collaborator (on the new `user_prompt` field):
Let's add a docstring to clarify that this user prompt will only be sent to the model, along with the tool call results, if the model response being processed has tool calls. If it had final output, this user prompt is ignored.
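A sketch of how that docstring could read (wording is illustrative, not the PR author's):

```python
user_prompt: str | Sequence[_messages.UserContent] | None = None
"""User prompt to append to the model request that carries the tool call results.

Only sent when the model response being processed contains tool calls; if that
response produced final output instead, this prompt is ignored.
"""
```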


     _events_iterator: AsyncIterator[_messages.HandleResponseEvent] | None = field(default=None, init=False, repr=False)
     _next_node: ModelRequestNode[DepsT, NodeRunEndT] | End[result.FinalResult[NodeRunEndT]] | None = field(

@@ -723,6 +722,10 @@ async def _handle_tool_calls(
             final_result = output_final_result[0]
             self._next_node = self._handle_final_result(ctx, final_result, output_parts)
         else:
+            # Add user prompt if provided, after all tool return parts
+            if self.user_prompt is not None:
+                output_parts.append(_messages.UserPromptPart(self.user_prompt))
+
             instructions = await ctx.deps.get_instructions(run_context)
             self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
                 _messages.ModelRequest(parts=output_parts, instructions=instructions)
60 changes: 51 additions & 9 deletions tests/test_agent.py
@@ -5583,15 +5583,6 @@ async def test_run_with_deferred_tool_results_errors():
             deferred_tool_results=DeferredToolResults(approvals={'create_file': True}),
         )

-    with pytest.raises(
-        UserError, match='Cannot provide a new user prompt when the message history contains unprocessed tool calls.'
-    ):
-        await agent.run(
-            'Hello again',
-            message_history=message_history,
-            deferred_tool_results=DeferredToolResults(approvals={'create_file': True}),
-        )
-
     message_history: list[ModelMessage] = [
         ModelRequest(parts=[UserPromptPart(content='Hello')]),
         ModelResponse(
@@ -5628,6 +5619,57 @@
     )


+async def test_user_prompt_with_deferred_tool_results():
+    """Test that user_prompt can be provided alongside deferred_tool_results."""
+    from pydantic_ai.exceptions import ApprovalRequired
+
+    def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
+        # First call: model requests tool approval
+        if len(messages) == 1:
+            return ModelResponse(
+                parts=[
+                    ToolCallPart(
+                        tool_name='update_file', tool_call_id='update_file_1', args={'path': '.env', 'content': ''}
+                    ),
+                ]
+            )
+        # Second call: model responds to tool results and user prompt
+        else:
+            # Verify we received both tool results and user prompt
+            last_request = messages[-1]
+            assert isinstance(last_request, ModelRequest)
+            has_tool_return = any(isinstance(p, ToolReturnPart) for p in last_request.parts)
+            has_user_prompt = any(isinstance(p, UserPromptPart) for p in last_request.parts)
+            assert has_tool_return, 'Expected tool return part in request'
+            assert has_user_prompt, 'Expected user prompt part in request'
+
+            # Get user prompt content
+            user_prompt_content = next(p.content for p in last_request.parts if isinstance(p, UserPromptPart))
+            return ModelResponse(parts=[TextPart(f'Approved and {user_prompt_content}')])
+
+    agent = Agent(FunctionModel(llm), output_type=[str, DeferredToolRequests])
+
+    @agent.tool
+    def update_file(ctx: RunContext, path: str, content: str) -> str:
+        if path == '.env' and not ctx.tool_call_approved:
+            raise ApprovalRequired
+        return f'File {path!r} updated'
+
+    # First run: get deferred tool requests
+    result = await agent.run('Update .env file')
+    assert isinstance(result.output, DeferredToolRequests)
+    assert len(result.output.approvals) == 1
+
+    messages = result.all_messages()
Collaborator (on `messages = result.all_messages()`):
Can we include this in the test with a snapshot, as well as new_messages for the next run? I always find it easier to understand what's going on that way.
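A sketch of what that could look like, assuming the suite's usual `inline_snapshot` workflow (the snapshot values get filled in by `pytest --inline-snapshot=create`):

```python
from inline_snapshot import snapshot

# Snapshot the full history after the first run...
assert result.all_messages() == snapshot()

# ...and, after the second run, only the messages that run added
# (the ToolReturnPart + UserPromptPart request and the final response):
assert result2.new_messages() == snapshot()
```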


+    # Second run: provide approvals AND user prompt
+    results = DeferredToolResults(approvals={result.output.approvals[0].tool_call_id: True})
+    result2 = await agent.run('continue with the operation', message_history=messages, deferred_tool_results=results)
+
+    assert isinstance(result2.output, str)
+    assert 'continue with the operation' in result2.output
+
+
 def test_tool_requires_approval_error():
     agent = Agent('test')
24 changes: 24 additions & 0 deletions tests/test_examples.py
@@ -874,6 +874,30 @@ async def model_logic(  # noqa: C901
                 )
             ]
         )
+    elif isinstance(m, UserPromptPart) and m.content == 'Now create a backup of README.md':
+        return ModelResponse(
+            parts=[
+                ToolCallPart(
+                    tool_name='update_file',
+                    args={'path': 'README.md.bak', 'content': 'Hello, world!'},
+                    tool_call_id='update_file_backup',
+                )
+            ],
+        )
+    elif isinstance(m, ToolReturnPart) and m.tool_name == 'update_file' and 'README.md.bak' in m.content:
+        return ModelResponse(
+            parts=[
+                TextPart(
+                    "Here's what I've done:\n"
+                    '- Attempted to delete __init__.py, but deletion is not allowed.\n'
+                    '- Updated README.md with: Hello, world!\n'
+                    '- Cleared .env (set to empty).\n'
+                    '- Created a backup at README.md.bak containing: Hello, world!\n'
+                    '\n'
+                    'If you want a different backup name or format (e.g., timestamped like README_2025-11-24.bak), let me know.'
+                )
+            ],
+        )
     elif isinstance(m, ToolReturnPart) and m.tool_name == 'calculate_answer':
         return ModelResponse(
             parts=[TextPart('The answer to the ultimate question of life, the universe, and everything is 42.')]
53 changes: 53 additions & 0 deletions tests/test_tools.py
@@ -1396,6 +1396,59 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
     assert result.output == snapshot('Done!')


+def test_approval_required_with_user_prompt():
+    """Test that user_prompt can be provided alongside deferred_tool_results for approval."""
+
+    def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
+        if len(messages) == 1:
+            # First call: request approval
+            return ModelResponse(
+                parts=[
+                    ToolCallPart('my_tool', {'x': 1}, tool_call_id='my_tool'),
+                ]
+            )
+        else:
+            # Second call: respond to both tool result and user prompt
+            last_request = messages[-1]
+            assert isinstance(last_request, ModelRequest)
+
+            # Verify we received both tool return and user prompt
+            has_tool_return = any(isinstance(p, ToolReturnPart) for p in last_request.parts)
+            has_user_prompt = any(isinstance(p, UserPromptPart) for p in last_request.parts)
+            assert has_tool_return, 'Expected tool return in request'
+            assert has_user_prompt, 'Expected user prompt in request'
+
+            # Get user prompt content
+            user_prompt = next(p.content for p in last_request.parts if isinstance(p, UserPromptPart))
+            return ModelResponse(parts=[TextPart(f'Tool executed and {user_prompt}')])
+
+    agent = Agent(FunctionModel(llm), output_type=[str, DeferredToolRequests])
+
+    @agent.tool
+    def my_tool(ctx: RunContext[None], x: int) -> int:
+        if not ctx.tool_call_approved:
+            raise ApprovalRequired
+        return x * 42
+
+    # First run: get approval request
+    result = agent.run_sync('Hello')
+    messages = result.all_messages()
+    assert isinstance(result.output, DeferredToolRequests)
+    assert len(result.output.approvals) == 1
+
+    # Second run: provide approval AND user prompt
+    result = agent.run_sync(
+        user_prompt='continue with extra instructions',
+        message_history=messages,
+        deferred_tool_results=DeferredToolResults(approvals={'my_tool': True}),
+    )
+
+    # Verify the response includes both tool result and user prompt
+    assert isinstance(result.output, str)
+    assert 'continue with extra instructions' in result.output
+    assert 'Tool executed' in result.output
+
+
 def test_call_deferred_with_metadata():
     """Test that CallDeferred exception can carry metadata."""
     agent = Agent(TestModel(), output_type=[str, DeferredToolRequests])