Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 10 additions & 2 deletions pydantic_ai_slim/pydantic_ai/_agent_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -588,7 +588,11 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa
# as the empty response and request will not create any items in the API payload,
# in the hope the model will return a non-empty response this time.
ctx.state.increment_retries(ctx.deps.max_result_retries, model_settings=ctx.deps.model_settings)
self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
run_context = build_run_context(ctx)
instructions = await ctx.deps.get_instructions(run_context)
self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
_messages.ModelRequest(parts=[], instructions=instructions)
)
return

text = ''
Expand Down Expand Up @@ -652,7 +656,11 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]: # noqa
ctx.state.increment_retries(
ctx.deps.max_result_retries, error=e, model_settings=ctx.deps.model_settings
)
self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
run_context = build_run_context(ctx)
instructions = await ctx.deps.get_instructions(run_context)
self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
_messages.ModelRequest(parts=[e.tool_retry], instructions=instructions)
)

self._events_iterator = _run_stream()

Expand Down
35 changes: 34 additions & 1 deletion tests/test_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -1836,7 +1836,14 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
tool_call_id=IsStr(),
timestamp=IsDatetime(),
)
]
],
instructions="""\
Always respond with a JSON object that's compatible with this schema:

{"additionalProperties": false, "properties": {"city": {"type": "string"}}, "required": ["city"], "type": "object", "title": "get_weather"}

Don't include any text or Markdown fencing before or after.\
""",
),
ModelResponse(
parts=[TextPart(content='{"city": "Mexico City"}')],
Expand Down Expand Up @@ -3852,6 +3859,32 @@ def empty_instructions() -> str:
)


def test_multi_agent_instructions_with_structured_output():
    """Verify Agent2 applies its own instructions when given Agent1's message history.

    Regression test for issue #3207: sequential agent runs with no user_prompt
    and a structured output type dropped the second agent's instructions.
    """

    class Output(BaseModel):
        text: str

    first_agent = Agent('test', instructions='Agent 1 instructions')
    second_agent = Agent('test', instructions='Agent 2 instructions', output_type=Output)

    first_result = first_agent.run_sync('Hello')

    # TestModel cannot produce structured output, so the second run exhausts its
    # retries and raises — but the captured retry requests still reveal which
    # agent's instructions were sent to the model.
    with capture_run_messages() as captured:
        with pytest.raises(UnexpectedModelBehavior):
            second_agent.run_sync(message_history=first_result.new_messages())

    # At least one of Agent2's requests must carry Agent2's instructions,
    # proving Agent1's instructions were not reused from the history.
    sent_instructions = [m.instructions for m in captured if isinstance(m, ModelRequest)]
    assert 'Agent 2 instructions' in sent_instructions


def test_empty_final_response():
def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
if len(messages) == 1:
Expand Down