diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
index f0733a8abe..33b53ca0db 100644
--- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py
+++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -588,7 +588,11 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:  # noqa
                     # as the empty response and request will not create any items in the API payload,
                     # in the hope the model will return a non-empty response this time.
                     ctx.state.increment_retries(ctx.deps.max_result_retries, model_settings=ctx.deps.model_settings)
-                    self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[]))
+                    run_context = build_run_context(ctx)
+                    instructions = await ctx.deps.get_instructions(run_context)
+                    self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
+                        _messages.ModelRequest(parts=[], instructions=instructions)
+                    )
                     return
 
                 text = ''
@@ -652,7 +656,11 @@ async def _run_stream() -> AsyncIterator[_messages.HandleResponseEvent]:  # noqa
                     ctx.state.increment_retries(
                         ctx.deps.max_result_retries, error=e, model_settings=ctx.deps.model_settings
                     )
-                    self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
+                    run_context = build_run_context(ctx)
+                    instructions = await ctx.deps.get_instructions(run_context)
+                    self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
+                        _messages.ModelRequest(parts=[e.tool_retry], instructions=instructions)
+                    )
 
             self._events_iterator = _run_stream()
 
diff --git a/tests/test_agent.py b/tests/test_agent.py
index a0b271c3f3..baadc7dbba 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -1836,7 +1836,14 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                instructions="""\
+Always respond with a JSON object that's compatible with this schema:
+
+{"additionalProperties": false, "properties": {"city": {"type": "string"}}, "required": ["city"], "type": "object", "title": "get_weather"}
+
+Don't include any text or Markdown fencing before or after.\
+""",
             ),
             ModelResponse(
                 parts=[TextPart(content='{"city": "Mexico City"}')],
@@ -3852,6 +3859,32 @@ def empty_instructions() -> str:
     )
 
 
+def test_multi_agent_instructions_with_structured_output():
+    """Test that Agent2 uses its own instructions when called with Agent1's history.
+
+    Reproduces issue #3207: when running agents sequentially with no user_prompt
+    and structured output, Agent2's instructions were ignored.
+    """
+
+    class Output(BaseModel):
+        text: str
+
+    agent1 = Agent('test', instructions='Agent 1 instructions')
+    agent2 = Agent('test', instructions='Agent 2 instructions', output_type=Output)
+
+    result1 = agent1.run_sync('Hello')
+
+    # TestModel doesn't support structured output, so this will fail with retries
+    # But we can still verify that Agent2's instructions are used in retry requests
+    with capture_run_messages() as messages:
+        with pytest.raises(UnexpectedModelBehavior):
+            agent2.run_sync(message_history=result1.new_messages())
+
+    # Verify Agent2's retry requests used Agent2's instructions (not Agent1's)
+    requests = [m for m in messages if isinstance(m, ModelRequest)]
+    assert any(r.instructions == 'Agent 2 instructions' for r in requests)
+
+
 def test_empty_final_response():
     def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
         if len(messages) == 1: