
UnexpectedModelBehavior with Gemini - finish_reason == 'stop' and candidate.content.parts is empty #3289

@amiyapatanaik


Initial Checks

Description

My Gemini-based agent consistently triggers this exception. It always happens after all tool calls have completed successfully; only in the final step does it fail with UnexpectedModelBehavior: Content field missing from Gemini response, body. The full traceback is shown below:

Traceback (most recent call last):
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/opentelemetry/trace/__init__.py", line 589, in use_span
    yield span
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_graph/beta/graph.py", line 254, in iter
    yield graph_run
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_ai/agent/__init__.py", line 662, in iter
    yield agent_run
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_ai/agent/abstract.py", line 235, in run
    async for node in agent_run:
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_ai/run.py", line 148, in __anext__
    task = await anext(self._graph_run)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_graph/beta/graph.py", line 410, in __anext__
    self._next = await self._iterator.asend(self._next)
                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_graph/beta/graph.py", line 497, in iter_graph
    with _unwrap_exception_groups():
         ^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/contextlib.py", line 158, in __exit__
    self.gen.throw(value)
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_graph/beta/graph.py", line 866, in _unwrap_exception_groups
    raise exception
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_graph/beta/graph.py", line 638, in _run_tracked_task
    result = await self._run_task(t_)
             ^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_graph/beta/graph.py", line 667, in _run_task
    output = await node.call(step_context)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_graph/beta/step.py", line 253, in _call_node
    return await node.run(GraphRunContext(state=ctx.state, deps=ctx.deps))
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_ai/_agent_graph.py", line 417, in run
    return await self._make_request(ctx)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_ai/_agent_graph.py", line 459, in _make_request
    model_response = await ctx.deps.model.request(message_history, model_settings, model_request_parameters)
                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_ai/models/instrumented.py", line 362, in request
    response = await self.wrapped.request(messages, model_settings, model_request_parameters)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_ai/models/google.py", line 240, in request
    return self._process_response(response)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/amiya/Codebase/nexdoc/code/viz-agent/.venv/lib/python3.12/site-packages/pydantic_ai/models/google.py", line 475, in _process_response
    raise UnexpectedModelBehavior(
pydantic_ai.exceptions.UnexpectedModelBehavior: Content field missing from Gemini response, body:
{
  "sdk_http_response": {
    "headers": {
      "content-type": "application/json; charset=UTF-8",
      "vary": "Origin, X-Origin, Referer",
      "content-encoding": "gzip",
      "date": "Thu, 30 Oct 2025 02:37:24 GMT",
      "server": "scaffolding on HTTPServer2",
      "x-xss-protection": "0",
      "x-frame-options": "SAMEORIGIN",
      "x-content-type-options": "nosniff",
      "server-timing": "gfet4t7; dur=2480",
      "alt-svc": "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000",
      "transfer-encoding": "chunked"
    },
    "body": null
  },
  "candidates": [
    {
      "content": {
        "parts": null,
        "role": "model"
      },
      "citation_metadata": null,
      "finish_message": null,
      "token_count": null,
      "finish_reason": "STOP",
      "url_context_metadata": null,
      "avg_logprobs": null,
      "grounding_metadata": null,
      "index": 0,
      "logprobs_result": null,
      "safety_ratings": null
    }
  ],
  "create_time": null,
  "model_version": "gemini-2.5-pro",
  "prompt_feedback": null,
  "response_id": "ZM8CaejpA-vJjuMP_PuQmQQ",
  "usage_metadata": {
    "cache_tokens_details": [
      {
        "modality": "TEXT",
        "token_count": 3055
      },
      {
        "modality": "IMAGE",
        "token_count": 126
      }
    ],
    "cached_content_token_count": 3181,
    "candidates_token_count": null,
    "candidates_tokens_details": null,
    "prompt_token_count": 6495,
    "prompt_tokens_details": [
      {
        "modality": "TEXT",
        "token_count": 6237
      },
      {
        "modality": "IMAGE",
        "token_count": 258
      }
    ],
    "thoughts_token_count": null,
    "tool_use_prompt_token_count": null,
    "tool_use_prompt_tokens_details": null,
    "total_token_count": 6495,
    "traffic_type": null
  },
  "automatic_function_calling_history": [],
  "parsed": null
}

It appears that, for whatever reason, the model returns an empty response in the last step. The Gemini code that raises this is in pydantic_ai/models/google.py (lines 469 to 477):

        if candidate.content is None or candidate.content.parts is None:
            if finish_reason == 'content_filter' and raw_finish_reason:
                raise UnexpectedModelBehavior(
                    f'Content filter {raw_finish_reason.value!r} triggered', response.model_dump_json()
                )
            else:
                raise UnexpectedModelBehavior(
                    'Content field missing from Gemini response', response.model_dump_json()
                )  # pragma: no cover

If I print the candidate, I get this:

content=Content(
  role='model'
) citation_metadata=None finish_message=None token_count=None finish_reason=<FinishReason.STOP: 'STOP'> url_context_metadata=None avg_logprobs=None grounding_metadata=None index=0 logprobs_result=None safety_ratings=None

So candidate.content.parts is None evaluates to True, and finish_reason is 'stop'.
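
For reference, the failing condition can be reproduced in isolation with the google-genai SDK types. This is a hand-built Candidate shaped like the one in the response body above, not a real API object:

from google.genai import types

# Candidate with content present but parts is None, and finish_reason STOP,
# matching the response body pasted above.
candidate = types.Candidate(
    content=types.Content(role='model', parts=None),
    finish_reason=types.FinishReason.STOP,
)

print(candidate.content is None or candidate.content.parts is None)  # True
print(candidate.finish_reason)  # FinishReason.STOP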

So it seems the correct thing to do here would be:

        if candidate.content is None or candidate.content.parts is None:
            # If finish_reason is 'stop', do not raise any issue.
            if finish_reason == 'stop':
                pass  # Let it fall through (parts will be empty)
            elif finish_reason == 'content_filter' and raw_finish_reason:
                raise UnexpectedModelBehavior(
                    f'Content filter {raw_finish_reason.value!r} triggered', response.model_dump_json()
                )
            elif raw_finish_reason:
                raise UnexpectedModelBehavior(
                    f'Generation stopped: {raw_finish_reason.value!r}', response.model_dump_json()
                )
            else:
                raise UnexpectedModelBehavior(
                    'Content field missing from Gemini response', response.model_dump_json()
                )  # pragma: no cover

and that would resolve the issue. What I'm not sure about is whether an empty final response with finish_reason == 'stop' is expected behaviour from Gemini.
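
In the meantime I'm working around it at the call site with something like this. It's a rough sketch: run_with_retry is my own helper, not a Pydantic AI API, and it simply re-runs the whole agent when this specific error appears:

from pydantic_ai.exceptions import UnexpectedModelBehavior

async def run_with_retry(agent, prompt, attempts=3):
    # Re-run the whole agent when Gemini returns the empty final candidate;
    # re-raise anything else, and give up after the last attempt.
    for attempt in range(attempts):
        try:
            return await agent.run(prompt)
        except UnexpectedModelBehavior as exc:
            if 'Content field missing' not in str(exc) or attempt == attempts - 1:
                raise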

Example Code
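
I don't have a minimal reproduction that fails deterministically, since the failure depends on the model's final response. The shape of my agent is roughly this (the model string, system prompt, and tool are placeholders, not my real code):

from pydantic_ai import Agent

agent = Agent(
    'google-gla:gemini-2.5-pro',  # placeholder; the real agent uses gemini-2.5-pro via the google model
    system_prompt='Analyse the supplied chart and summarise it.',  # placeholder
)

@agent.tool_plain
def load_chart_data(chart_id: str) -> str:
    """Placeholder tool; in the real agent several tools like this all succeed first."""
    return '{"series": [1, 2, 3]}'

result = agent.run_sync('Summarise chart 42.')  # the exception is raised on the final model request
print(result.output)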

Python, Pydantic AI & LLM client version

Pydantic AI 1.8.0
Python 3.12
