Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 23 additions & 11 deletions src/google/adk/models/lite_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -823,11 +823,15 @@ def _model_response_to_generate_content_response(
) -> LlmResponse:
"""Converts a litellm response to LlmResponse. Also adds usage metadata.

When a response has no message (e.g., turn ends with only tool calls),
returns an empty LlmResponse with finish_reason and usage_metadata if
available, instead of raising ValueError.

Args:
response: The model response.

Returns:
The LlmResponse.
The LlmResponse. May have empty content if message is None.
"""

message = None
Expand All @@ -837,17 +841,25 @@ def _model_response_to_generate_content_response(
message = first_choice.get("message", None)
finish_reason = first_choice.get("finish_reason", None)

if not message:
raise ValueError("No message in response")
# Handle case where message is None or empty (valid when turn ends with
# tool calls only). Create empty LlmResponse instead of raising error.
if message:
thought_parts = _convert_reasoning_value_to_parts(
_extract_reasoning_value(message)
)
llm_response = _message_to_generate_content_response(
message,
model_version=response.model,
thought_parts=thought_parts or None,
)
else:
# Create empty LlmResponse when message is None or empty
llm_response = LlmResponse(
content=types.Content(role="model", parts=[]),
model_version=response.model,
)

thought_parts = _convert_reasoning_value_to_parts(
_extract_reasoning_value(message)
)
llm_response = _message_to_generate_content_response(
message,
model_version=response.model,
thought_parts=thought_parts or None,
)
# Common logic for finish_reason and usage_metadata
if finish_reason:
# If LiteLLM already provides a FinishReason enum (e.g., for Gemini), use
# it directly. Otherwise, map the finish_reason string to the enum.
Expand Down
78 changes: 78 additions & 0 deletions tests/unittests/models/test_litellm.py
Original file line number Diff line number Diff line change
Expand Up @@ -2610,6 +2610,84 @@ async def test_finish_reason_propagation(
mock_acompletion.assert_called_once()


def test_model_response_to_generate_content_response_no_message_with_finish_reason():
  """Verify a choice without a message converts to an empty LlmResponse.

  Regression test for issue #3618: when a turn ends with only tool calls,
  the provider returns a choice that has a finish_reason but no final
  message. The converter must produce an empty LlmResponse that still
  carries finish_reason and usage metadata, instead of raising ValueError.
  """
  choice = {
      "finish_reason": "tool_calls",
      # Deliberately no "message" key.
  }
  usage = {
      "prompt_tokens": 10,
      "completion_tokens": 5,
      "total_tokens": 15,
  }
  model_response = ModelResponse(
      model="test_model",
      choices=[choice],
      usage=usage,
  )

  result = _model_response_to_generate_content_response(model_response)

  # An empty model-role content is produced rather than a ValueError.
  assert result.content is not None
  assert result.content.role == "model"
  assert not result.content.parts
  # The "tool_calls" finish reason is mapped onto FinishReason.STOP.
  assert result.finish_reason == types.FinishReason.STOP
  assert result.usage_metadata is not None
  assert result.usage_metadata.prompt_token_count == 10
  assert result.usage_metadata.candidates_token_count == 5
  assert result.model_version == "test_model"


def test_model_response_to_generate_content_response_no_message_no_finish_reason():
  """Verify a choice missing both message and finish_reason converts cleanly.

  Even with neither field present, the converter must return an empty
  LlmResponse rather than raise ValueError.
  """
  bare_choice = {}  # Neither "message" nor "finish_reason" is set.
  model_response = ModelResponse(model="test_model", choices=[bare_choice])

  result = _model_response_to_generate_content_response(model_response)

  # The conversion succeeds and yields empty model-role content.
  assert result.content is not None
  assert result.content.role == "model"
  assert not result.content.parts
  # finish_reason may be None or defaulted; the key property under test is
  # simply that no ValueError was raised.
  assert result.model_version == "test_model"


def test_model_response_to_generate_content_response_empty_message_dict():
  """Verify an empty (falsy) message dict converts to an empty LlmResponse."""
  choice = {
      "message": {},  # Falsy message must be treated like a missing one.
      "finish_reason": "stop",
  }
  usage = {
      "prompt_tokens": 5,
      "completion_tokens": 3,
      "total_tokens": 8,
  }
  model_response = ModelResponse(
      model="test_model",
      choices=[choice],
      usage=usage,
  )

  result = _model_response_to_generate_content_response(model_response)

  # No ValueError: an empty model-role content with metadata is returned.
  assert result.content is not None
  assert result.content.role == "model"
  assert not result.content.parts
  assert result.finish_reason == types.FinishReason.STOP
  assert result.usage_metadata is not None


@pytest.mark.asyncio
async def test_finish_reason_unknown_maps_to_other(
mock_acompletion, lite_llm_instance
Expand Down