diff --git a/src/google/adk/models/lite_llm.py b/src/google/adk/models/lite_llm.py
index 9e3698b190..f47485e3c2 100644
--- a/src/google/adk/models/lite_llm.py
+++ b/src/google/adk/models/lite_llm.py
@@ -367,11 +367,21 @@ def _content_to_message_param(
   tool_messages = []
   for part in content.parts:
     if part.function_response:
+      # FIX: Check if the response is already a string before serializing.
+      # MCP tool responses come as JSON strings, but _safe_json_serialize was
+      # double-serializing them (json.dumps on already-JSON strings), causing
+      # triple-nested JSON like: '{"content": [{"type": "text", "text": "{\n  \"type\"..."}]}'
+      # This prevented Claude/GPT from parsing tool results correctly.
+      response_content = (
+          part.function_response.response
+          if isinstance(part.function_response.response, str)
+          else _safe_json_serialize(part.function_response.response)
+      )
       tool_messages.append(
           ChatCompletionToolMessage(
               role="tool",
               tool_call_id=part.function_response.id,
-              content=_safe_json_serialize(part.function_response.response),
+              content=response_content,
           )
       )
   if tool_messages:
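
A minimal standalone sketch of the behavior this diff addresses. It is an illustration, not the actual lite_llm.py code: _safe_json_serialize_sketch is a hypothetical stand-in for the real _safe_json_serialize helper, and the sample MCP response string is invented for the example.

import json


def _safe_json_serialize_sketch(obj) -> str:
  """Simplified stand-in for _safe_json_serialize: json.dumps with a fallback."""
  try:
    return json.dumps(obj)
  except (TypeError, ValueError):
    return str(obj)


# An MCP tool response that is already a JSON string.
mcp_response = '{"content": [{"type": "text", "text": "hello"}]}'

# Old behavior: serializing the string again wraps it in quotes and escapes
# every inner quote, so the model receives an encoded string rather than a
# JSON object it can parse.
double_encoded = _safe_json_serialize_sketch(mcp_response)
print(double_encoded)
# "{\"content\": [{\"type\": \"text\", \"text\": \"hello\"}]}"

# New behavior (mirrors the guard added in the diff): pass strings through
# untouched, serialize everything else.
response_content = (
    mcp_response
    if isinstance(mcp_response, str)
    else _safe_json_serialize_sketch(mcp_response)
)
print(response_content)
# {"content": [{"type": "text", "text": "hello"}]}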