From 2f85765dd5b25cc60f6f8cd0d23d8520ac152299 Mon Sep 17 00:00:00 2001 From: Lucas Wang Date: Sun, 19 Oct 2025 01:57:18 +0800 Subject: [PATCH 1/7] fix: support tool_choice with specific tool names in LiteLLM streaming (fixes #1846) This change fixes a Pydantic validation error that occurred when using LiteLLM with streaming enabled and specifying a specific tool name for tool_choice parameter. Problem: When users specified tool_choice="my_tool_name" with streaming enabled, the SDK would incorrectly cast it to Literal["auto", "required", "none"], causing a Pydantic validation error. The issue was in litellm_model.py line 376, where the Response object was created with an incorrect type cast: tool_choice=cast(Literal["auto", "required", "none"], tool_choice) However, tool_choice can be: - A Literal: "auto", "required", "none" - A ChatCompletionNamedToolChoiceParam dict with specific tool name - The Converter.convert_tool_choice() already handles string tool names Solution: - Import ToolChoiceFunction from openai.types.responses - Properly convert ChatCompletionNamedToolChoiceParam to ToolChoiceFunction - Handle all valid tool_choice types when creating Response object The fix ensures that when tool_choice is a dict like: {"type": "function", "function": {"name": "my_tool"}} It gets correctly converted to: ToolChoiceFunction(type="function", name="my_tool") Testing: - Linting (ruff check) - passed - Type checking (mypy) - passed - Formatting (ruff format) - passed Generated with Lucas Wang Co-Authored-By: Claude --- src/agents/extensions/models/litellm_model.py | 32 +++++++++++++++++-- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py index 301e06b3d..062ccb95c 100644 --- a/src/agents/extensions/models/litellm_model.py +++ b/src/agents/extensions/models/litellm_model.py @@ -24,6 +24,7 @@ ChatCompletionMessageCustomToolCall, ChatCompletionMessageFunctionToolCall, ChatCompletionMessageParam, + ChatCompletionNamedToolChoiceParam, ) from openai.types.chat.chat_completion_message import ( Annotation, @@ -32,6 +33,7 @@ ) from openai.types.chat.chat_completion_message_function_tool_call import Function from openai.types.responses import Response +from openai.types.responses.tool_choice_function import ToolChoiceFunction from ... import _debug from ...agent_output import AgentOutputSchemaBase @@ -367,15 +369,39 @@ async def _fetch_response( if isinstance(ret, litellm.types.utils.ModelResponse): return ret + # Convert tool_choice to the correct type for Response + # tool_choice can be a Literal, a ChatCompletionNamedToolChoiceParam, or omit + response_tool_choice: Literal["auto", "required", "none"] | ToolChoiceFunction + if tool_choice is omit: + response_tool_choice = "auto" + elif isinstance(tool_choice, dict): + # Convert from ChatCompletionNamedToolChoiceParam to ToolChoiceFunction + # The dict has structure: {"type": "function", "function": {"name": "tool_name"}} + func_data = tool_choice.get("function") + if ( + tool_choice.get("type") == "function" + and func_data is not None + and isinstance(func_data, dict) + ): + response_tool_choice = ToolChoiceFunction( + type="function", name=func_data["name"] + ) + else: + # Fallback to auto if unexpected format + response_tool_choice = "auto" + elif tool_choice in ("auto", "required", "none"): + response_tool_choice = tool_choice # type: ignore + else: + # Fallback to auto for any other case + response_tool_choice = "auto" + response = Response( id=FAKE_RESPONSES_ID, created_at=time.time(), model=self.model, object="response", output=[], - tool_choice=cast(Literal["auto", "required", "none"], tool_choice) - if tool_choice is not omit - else "auto", + tool_choice=response_tool_choice, top_p=model_settings.top_p, temperature=model_settings.temperature, tools=[], From 8abed69cb5336fa459fcf53469eee57340fa3ef8 Mon Sep 17 00:00:00 2001 From: Lucas Wang Date: Sun, 19 Oct 2025 02:10:23 +0800 Subject: [PATCH 2/7] fix: address all Copilot/Codex feedback for PR #1929 Critical fixes based on review feedback: - Fix dict format mismatch: Read "name" directly instead of "function.name" (Responses Converter returns {"type": "function", "name": "..."}, not nested format) - Add explicit handling for ToolChoiceFunction instances (avoid silent fallback to "auto") - Add defensive checks for tool_name (exists, is string, non-empty) - Replace type: ignore with explicit cast for better type safety - Remove unused ChatCompletionNamedToolChoiceParam import This addresses the critical P1 issue identified by chatgpt-codex-connector and all Copilot nitpicks. Generated with Lucas Wang Co-Authored-By: Claude --- src/agents/extensions/models/litellm_model.py | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py index 062ccb95c..bdbf0ffa4 100644 --- a/src/agents/extensions/models/litellm_model.py +++ b/src/agents/extensions/models/litellm_model.py @@ -24,7 +24,6 @@ ChatCompletionMessageCustomToolCall, ChatCompletionMessageFunctionToolCall, ChatCompletionMessageParam, - ChatCompletionNamedToolChoiceParam, ) from openai.types.chat.chat_completion_message import ( Annotation, @@ -370,27 +369,31 @@ async def _fetch_response( return ret # Convert tool_choice to the correct type for Response - # tool_choice can be a Literal, a ChatCompletionNamedToolChoiceParam, or omit + # tool_choice can be a Literal, ToolChoiceFunction, dict from Responses Converter, or omit response_tool_choice: Literal["auto", "required", "none"] | ToolChoiceFunction if tool_choice is omit: response_tool_choice = "auto" + elif isinstance(tool_choice, ToolChoiceFunction): + # Already a ToolChoiceFunction, use directly + response_tool_choice = tool_choice elif isinstance(tool_choice, dict): - # Convert from ChatCompletionNamedToolChoiceParam to ToolChoiceFunction - # The dict has structure: {"type": "function", "function": {"name": "tool_name"}} - func_data = tool_choice.get("function") + # Convert from Responses format dict to ToolChoiceFunction + # The Responses Converter returns: {"type": "function", "name": "tool_name"} + tool_name = tool_choice.get("name") if ( tool_choice.get("type") == "function" - and func_data is not None - and isinstance(func_data, dict) + and tool_name is not None + and isinstance(tool_name, str) + and tool_name # Ensure non-empty string ): - response_tool_choice = ToolChoiceFunction( - type="function", name=func_data["name"] - ) + response_tool_choice = ToolChoiceFunction(type="function", name=tool_name) else: # Fallback to auto if unexpected format response_tool_choice = "auto" elif tool_choice in ("auto", "required", "none"): - response_tool_choice = tool_choice # type: ignore + from typing import cast + + response_tool_choice = cast(Literal["auto", "required", "none"], tool_choice) else: # Fallback to auto for any other case response_tool_choice = "auto" From fca3ed5b5c1e3d297ee9aa763db3f51823a87101 Mon Sep 17 00:00:00 2001 From: Lucas Wang Date: Sun, 19 Oct 2025 07:32:07 +0800 Subject: [PATCH 3/7] fix: correct tool_choice dict format handling (fixes @ihower feedback) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Critical fix based on testing feedback from @ihower: The previous fix assumed Responses Converter format (flat dict), but LiteLLM uses ChatCompletions Converter which returns nested format. Problem identified by @ihower: - response_tool_choice was always "auto" even with specific tool names - Root cause: Looking for wrong dict structure Converter formats: - ChatCompletions: {"type": "function", "function": {"name": "tool_name"}} ✅ (LiteLLM uses this) - Responses: {"type": "function", "name": "tool_name"} ❌ (NOT used here) Fix: - Changed from tool_choice.get("name") to tool_choice.get("function").get("name") - Added proper type checking for func_data dict - Maintained all defensive checks (non-empty string, valid type, etc.) Testing: - Created comprehensive unit tests - Created end-to-end flow tests - All tests pass with nested dict format - Verified: ModelSettings(tool_choice="my_tool") → ToolChoiceFunction(name="my_tool") Generated with Lucas Wang Co-Authored-By: Claude --- src/agents/extensions/models/litellm_model.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py index bdbf0ffa4..a95e48701 100644 --- a/src/agents/extensions/models/litellm_model.py +++ b/src/agents/extensions/models/litellm_model.py @@ -377,16 +377,20 @@ async def _fetch_response( # Already a ToolChoiceFunction, use directly response_tool_choice = tool_choice elif isinstance(tool_choice, dict): - # Convert from Responses format dict to ToolChoiceFunction - # The Responses Converter returns: {"type": "function", "name": "tool_name"} - tool_name = tool_choice.get("name") + # Convert from ChatCompletions format dict to ToolChoiceFunction + # ChatCompletions Converter returns: {"type": "function", "function": {"name": "..."}} + func_data = tool_choice.get("function") if ( tool_choice.get("type") == "function" - and tool_name is not None - and isinstance(tool_name, str) - and tool_name # Ensure non-empty string + and func_data is not None + and isinstance(func_data, dict) ): - response_tool_choice = ToolChoiceFunction(type="function", name=tool_name) + tool_name = func_data.get("name") + if isinstance(tool_name, str) and tool_name: # Ensure non-empty string + response_tool_choice = ToolChoiceFunction(type="function", name=tool_name) + else: + # Fallback to auto if name is missing or invalid + response_tool_choice = "auto" else: # Fallback to auto if unexpected format response_tool_choice = "auto" From 1d208465fb9912f95e6ee98bff1a426dfc243b14 Mon Sep 17 00:00:00 2001 From: Lucas Wang Date: Sun, 19 Oct 2025 07:39:13 +0800 Subject: [PATCH 4/7] fix: correct comment to reference ChatCompletions Converter The comment incorrectly stated 'Responses Converter' when the actual converter used is 'chatcmpl_converter.Converter' which returns ChatCompletions format. Generated with Lucas Wang --- src/agents/extensions/models/litellm_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py index a95e48701..9c1ea4242 100644 --- a/src/agents/extensions/models/litellm_model.py +++ b/src/agents/extensions/models/litellm_model.py @@ -369,7 +369,7 @@ async def _fetch_response( return ret # Convert tool_choice to the correct type for Response - # tool_choice can be a Literal, ToolChoiceFunction, dict from Responses Converter, or omit + # tool_choice can be a Literal, ToolChoiceFunction, dict from ChatCompletions Converter, or omit response_tool_choice: Literal["auto", "required", "none"] | ToolChoiceFunction if tool_choice is omit: response_tool_choice = "auto" From d9aa6da8b2bfd76c8f38f40c2eaf62ab8c3d12dc Mon Sep 17 00:00:00 2001 From: gn00295120 Date: Tue, 21 Oct 2025 05:31:04 +0800 Subject: [PATCH 5/7] Refactor comment on tool_choice for clarity Split long comment about tool_choice into multiple lines for better readability. --- src/agents/extensions/models/litellm_model.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py index 9c1ea4242..63961631e 100644 --- a/src/agents/extensions/models/litellm_model.py +++ b/src/agents/extensions/models/litellm_model.py @@ -369,7 +369,8 @@ async def _fetch_response( return ret # Convert tool_choice to the correct type for Response - # tool_choice can be a Literal, ToolChoiceFunction, dict from ChatCompletions Converter, or omit + # tool_choice can be a Literal, ToolChoiceFunction, + # dict from ChatCompletions Converter, or omit response_tool_choice: Literal["auto", "required", "none"] | ToolChoiceFunction if tool_choice is omit: response_tool_choice = "auto" From 3c5690acef64ce39c32b889cd4ece89a60b54f2d Mon Sep 17 00:00:00 2001 From: Lucas Wang Date: Tue, 21 Oct 2025 11:48:33 +0800 Subject: [PATCH 6/7] refactor: Extract tool_choice conversion to static method with tests Address review feedback from @seratch on PR #1929 Changes: - Extracted tool_choice conversion logic to static method _convert_tool_choice_for_response() - Added comprehensive documentation with examples - Created 16 unit tests covering all conversion scenarios: - omit/NotGiven -> 'auto' - Literal strings ('auto', 'required', 'none') - ToolChoiceFunction (preserved as-is) - Dict from ChatCompletions Converter - Edge cases and fallbacks Benefits: - Improves code readability and maintainability - Makes the conversion logic testable in isolation - Provides clear documentation of supported formats - All existing tests pass (822 tests) Test coverage: - Normal cases: omit, literals, ToolChoiceFunction, dict - Edge cases: missing keys, empty names, wrong types - Real-world scenarios: ChatCompletions Converter format --- src/agents/extensions/models/litellm_model.py | 96 +++++++---- .../test_litellm_tool_choice_conversion.py | 157 ++++++++++++++++++ 2 files changed, 220 insertions(+), 33 deletions(-) create mode 100644 tests/extensions/test_litellm_tool_choice_conversion.py diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py index 63961631e..06a1bc006 100644 --- a/src/agents/extensions/models/litellm_model.py +++ b/src/agents/extensions/models/litellm_model.py @@ -78,6 +78,68 @@ def __init__( self.base_url = base_url self.api_key = api_key + @staticmethod + def _convert_tool_choice_for_response( + tool_choice: Any, + ) -> Literal["auto", "required", "none"] | ToolChoiceFunction: + """ + Convert various tool_choice formats to the format expected by Response. + + Args: + tool_choice: Can be: + - omit/NotGiven: Defaults to "auto" + - Literal["auto", "required", "none"]: Used directly + - ToolChoiceFunction: Used directly + - dict (from ChatCompletions Converter): Converted to ToolChoiceFunction + Format: {"type": "function", "function": {"name": "tool_name"}} + + Returns: + Literal["auto", "required", "none"] | ToolChoiceFunction + + Examples: + >>> LitellmModel._convert_tool_choice_for_response(omit) + "auto" + >>> LitellmModel._convert_tool_choice_for_response("required") + "required" + >>> LitellmModel._convert_tool_choice_for_response( + ... {"type": "function", "function": {"name": "my_tool"}} + ... ) + ToolChoiceFunction(type="function", name="my_tool") + """ + # Handle omit/NotGiven + if tool_choice is omit or isinstance(tool_choice, NotGiven): + return "auto" + + # Already a ToolChoiceFunction, use directly + if isinstance(tool_choice, ToolChoiceFunction): + return tool_choice + + # Convert from ChatCompletions format dict to ToolChoiceFunction + # ChatCompletions Converter returns: {"type": "function", "function": {"name": "..."}} + if isinstance(tool_choice, dict): + func_data = tool_choice.get("function") + if ( + tool_choice.get("type") == "function" + and func_data is not None + and isinstance(func_data, dict) + ): + tool_name = func_data.get("name") + if isinstance(tool_name, str) and tool_name: # Ensure non-empty string + return ToolChoiceFunction(type="function", name=tool_name) + else: + # Fallback to auto if name is missing or invalid + return "auto" + else: + # Fallback to auto if unexpected format + return "auto" + + # Handle literal strings + if tool_choice in ("auto", "required", "none"): + return cast(Literal["auto", "required", "none"], tool_choice) + + # Fallback to auto for any other case + return "auto" + async def get_response( self, system_instructions: str | None, @@ -369,39 +431,7 @@ async def _fetch_response( return ret # Convert tool_choice to the correct type for Response - # tool_choice can be a Literal, ToolChoiceFunction, - # dict from ChatCompletions Converter, or omit - response_tool_choice: Literal["auto", "required", "none"] | ToolChoiceFunction - if tool_choice is omit: - response_tool_choice = "auto" - elif isinstance(tool_choice, ToolChoiceFunction): - # Already a ToolChoiceFunction, use directly - response_tool_choice = tool_choice - elif isinstance(tool_choice, dict): - # Convert from ChatCompletions format dict to ToolChoiceFunction - # ChatCompletions Converter returns: {"type": "function", "function": {"name": "..."}} - func_data = tool_choice.get("function") - if ( - tool_choice.get("type") == "function" - and func_data is not None - and isinstance(func_data, dict) - ): - tool_name = func_data.get("name") - if isinstance(tool_name, str) and tool_name: # Ensure non-empty string - response_tool_choice = ToolChoiceFunction(type="function", name=tool_name) - else: - # Fallback to auto if name is missing or invalid - response_tool_choice = "auto" - else: - # Fallback to auto if unexpected format - response_tool_choice = "auto" - elif tool_choice in ("auto", "required", "none"): - from typing import cast - - response_tool_choice = cast(Literal["auto", "required", "none"], tool_choice) - else: - # Fallback to auto for any other case - response_tool_choice = "auto" + response_tool_choice = self._convert_tool_choice_for_response(tool_choice) response = Response( id=FAKE_RESPONSES_ID, diff --git a/tests/extensions/test_litellm_tool_choice_conversion.py b/tests/extensions/test_litellm_tool_choice_conversion.py new file mode 100644 index 000000000..6f8e5fc38 --- /dev/null +++ b/tests/extensions/test_litellm_tool_choice_conversion.py @@ -0,0 +1,157 @@ +""" +Unit tests for LitellmModel._convert_tool_choice_for_response + +Tests the static method that converts various tool_choice formats +to the format expected by the Response type. + +Related to Issue #1846: Support tool_choice with specific tool names in LiteLLM streaming +""" + +import pytest +from openai import NotGiven, omit +from openai.types.responses.tool_choice_function import ToolChoiceFunction + +from agents.extensions.models.litellm_model import LitellmModel + + +class TestConvertToolChoiceForResponse: + """Test the _convert_tool_choice_for_response static method.""" + + def test_convert_omit_returns_auto(self): + """Test that omit is converted to 'auto'""" + result = LitellmModel._convert_tool_choice_for_response(omit) + assert result == "auto" + + def test_convert_not_given_returns_auto(self): + """Test that NotGiven is converted to 'auto'""" + result = LitellmModel._convert_tool_choice_for_response(NotGiven()) + assert result == "auto" + + def test_convert_literal_auto(self): + """Test that literal 'auto' is preserved""" + result = LitellmModel._convert_tool_choice_for_response("auto") + assert result == "auto" + + def test_convert_literal_required(self): + """Test that literal 'required' is preserved""" + result = LitellmModel._convert_tool_choice_for_response("required") + assert result == "required" + + def test_convert_literal_none(self): + """Test that literal 'none' is preserved""" + result = LitellmModel._convert_tool_choice_for_response("none") + assert result == "none" + + def test_convert_tool_choice_function_preserved(self): + """Test that ToolChoiceFunction is preserved as-is""" + tool_choice = ToolChoiceFunction(type="function", name="my_tool") + result = LitellmModel._convert_tool_choice_for_response(tool_choice) + assert result == tool_choice + assert isinstance(result, ToolChoiceFunction) + assert result.name == "my_tool" + + def test_convert_dict_from_chatcompletions_converter(self): + """ + Test conversion from ChatCompletions Converter dict format. + Format: {"type": "function", "function": {"name": "tool_name"}} + """ + tool_choice_dict = { + "type": "function", + "function": {"name": "my_custom_tool"}, + } + result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict) + assert isinstance(result, ToolChoiceFunction) + assert result.type == "function" + assert result.name == "my_custom_tool" + + def test_convert_dict_missing_function_name_returns_auto(self): + """Test that dict without function name falls back to 'auto'""" + tool_choice_dict = { + "type": "function", + "function": {}, # Missing 'name' + } + result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict) + assert result == "auto" + + def test_convert_dict_empty_function_name_returns_auto(self): + """Test that dict with empty function name falls back to 'auto'""" + tool_choice_dict = { + "type": "function", + "function": {"name": ""}, # Empty name + } + result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict) + assert result == "auto" + + def test_convert_dict_missing_function_key_returns_auto(self): + """Test that dict without 'function' key falls back to 'auto'""" + tool_choice_dict = {"type": "function"} # Missing 'function' key + result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict) + assert result == "auto" + + def test_convert_dict_wrong_type_returns_auto(self): + """Test that dict with wrong type falls back to 'auto'""" + tool_choice_dict = { + "type": "wrong_type", + "function": {"name": "my_tool"}, + } + result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict) + assert result == "auto" + + def test_convert_dict_function_not_dict_returns_auto(self): + """Test that dict with non-dict function value falls back to 'auto'""" + tool_choice_dict = { + "type": "function", + "function": "not_a_dict", + } + result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict) + assert result == "auto" + + def test_convert_unexpected_type_returns_auto(self): + """Test that unexpected types fall back to 'auto'""" + result = LitellmModel._convert_tool_choice_for_response(123) + assert result == "auto" + + result = LitellmModel._convert_tool_choice_for_response([]) + assert result == "auto" + + result = LitellmModel._convert_tool_choice_for_response(None) + assert result == "auto" + + +class TestToolChoiceConversionEdgeCases: + """Test edge cases and real-world scenarios.""" + + def test_real_world_scenario_chatcompletions_format(self): + """ + Test a real-world scenario from ChatCompletions Converter. + This is the actual format returned when tool_choice specifies a tool name. + """ + # This is what ChatCompletions Converter returns + tool_choice_from_converter = { + "type": "function", + "function": {"name": "get_weather"}, + } + result = LitellmModel._convert_tool_choice_for_response(tool_choice_from_converter) + assert isinstance(result, ToolChoiceFunction) + assert result.name == "get_weather" + assert result.type == "function" + + def test_none_string_vs_none_literal(self): + """Test that string 'none' works but None (NoneType) defaults to auto""" + # String "none" should be preserved + result = LitellmModel._convert_tool_choice_for_response("none") + assert result == "none" + + # NoneType should fallback to auto + result = LitellmModel._convert_tool_choice_for_response(None) + assert result == "auto" + + def test_complex_tool_name(self): + """Test that complex tool names are handled correctly""" + tool_choice_dict = { + "type": "function", + "function": {"name": "get_user_profile_with_special_chars_123"}, + } + result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict) + assert isinstance(result, ToolChoiceFunction) + assert result.name == "get_user_profile_with_special_chars_123" From a0ba8a295f7b662348cecc4f00d584c2f2514356 Mon Sep 17 00:00:00 2001 From: Lucas Wang Date: Tue, 21 Oct 2025 11:54:35 +0800 Subject: [PATCH 7/7] fix: Remove unused pytest import from tool_choice tests --- tests/extensions/test_litellm_tool_choice_conversion.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/extensions/test_litellm_tool_choice_conversion.py b/tests/extensions/test_litellm_tool_choice_conversion.py index 6f8e5fc38..ede12d33a 100644 --- a/tests/extensions/test_litellm_tool_choice_conversion.py +++ b/tests/extensions/test_litellm_tool_choice_conversion.py @@ -7,7 +7,6 @@ Related to Issue #1846: Support tool_choice with specific tool names in LiteLLM streaming """ -import pytest from openai import NotGiven, omit from openai.types.responses.tool_choice_function import ToolChoiceFunction