Skip to content

Commit 3c5690a

Browse files
committed
refactor: Extract tool_choice conversion to static method with tests
Address review feedback from @seratch on PR #1929.

Changes:
- Extracted tool_choice conversion logic to static method _convert_tool_choice_for_response()
- Added comprehensive documentation with examples
- Created 16 unit tests covering all conversion scenarios:
  - omit/NotGiven -> 'auto'
  - Literal strings ('auto', 'required', 'none')
  - ToolChoiceFunction (preserved as-is)
  - Dict from ChatCompletions Converter
  - Edge cases and fallbacks

Benefits:
- Improves code readability and maintainability
- Makes the conversion logic testable in isolation
- Provides clear documentation of supported formats
- All existing tests pass (822 tests)

Test coverage:
- Normal cases: omit, literals, ToolChoiceFunction, dict
- Edge cases: missing keys, empty names, wrong types
- Real-world scenarios: ChatCompletions Converter format
1 parent d9aa6da commit 3c5690a

File tree

2 files changed

+220
-33
lines changed

2 files changed

+220
-33
lines changed

src/agents/extensions/models/litellm_model.py

Lines changed: 63 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,68 @@ def __init__(
7878
self.base_url = base_url
7979
self.api_key = api_key
8080

81+
@staticmethod
def _convert_tool_choice_for_response(
    tool_choice: Any,
) -> Literal["auto", "required", "none"] | ToolChoiceFunction:
    """
    Normalize a tool_choice value into the form the Response type accepts.

    Accepted inputs:
        - omit / NotGiven: treated as absent, yields "auto"
        - Literal["auto", "required", "none"]: returned unchanged
        - ToolChoiceFunction: returned unchanged
        - dict (from ChatCompletions Converter): converted to
          ToolChoiceFunction. Expected shape:
          {"type": "function", "function": {"name": "tool_name"}}
        - anything else: safe fallback to "auto"

    Returns:
        Literal["auto", "required", "none"] | ToolChoiceFunction

    Examples:
        >>> LitellmModel._convert_tool_choice_for_response(omit)
        "auto"
        >>> LitellmModel._convert_tool_choice_for_response("required")
        "required"
        >>> LitellmModel._convert_tool_choice_for_response(
        ...     {"type": "function", "function": {"name": "my_tool"}}
        ... )
        ToolChoiceFunction(type="function", name="my_tool")
    """
    # Absent value: the omit sentinel or a NotGiven instance both mean
    # "let the model decide".
    if tool_choice is omit or isinstance(tool_choice, NotGiven):
        return "auto"

    # Already the target type -- nothing to convert.
    if isinstance(tool_choice, ToolChoiceFunction):
        return tool_choice

    # ChatCompletions Converter emits
    # {"type": "function", "function": {"name": "..."}}; translate that
    # shape into a ToolChoiceFunction. Any malformed dict (wrong type,
    # missing or non-dict "function", absent/empty/non-string name)
    # falls back to "auto".
    if isinstance(tool_choice, dict):
        inner = tool_choice.get("function")
        if tool_choice.get("type") == "function" and isinstance(inner, dict):
            name = inner.get("name")
            if isinstance(name, str) and name:  # require a non-empty string
                return ToolChoiceFunction(type="function", name=name)
        return "auto"

    # Plain literal strings pass through unchanged.
    if tool_choice in ("auto", "required", "none"):
        return cast(Literal["auto", "required", "none"], tool_choice)

    # Unknown input of any other kind: default to "auto".
    return "auto"
142+
81143
async def get_response(
82144
self,
83145
system_instructions: str | None,
@@ -369,39 +431,7 @@ async def _fetch_response(
369431
return ret
370432

371433
# Convert tool_choice to the correct type for Response
372-
# tool_choice can be a Literal, ToolChoiceFunction,
373-
# dict from ChatCompletions Converter, or omit
374-
response_tool_choice: Literal["auto", "required", "none"] | ToolChoiceFunction
375-
if tool_choice is omit:
376-
response_tool_choice = "auto"
377-
elif isinstance(tool_choice, ToolChoiceFunction):
378-
# Already a ToolChoiceFunction, use directly
379-
response_tool_choice = tool_choice
380-
elif isinstance(tool_choice, dict):
381-
# Convert from ChatCompletions format dict to ToolChoiceFunction
382-
# ChatCompletions Converter returns: {"type": "function", "function": {"name": "..."}}
383-
func_data = tool_choice.get("function")
384-
if (
385-
tool_choice.get("type") == "function"
386-
and func_data is not None
387-
and isinstance(func_data, dict)
388-
):
389-
tool_name = func_data.get("name")
390-
if isinstance(tool_name, str) and tool_name: # Ensure non-empty string
391-
response_tool_choice = ToolChoiceFunction(type="function", name=tool_name)
392-
else:
393-
# Fallback to auto if name is missing or invalid
394-
response_tool_choice = "auto"
395-
else:
396-
# Fallback to auto if unexpected format
397-
response_tool_choice = "auto"
398-
elif tool_choice in ("auto", "required", "none"):
399-
from typing import cast
400-
401-
response_tool_choice = cast(Literal["auto", "required", "none"], tool_choice)
402-
else:
403-
# Fallback to auto for any other case
404-
response_tool_choice = "auto"
434+
response_tool_choice = self._convert_tool_choice_for_response(tool_choice)
405435

406436
response = Response(
407437
id=FAKE_RESPONSES_ID,
Lines changed: 157 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,157 @@
1+
"""
2+
Unit tests for LitellmModel._convert_tool_choice_for_response
3+
4+
Tests the static method that converts various tool_choice formats
5+
to the format expected by the Response type.
6+
7+
Related to Issue #1846: Support tool_choice with specific tool names in LiteLLM streaming
8+
"""
9+
10+
import pytest
11+
from openai import NotGiven, omit
12+
from openai.types.responses.tool_choice_function import ToolChoiceFunction
13+
14+
from agents.extensions.models.litellm_model import LitellmModel
15+
16+
17+
class TestConvertToolChoiceForResponse:
18+
"""Test the _convert_tool_choice_for_response static method."""
19+
20+
def test_convert_omit_returns_auto(self):
21+
"""Test that omit is converted to 'auto'"""
22+
result = LitellmModel._convert_tool_choice_for_response(omit)
23+
assert result == "auto"
24+
25+
def test_convert_not_given_returns_auto(self):
26+
"""Test that NotGiven is converted to 'auto'"""
27+
result = LitellmModel._convert_tool_choice_for_response(NotGiven())
28+
assert result == "auto"
29+
30+
def test_convert_literal_auto(self):
31+
"""Test that literal 'auto' is preserved"""
32+
result = LitellmModel._convert_tool_choice_for_response("auto")
33+
assert result == "auto"
34+
35+
def test_convert_literal_required(self):
36+
"""Test that literal 'required' is preserved"""
37+
result = LitellmModel._convert_tool_choice_for_response("required")
38+
assert result == "required"
39+
40+
def test_convert_literal_none(self):
41+
"""Test that literal 'none' is preserved"""
42+
result = LitellmModel._convert_tool_choice_for_response("none")
43+
assert result == "none"
44+
45+
def test_convert_tool_choice_function_preserved(self):
46+
"""Test that ToolChoiceFunction is preserved as-is"""
47+
tool_choice = ToolChoiceFunction(type="function", name="my_tool")
48+
result = LitellmModel._convert_tool_choice_for_response(tool_choice)
49+
assert result == tool_choice
50+
assert isinstance(result, ToolChoiceFunction)
51+
assert result.name == "my_tool"
52+
53+
def test_convert_dict_from_chatcompletions_converter(self):
54+
"""
55+
Test conversion from ChatCompletions Converter dict format.
56+
Format: {"type": "function", "function": {"name": "tool_name"}}
57+
"""
58+
tool_choice_dict = {
59+
"type": "function",
60+
"function": {"name": "my_custom_tool"},
61+
}
62+
result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict)
63+
assert isinstance(result, ToolChoiceFunction)
64+
assert result.type == "function"
65+
assert result.name == "my_custom_tool"
66+
67+
def test_convert_dict_missing_function_name_returns_auto(self):
68+
"""Test that dict without function name falls back to 'auto'"""
69+
tool_choice_dict = {
70+
"type": "function",
71+
"function": {}, # Missing 'name'
72+
}
73+
result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict)
74+
assert result == "auto"
75+
76+
def test_convert_dict_empty_function_name_returns_auto(self):
77+
"""Test that dict with empty function name falls back to 'auto'"""
78+
tool_choice_dict = {
79+
"type": "function",
80+
"function": {"name": ""}, # Empty name
81+
}
82+
result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict)
83+
assert result == "auto"
84+
85+
def test_convert_dict_missing_function_key_returns_auto(self):
86+
"""Test that dict without 'function' key falls back to 'auto'"""
87+
tool_choice_dict = {"type": "function"} # Missing 'function' key
88+
result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict)
89+
assert result == "auto"
90+
91+
def test_convert_dict_wrong_type_returns_auto(self):
92+
"""Test that dict with wrong type falls back to 'auto'"""
93+
tool_choice_dict = {
94+
"type": "wrong_type",
95+
"function": {"name": "my_tool"},
96+
}
97+
result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict)
98+
assert result == "auto"
99+
100+
def test_convert_dict_function_not_dict_returns_auto(self):
101+
"""Test that dict with non-dict function value falls back to 'auto'"""
102+
tool_choice_dict = {
103+
"type": "function",
104+
"function": "not_a_dict",
105+
}
106+
result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict)
107+
assert result == "auto"
108+
109+
def test_convert_unexpected_type_returns_auto(self):
110+
"""Test that unexpected types fall back to 'auto'"""
111+
result = LitellmModel._convert_tool_choice_for_response(123)
112+
assert result == "auto"
113+
114+
result = LitellmModel._convert_tool_choice_for_response([])
115+
assert result == "auto"
116+
117+
result = LitellmModel._convert_tool_choice_for_response(None)
118+
assert result == "auto"
119+
120+
121+
class TestToolChoiceConversionEdgeCases:
122+
"""Test edge cases and real-world scenarios."""
123+
124+
def test_real_world_scenario_chatcompletions_format(self):
125+
"""
126+
Test a real-world scenario from ChatCompletions Converter.
127+
This is the actual format returned when tool_choice specifies a tool name.
128+
"""
129+
# This is what ChatCompletions Converter returns
130+
tool_choice_from_converter = {
131+
"type": "function",
132+
"function": {"name": "get_weather"},
133+
}
134+
result = LitellmModel._convert_tool_choice_for_response(tool_choice_from_converter)
135+
assert isinstance(result, ToolChoiceFunction)
136+
assert result.name == "get_weather"
137+
assert result.type == "function"
138+
139+
def test_none_string_vs_none_literal(self):
140+
"""Test that string 'none' works but None (NoneType) defaults to auto"""
141+
# String "none" should be preserved
142+
result = LitellmModel._convert_tool_choice_for_response("none")
143+
assert result == "none"
144+
145+
# NoneType should fallback to auto
146+
result = LitellmModel._convert_tool_choice_for_response(None)
147+
assert result == "auto"
148+
149+
def test_complex_tool_name(self):
150+
"""Test that complex tool names are handled correctly"""
151+
tool_choice_dict = {
152+
"type": "function",
153+
"function": {"name": "get_user_profile_with_special_chars_123"},
154+
}
155+
result = LitellmModel._convert_tool_choice_for_response(tool_choice_dict)
156+
assert isinstance(result, ToolChoiceFunction)
157+
assert result.name == "get_user_profile_with_special_chars_123"

0 commit comments

Comments
 (0)