Skip to content

Commit 60926d0

Browse files
committed
fix: #2007 TypeError: object of type 'Omit' has no len()
1 parent 3b9368d commit 60926d0

File tree

3 files changed

+56
-34
lines changed

3 files changed

+56
-34
lines changed

src/agents/models/openai_chatcompletions.py

Lines changed: 33 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -290,30 +290,39 @@ async def _fetch_response(
290290

291291
stream_param: Literal[True] | Omit = True if stream else omit
292292

293-
ret = await self._get_client().chat.completions.create(
294-
model=self.model,
295-
messages=converted_messages,
296-
tools=tools_param,
297-
temperature=self._non_null_or_omit(model_settings.temperature),
298-
top_p=self._non_null_or_omit(model_settings.top_p),
299-
frequency_penalty=self._non_null_or_omit(model_settings.frequency_penalty),
300-
presence_penalty=self._non_null_or_omit(model_settings.presence_penalty),
301-
max_tokens=self._non_null_or_omit(model_settings.max_tokens),
302-
tool_choice=tool_choice,
303-
response_format=response_format,
304-
parallel_tool_calls=parallel_tool_calls,
305-
stream=cast(Any, stream_param),
306-
stream_options=self._non_null_or_omit(stream_options),
307-
store=self._non_null_or_omit(store),
308-
reasoning_effort=self._non_null_or_omit(reasoning_effort),
309-
verbosity=self._non_null_or_omit(model_settings.verbosity),
310-
top_logprobs=self._non_null_or_omit(model_settings.top_logprobs),
311-
extra_headers=self._merge_headers(model_settings),
312-
extra_query=model_settings.extra_query,
313-
extra_body=model_settings.extra_body,
314-
metadata=self._non_null_or_omit(model_settings.metadata),
315-
**(model_settings.extra_args or {}),
316-
)
293+
request_kwargs: dict[str, Any] = {
294+
"model": self.model,
295+
"messages": converted_messages,
296+
"tools": tools_param,
297+
"temperature": self._non_null_or_omit(model_settings.temperature),
298+
"top_p": self._non_null_or_omit(model_settings.top_p),
299+
"frequency_penalty": self._non_null_or_omit(model_settings.frequency_penalty),
300+
"presence_penalty": self._non_null_or_omit(model_settings.presence_penalty),
301+
"max_tokens": self._non_null_or_omit(model_settings.max_tokens),
302+
"tool_choice": tool_choice,
303+
"response_format": response_format,
304+
"parallel_tool_calls": parallel_tool_calls,
305+
"stream": cast(Any, stream_param),
306+
"stream_options": self._non_null_or_omit(stream_options),
307+
"store": self._non_null_or_omit(store),
308+
"reasoning_effort": self._non_null_or_omit(reasoning_effort),
309+
"verbosity": self._non_null_or_omit(model_settings.verbosity),
310+
"top_logprobs": self._non_null_or_omit(model_settings.top_logprobs),
311+
"extra_headers": self._merge_headers(model_settings),
312+
"extra_query": model_settings.extra_query,
313+
"extra_body": model_settings.extra_body,
314+
"metadata": self._non_null_or_omit(model_settings.metadata),
315+
}
316+
317+
request_kwargs.update(model_settings.extra_args or {})
318+
319+
sanitized_kwargs = {
320+
key: value
321+
for key, value in request_kwargs.items()
322+
if not isinstance(value, Omit) and value is not omit
323+
}
324+
325+
ret = await self._get_client().chat.completions.create(**sanitized_kwargs)
317326

318327
if isinstance(ret, ChatCompletion):
319328
return ret

tests/models/test_kwargs_functionality.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import litellm
22
import pytest
33
from litellm.types.utils import Choices, Message, ModelResponse, Usage
4+
from openai import Omit, omit
45
from openai.types.chat.chat_completion import ChatCompletion, Choice
56
from openai.types.chat.chat_completion_message import ChatCompletionMessage
67
from openai.types.completion_usage import CompletionUsage
@@ -124,6 +125,9 @@ def __init__(self):
124125
# Verify regular parameters are still passed
125126
assert captured["temperature"] == 0.7
126127

128+
assert all(not isinstance(value, Omit) for value in captured.values())
129+
assert omit not in captured.values()
130+
127131

128132
@pytest.mark.allow_call_model_methods
129133
@pytest.mark.asyncio

tests/test_openai_chatcompletions.py

Lines changed: 19 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
import httpx
77
import pytest
8-
from openai import AsyncOpenAI, omit
8+
from openai import AsyncOpenAI, Omit, omit
99
from openai.types.chat.chat_completion import ChatCompletion, Choice
1010
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
1111
from openai.types.chat.chat_completion_message import ChatCompletionMessage
@@ -283,19 +283,22 @@ def __init__(self, completions: DummyCompletions) -> None:
283283
stream=False,
284284
)
285285
assert result is chat
286+
286287
# Ensure expected args were passed through to OpenAI client.
287288
kwargs = completions.kwargs
288-
assert kwargs["stream"] is omit
289-
assert kwargs["store"] is omit
290289
assert kwargs["model"] == "gpt-4"
291290
assert kwargs["messages"][0]["role"] == "system"
292291
assert kwargs["messages"][0]["content"] == "sys"
293292
assert kwargs["messages"][1]["role"] == "user"
294-
# Defaults for optional fields become the omit sentinel
295-
assert kwargs["tools"] is omit
296-
assert kwargs["tool_choice"] is omit
297-
assert kwargs["response_format"] is omit
298-
assert kwargs["stream_options"] is omit
293+
assert kwargs["messages"][1]["content"] == "hi"
294+
assert "stream" not in kwargs
295+
assert "store" not in kwargs
296+
assert "tools" not in kwargs
297+
assert "tool_choice" not in kwargs
298+
assert "response_format" not in kwargs
299+
assert "stream_options" not in kwargs
300+
assert "parallel_tool_calls" not in kwargs
301+
assert all(not isinstance(value, Omit) and value is not omit for value in kwargs.values())
299302

300303

301304
@pytest.mark.asyncio
@@ -340,8 +343,14 @@ def __init__(self, completions: DummyCompletions) -> None:
340343
)
341344
# Check OpenAI client was called for streaming
342345
assert completions.kwargs["stream"] is True
343-
assert completions.kwargs["store"] is omit
344-
assert completions.kwargs["stream_options"] is omit
346+
assert completions.kwargs["model"] == "gpt-4"
347+
assert "store" not in completions.kwargs
348+
assert "stream_options" not in completions.kwargs
349+
assert "tools" not in completions.kwargs
350+
assert "parallel_tool_calls" not in completions.kwargs
351+
assert all(
352+
not isinstance(value, Omit) and value is not omit for value in completions.kwargs.values()
353+
)
345354
# Response is a proper openai Response
346355
assert isinstance(response, Response)
347356
assert response.id == FAKE_RESPONSES_ID

0 commit comments

Comments (0)