diff --git a/examples/realtime/cli/demo.py b/examples/realtime/cli/demo.py
index be610b43e..32e8b6999 100644
--- a/examples/realtime/cli/demo.py
+++ b/examples/realtime/cli/demo.py
@@ -52,7 +52,7 @@ def __init__(self) -> None:
         # Audio output state for callback system
         self.output_queue: queue.Queue[Any] = queue.Queue(maxsize=10)  # Buffer more chunks
         self.interrupt_event = threading.Event()
-        self.current_audio_chunk: np.ndarray | None = None  # type: ignore
+        self.current_audio_chunk: np.ndarray | None = None
         self.chunk_position = 0
 
     def _output_callback(self, outdata, frames: int, time, status) -> None:
diff --git a/pyproject.toml b/pyproject.toml
index fec596f28..cb414b440 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@ requires-python = ">=3.9"
 license = "MIT"
 authors = [{ name = "OpenAI", email = "support@openai.com" }]
 dependencies = [
-    "openai>=1.97.1,<2",
+    "openai>=1.99.3,<2",
     "pydantic>=2.10, <3",
     "griffe>=1.5.6, <2",
     "typing-extensions>=4.12.2, <5",
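Aside, not part of the patch: the hunks below lean on tool-call types that only exist in newer SDK releases, which is why the pin above moves to >=1.99.3. A minimal sanity check of the installed SDK:

import openai

# The function/custom tool-call split used throughout this patch requires
# the openai version pinned above.
print(openai.__version__)  # expect >= 1.99.3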
diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py
index b01b84253..4cd9558aa 100644
--- a/src/agents/extensions/models/litellm_model.py
+++ b/src/agents/extensions/models/litellm_model.py
@@ -18,13 +18,19 @@
     ) from _e
 
 from openai import NOT_GIVEN, AsyncStream, NotGiven
-from openai.types.chat import ChatCompletionChunk, ChatCompletionMessageToolCall
+from openai.types.chat import ChatCompletionChunk
 from openai.types.chat.chat_completion_message import (
     Annotation,
     AnnotationURLCitation,
     ChatCompletionMessage,
 )
-from openai.types.chat.chat_completion_message_tool_call import Function
+from openai.types.chat.chat_completion_message_custom_tool_call import (
+    ChatCompletionMessageCustomToolCall,
+)
+from openai.types.chat.chat_completion_message_function_tool_call import (
+    ChatCompletionMessageFunctionToolCall,
+    Function,
+)
 from openai.types.responses import Response
 
 from ... import _debug
@@ -361,7 +367,9 @@ def convert_message_to_openai(
         if message.role != "assistant":
             raise ModelBehaviorError(f"Unsupported role: {message.role}")
 
-        tool_calls = (
+        tool_calls: (
+            list[ChatCompletionMessageFunctionToolCall | ChatCompletionMessageCustomToolCall] | None
+        ) = (
             [LitellmConverter.convert_tool_call_to_openai(tool) for tool in message.tool_calls]
             if message.tool_calls
             else None
@@ -412,8 +420,8 @@ def convert_annotations_to_openai(
     @classmethod
     def convert_tool_call_to_openai(
         cls, tool_call: litellm.types.utils.ChatCompletionMessageToolCall
-    ) -> ChatCompletionMessageToolCall:
-        return ChatCompletionMessageToolCall(
+    ) -> ChatCompletionMessageFunctionToolCall:
+        return ChatCompletionMessageFunctionToolCall(
             id=tool_call.id,
             type="function",
             function=Function(
diff --git a/src/agents/models/chatcmpl_converter.py b/src/agents/models/chatcmpl_converter.py
index b84f2e669..ad367256f 100644
--- a/src/agents/models/chatcmpl_converter.py
+++ b/src/agents/models/chatcmpl_converter.py
@@ -20,6 +20,9 @@
     ChatCompletionUserMessageParam,
 )
 from openai.types.chat.chat_completion_content_part_param import File, FileFile
+from openai.types.chat.chat_completion_message_function_tool_call import (
+    ChatCompletionMessageFunctionToolCall,
+)
 from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
 from openai.types.chat.completion_create_params import ResponseFormat
 from openai.types.responses import (
@@ -126,15 +129,16 @@ def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TRespon
 
         if message.tool_calls:
             for tool_call in message.tool_calls:
-                items.append(
-                    ResponseFunctionToolCall(
-                        id=FAKE_RESPONSES_ID,
-                        call_id=tool_call.id,
-                        arguments=tool_call.function.arguments,
-                        name=tool_call.function.name,
-                        type="function_call",
+                if isinstance(tool_call, ChatCompletionMessageFunctionToolCall):
+                    items.append(
+                        ResponseFunctionToolCall(
+                            id=FAKE_RESPONSES_ID,
+                            call_id=tool_call.id,
+                            arguments=tool_call.function.arguments,
+                            name=tool_call.function.name,
+                            type="function_call",
+                        )
                     )
-                )
 
         return items
 
@@ -420,10 +424,10 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
             elif file_search := cls.maybe_file_search_call(item):
                 asst = ensure_assistant_message()
                 tool_calls = list(asst.get("tool_calls", []))
-                new_tool_call = ChatCompletionMessageToolCallParam(
-                    id=file_search["id"],
-                    type="function",
-                    function={
+                file_tool_call: ChatCompletionMessageToolCallParam = {
+                    "id": file_search["id"],
+                    "type": "function",
+                    "function": {
                         "name": "file_search_call",
                         "arguments": json.dumps(
                             {
@@ -432,23 +436,23 @@
                             }
                         ),
                     },
-                )
-                tool_calls.append(new_tool_call)
+                }
+                tool_calls.append(file_tool_call)
                 asst["tool_calls"] = tool_calls
             elif func_call := cls.maybe_function_tool_call(item):
                 asst = ensure_assistant_message()
                 tool_calls = list(asst.get("tool_calls", []))
                 arguments = func_call["arguments"] if func_call["arguments"] else "{}"
-                new_tool_call = ChatCompletionMessageToolCallParam(
-                    id=func_call["call_id"],
-                    type="function",
-                    function={
+                func_tool_call: ChatCompletionMessageToolCallParam = {
+                    "id": func_call["call_id"],
+                    "type": "function",
+                    "function": {
                         "name": func_call["name"],
                         "arguments": arguments,
                     },
-                )
-                tool_calls.append(new_tool_call)
+                }
+                tool_calls.append(func_tool_call)
                 asst["tool_calls"] = tool_calls
 
             # 5) function call output => tool message
             elif func_output := cls.maybe_function_tool_call_output(item):
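Aside, not part of the patch: under openai>=1.99, message.tool_calls holds a union of function and custom tool calls, which is why the converter above narrows with isinstance before touching .function. A minimal sketch with made-up values:

from openai.types.chat.chat_completion_message_function_tool_call import (
    ChatCompletionMessageFunctionToolCall,
    Function,
)

# Hypothetical tool call mirroring what message_to_output_items receives.
tool_call = ChatCompletionMessageFunctionToolCall(
    id="call_123",
    type="function",
    function=Function(name="get_weather", arguments='{"city": "Tokyo"}'),
)

# Custom tool calls have no .function attribute, so only function calls
# are converted; anything else is skipped.
if isinstance(tool_call, ChatCompletionMessageFunctionToolCall):
    print(tool_call.function.name, tool_call.function.arguments)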
diff --git a/tests/conftest.py b/tests/conftest.py
index b73d734d1..8b78a701d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+import os
+
 import pytest
 
 from agents.models import _openai_shared
@@ -32,6 +34,7 @@ def clear_openai_settings():
     _openai_shared._default_openai_key = None
     _openai_shared._default_openai_client = None
     _openai_shared._use_responses_by_default = True
+    os.environ.setdefault("OPENAI_API_KEY", "test")
 
 
 @pytest.fixture(autouse=True)
diff --git a/tests/model_settings/test_serialization.py b/tests/model_settings/test_serialization.py
index 6e8c65180..980359270 100644
--- a/tests/model_settings/test_serialization.py
+++ b/tests/model_settings/test_serialization.py
@@ -53,7 +53,7 @@ def test_all_fields_serialization() -> None:
         parallel_tool_calls=True,
         truncation="auto",
         max_tokens=100,
-        reasoning=Reasoning(),
+        reasoning=Reasoning(effort="minimal"),
         metadata={"foo": "bar"},
         store=False,
         include_usage=False,
diff --git a/tests/test_openai_chatcompletions.py b/tests/test_openai_chatcompletions.py
index a6909b195..9e87643db 100644
--- a/tests/test_openai_chatcompletions.py
+++ b/tests/test_openai_chatcompletions.py
@@ -9,8 +9,8 @@
 from openai.types.chat.chat_completion import ChatCompletion, Choice
 from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
 from openai.types.chat.chat_completion_message import ChatCompletionMessage
-from openai.types.chat.chat_completion_message_tool_call import (
-    ChatCompletionMessageToolCall,
+from openai.types.chat.chat_completion_message_function_tool_call import (
+    ChatCompletionMessageFunctionToolCall,
     Function,
 )
 from openai.types.completion_usage import (
@@ -152,10 +152,10 @@ async def test_get_response_with_tool_call(monkeypatch) -> None:
     should append corresponding `ResponseFunctionToolCall` items after the
     assistant message item with matching name/arguments.
     """
-    tool_call = ChatCompletionMessageToolCall(
+    tool_call = ChatCompletionMessageFunctionToolCall(
         id="call-id",
-        type="function",
         function=Function(name="do_thing", arguments="{'x':1}"),
+        type="function",
     )
     msg = ChatCompletionMessage(role="assistant", content="Hi", tool_calls=[tool_call])
     choice = Choice(index=0, finish_reason="stop", message=msg)
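Aside, not part of the patch: in the new SDK, ChatCompletionMessageToolCallParam is a union of TypedDicts rather than a single callable class, so the converter above builds annotated dict literals and the converter tests below cast list elements back to the function variant. A minimal sketch with placeholder values:

from openai.types.chat import ChatCompletionMessageToolCallParam

# A union type cannot be invoked like a constructor, so the param is a plain
# dict annotated with the union, exactly as in chatcmpl_converter.py above.
tool_call_param: ChatCompletionMessageToolCallParam = {
    "id": "call_123",
    "type": "function",
    "function": {"name": "get_weather", "arguments": '{"city": "Tokyo"}'},
}
print(tool_call_param)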
""" - tool_call = ChatCompletionMessageToolCall( + tool_call = ChatCompletionMessageFunctionToolCall( id="tool1", type="function", function=Function(name="myfn", arguments='{"x":1}'), @@ -339,7 +345,7 @@ def test_tool_call_conversion(): tool_calls = list(tool_msg.get("tool_calls", [])) assert len(tool_calls) == 1 - tool_call = tool_calls[0] + tool_call = cast(ChatCompletionMessageFunctionToolCallParam, tool_calls[0]) assert tool_call["id"] == function_call["call_id"] assert tool_call["function"]["name"] == function_call["name"] assert tool_call["function"]["arguments"] == function_call["arguments"] diff --git a/tests/test_reasoning_effort.py b/tests/test_reasoning_effort.py new file mode 100644 index 000000000..d8c90409f --- /dev/null +++ b/tests/test_reasoning_effort.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from types import SimpleNamespace +from typing import cast + +import pytest +from openai import AsyncOpenAI +from openai.types.chat import ChatCompletion, ChatCompletionMessage +from openai.types.chat.chat_completion import Choice +from openai.types.completion_usage import CompletionUsage +from openai.types.shared import Reasoning + +from agents import ModelSettings, ModelTracing +from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel + + +class FakeClient: + def __init__(self) -> None: + self.kwargs: dict[str, object] | None = None + self.base_url = "https://example.com" + self.chat = SimpleNamespace(completions=SimpleNamespace(create=self.create)) + + async def create(self, **kwargs: object) -> ChatCompletion: + self.kwargs = kwargs + msg = ChatCompletionMessage(role="assistant", content="hi") + choice = Choice(index=0, finish_reason="stop", message=msg) + usage = CompletionUsage(completion_tokens=0, prompt_tokens=0, total_tokens=0) + return ChatCompletion( + id="resp", + created=0, + model="gpt-4", + object="chat.completion", + choices=[choice], + usage=usage, + ) + + +@pytest.mark.allow_call_model_methods +@pytest.mark.asyncio +async def test_reasoning_effort_minimal() -> None: + client = FakeClient() + model = OpenAIChatCompletionsModel("gpt-4", cast(AsyncOpenAI, client)) + settings = ModelSettings(reasoning=Reasoning(effort="minimal")) + await model.get_response( + system_instructions=None, + input="", + model_settings=settings, + tools=[], + output_schema=None, + handoffs=[], + tracing=ModelTracing.DISABLED, + previous_response_id=None, + ) + assert client.kwargs is not None + assert client.kwargs.get("reasoning_effort") == "minimal" diff --git a/tests/voice/test_input.py b/tests/voice/test_input.py index d41d870d7..9eeaa858e 100644 --- a/tests/voice/test_input.py +++ b/tests/voice/test_input.py @@ -1,5 +1,6 @@ import io import wave +from typing import Any, cast import numpy as np import pytest @@ -52,11 +53,11 @@ def test_buffer_to_audio_file_float32(): def test_buffer_to_audio_file_invalid_dtype(): # Create a buffer with invalid dtype (float64) - buffer = np.array([1.0, 2.0, 3.0], dtype=np.float64) + buffer = cast(Any, np.array([1.0, 2.0, 3.0], dtype=np.float64)) with pytest.raises(UserError, match="Buffer must be a numpy array of int16 or float32"): - # Purposely ignore the type error - _buffer_to_audio_file(buffer) # type: ignore + # Purposely pass invalid dtype buffer + _buffer_to_audio_file(buffer) class TestAudioInput: diff --git a/uv.lock b/uv.lock index d8efb6e8c..a06c54f7a 100644 --- a/uv.lock +++ b/uv.lock @@ -1463,7 +1463,7 @@ wheels = [ [[package]] name = "openai" -version = "1.97.1" +version = "1.99.3" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1475,9 +1475,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a6/57/1c471f6b3efb879d26686d31582997615e969f3bb4458111c9705e56332e/openai-1.97.1.tar.gz", hash = "sha256:a744b27ae624e3d4135225da9b1c89c107a2a7e5bc4c93e5b7b5214772ce7a4e", size = 494267, upload-time = "2025-07-22T13:10:12.607Z" } +sdist = { url = "https://files.pythonhosted.org/packages/72/d3/c372420c8ca1c60e785fd8c19e536cea8f16b0cfdcdad6458e1d8884f2ea/openai-1.99.3.tar.gz", hash = "sha256:1a0e2910e4545d828c14218f2ac3276827c94a043f5353e43b9413b38b497897", size = 504932, upload-time = "2025-08-07T20:35:15.893Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ee/35/412a0e9c3f0d37c94ed764b8ac7adae2d834dbd20e69f6aca582118e0f55/openai-1.97.1-py3-none-any.whl", hash = "sha256:4e96bbdf672ec3d44968c9ea39d2c375891db1acc1794668d8149d5fa6000606", size = 764380, upload-time = "2025-07-22T13:10:10.689Z" }, + { url = "https://files.pythonhosted.org/packages/92/bc/e52f49940b4e320629da7db09c90a2407a48c612cff397b4b41b7e58cdf9/openai-1.99.3-py3-none-any.whl", hash = "sha256:c786a03f6cddadb5ee42c6d749aa4f6134fe14fdd7d69a667e5e7ce7fd29a719", size = 785776, upload-time = "2025-08-07T20:35:13.653Z" }, ] [[package]] @@ -1541,7 +1541,7 @@ requires-dist = [ { name = "litellm", marker = "extra == 'litellm'", specifier = ">=1.67.4.post1,<2" }, { name = "mcp", marker = "python_full_version >= '3.10'", specifier = ">=1.11.0,<2" }, { name = "numpy", marker = "python_full_version >= '3.10' and extra == 'voice'", specifier = ">=2.2.0,<3" }, - { name = "openai", specifier = ">=1.97.1,<2" }, + { name = "openai", specifier = ">=1.99.3,<2" }, { name = "pydantic", specifier = ">=2.10,<3" }, { name = "requests", specifier = ">=2.0,<3" }, { name = "types-requests", specifier = ">=2.0,<3" },