Skip to content

Commit 46b4624

Browse files
authored
Merge branch 'main' into fix/issue-879-mcp-http-error-handling
2 parents a8fb751 + 4bc33e3 commit 46b4624

File tree

18 files changed

+840
-45
lines changed

18 files changed

+840
-45
lines changed

.github/workflows/tests.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@ jobs:
2222
enable-cache: true
2323
- name: Install dependencies
2424
run: make sync
25+
- name: Verify formatting
26+
run: make format-check
2527
- name: Run lint
2628
run: make lint
2729

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# OpenAI Agents SDK
1+
# OpenAI Agents SDK [![PyPI](https://img.shields.io/pypi/v/openai-agents?label=pypi%20package)](https://pypi.org/project/openai-agents/)
22

33
The OpenAI Agents SDK is a lightweight yet powerful framework for building multi-agent workflows. It is provider-agnostic, supporting the OpenAI Responses and Chat Completions APIs, as well as 100+ other LLMs.
44

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "openai-agents"
3-
version = "0.4.0"
3+
version = "0.4.1"
44
description = "OpenAI Agents SDK"
55
readme = "README.md"
66
requires-python = ">=3.9"

src/agents/extensions/memory/sqlalchemy_session.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -319,3 +319,16 @@ async def clear_session(self) -> None:
319319
await sess.execute(
320320
delete(self._sessions).where(self._sessions.c.session_id == self.session_id)
321321
)
322+
323+
@property
324+
def engine(self) -> AsyncEngine:
325+
"""Access the underlying SQLAlchemy AsyncEngine.
326+
327+
This property provides direct access to the engine for advanced use cases,
328+
such as checking connection pool status, configuring engine settings,
329+
or manually disposing the engine when needed.
330+
331+
Returns:
332+
AsyncEngine: The SQLAlchemy async engine instance.
333+
"""
334+
return self._engine

src/agents/extensions/models/litellm_model.py

Lines changed: 28 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@
4444
from ...models.chatcmpl_stream_handler import ChatCmplStreamHandler
4545
from ...models.fake_id import FAKE_RESPONSES_ID
4646
from ...models.interface import Model, ModelTracing
47+
from ...models.openai_responses import Converter as OpenAIResponsesConverter
4748
from ...tool import Tool
4849
from ...tracing import generation_span
4950
from ...tracing.span_data import GenerationSpanData
@@ -325,6 +326,23 @@ async def _fetch_response(
325326
)
326327

327328
reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None
329+
# Enable developers to pass non-OpenAI compatible reasoning_effort data like "none"
330+
# Priority order:
331+
# 1. model_settings.reasoning.effort
332+
# 2. model_settings.extra_body["reasoning_effort"]
333+
# 3. model_settings.extra_args["reasoning_effort"]
334+
if (
335+
reasoning_effort is None # Unset in model_settings
336+
and isinstance(model_settings.extra_body, dict)
337+
and "reasoning_effort" in model_settings.extra_body
338+
):
339+
reasoning_effort = model_settings.extra_body["reasoning_effort"]
340+
if (
341+
reasoning_effort is None # Unset in both model_settings and model_settings.extra_body
342+
and model_settings.extra_args
343+
and "reasoning_effort" in model_settings.extra_args
344+
):
345+
reasoning_effort = model_settings.extra_args["reasoning_effort"]
328346

329347
stream_options = None
330348
if stream and model_settings.include_usage is not None:
@@ -342,6 +360,9 @@ async def _fetch_response(
342360
if model_settings.extra_args:
343361
extra_kwargs.update(model_settings.extra_args)
344362

363+
# Prevent duplicate reasoning_effort kwargs when it was promoted to a top-level argument.
364+
extra_kwargs.pop("reasoning_effort", None)
365+
345366
ret = await litellm.acompletion(
346367
model=self.model,
347368
messages=converted_messages,
@@ -367,15 +388,19 @@ async def _fetch_response(
367388
if isinstance(ret, litellm.types.utils.ModelResponse):
368389
return ret
369390

391+
responses_tool_choice = OpenAIResponsesConverter.convert_tool_choice(
392+
model_settings.tool_choice
393+
)
394+
if responses_tool_choice is None or responses_tool_choice is omit:
395+
responses_tool_choice = "auto"
396+
370397
response = Response(
371398
id=FAKE_RESPONSES_ID,
372399
created_at=time.time(),
373400
model=self.model,
374401
object="response",
375402
output=[],
376-
tool_choice=cast(Literal["auto", "required", "none"], tool_choice)
377-
if tool_choice is not omit
378-
else "auto",
403+
tool_choice=responses_tool_choice, # type: ignore[arg-type]
379404
top_p=model_settings.top_p,
380405
temperature=model_settings.temperature,
381406
tools=[],

src/agents/items.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -361,6 +361,9 @@ def _maybe_get_output_as_structured_function_output(
361361
if isinstance(output, (ToolOutputText, ToolOutputImage, ToolOutputFileContent)):
362362
return output
363363
elif isinstance(output, dict):
364+
# Require explicit 'type' field in dict to be considered a structured output
365+
if "type" not in output:
366+
return None
364367
try:
365368
return ValidToolOutputPydanticModelsTypeAdapter.validate_python(output)
366369
except pydantic.ValidationError:

src/agents/realtime/model_inputs.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,9 @@ class RealtimeModelSendToolOutput:
9595
class RealtimeModelSendInterrupt:
9696
"""Send an interrupt to the model."""
9797

98+
force_response_cancel: bool = False
99+
"""Force sending a response.cancel event even if automatic cancellation is enabled."""
100+
98101

99102
@dataclass
100103
class RealtimeModelSendSessionUpdate:

src/agents/realtime/openai_realtime.py

Lines changed: 30 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -395,36 +395,36 @@ async def _send_interrupt(self, event: RealtimeModelSendInterrupt) -> None:
395395
current_item_id = playback_state.get("current_item_id")
396396
current_item_content_index = playback_state.get("current_item_content_index")
397397
elapsed_ms = playback_state.get("elapsed_ms")
398+
398399
if current_item_id is None or elapsed_ms is None:
399400
logger.debug(
400401
"Skipping interrupt. "
401402
f"Item id: {current_item_id}, "
402403
f"elapsed ms: {elapsed_ms}, "
403404
f"content index: {current_item_content_index}"
404405
)
405-
return
406-
407-
current_item_content_index = current_item_content_index or 0
408-
if elapsed_ms > 0:
409-
await self._emit_event(
410-
RealtimeModelAudioInterruptedEvent(
411-
item_id=current_item_id,
412-
content_index=current_item_content_index,
413-
)
414-
)
415-
converted = _ConversionHelper.convert_interrupt(
416-
current_item_id,
417-
current_item_content_index,
418-
int(elapsed_ms),
419-
)
420-
await self._send_raw_message(converted)
421406
else:
422-
logger.debug(
423-
"Didn't interrupt bc elapsed ms is < 0. "
424-
f"Item id: {current_item_id}, "
425-
f"elapsed ms: {elapsed_ms}, "
426-
f"content index: {current_item_content_index}"
427-
)
407+
current_item_content_index = current_item_content_index or 0
408+
if elapsed_ms > 0:
409+
await self._emit_event(
410+
RealtimeModelAudioInterruptedEvent(
411+
item_id=current_item_id,
412+
content_index=current_item_content_index,
413+
)
414+
)
415+
converted = _ConversionHelper.convert_interrupt(
416+
current_item_id,
417+
current_item_content_index,
418+
int(elapsed_ms),
419+
)
420+
await self._send_raw_message(converted)
421+
else:
422+
logger.debug(
423+
"Didn't interrupt bc elapsed ms is < 0. "
424+
f"Item id: {current_item_id}, "
425+
f"elapsed ms: {elapsed_ms}, "
426+
f"content index: {current_item_content_index}"
427+
)
428428

429429
session = self._created_session
430430
automatic_response_cancellation_enabled = (
@@ -434,12 +434,16 @@ async def _send_interrupt(self, event: RealtimeModelSendInterrupt) -> None:
434434
and session.audio.input.turn_detection is not None
435435
and session.audio.input.turn_detection.interrupt_response is True
436436
)
437-
if not automatic_response_cancellation_enabled:
437+
should_cancel_response = event.force_response_cancel or (
438+
not automatic_response_cancellation_enabled
439+
)
440+
if should_cancel_response:
438441
await self._cancel_response()
439442

440-
self._audio_state_tracker.on_interrupted()
441-
if self._playback_tracker:
442-
self._playback_tracker.on_interrupted()
443+
if current_item_id is not None and elapsed_ms is not None:
444+
self._audio_state_tracker.on_interrupted()
445+
if self._playback_tracker:
446+
self._playback_tracker.on_interrupted()
443447

444448
async def _send_session_update(self, event: RealtimeModelSendSessionUpdate) -> None:
445449
"""Send a session update to the model."""

src/agents/realtime/session.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -704,7 +704,7 @@ async def _run_output_guardrails(self, text: str, response_id: str) -> bool:
704704
)
705705

706706
# Interrupt the model
707-
await self._model.send_event(RealtimeModelSendInterrupt())
707+
await self._model.send_event(RealtimeModelSendInterrupt(force_response_cancel=True))
708708

709709
# Send guardrail triggered message
710710
guardrail_names = [result.guardrail.get_name() for result in triggered_results]

src/agents/run.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1138,6 +1138,15 @@ async def _start_streaming(
11381138

11391139
streamed_result.is_complete = True
11401140
finally:
1141+
if streamed_result._input_guardrails_task:
1142+
try:
1143+
await AgentRunner._input_guardrail_tripwire_triggered_for_stream(
1144+
streamed_result
1145+
)
1146+
except Exception as e:
1147+
logger.debug(
1148+
f"Error in streamed_result finalize for agent {current_agent.name} - {e}"
1149+
)
11411150
if current_span:
11421151
current_span.finish(reset_current=True)
11431152
if streamed_result.trace:

0 commit comments

Comments (0)