
Commit 369a5b8

Merge branch 'main' into feat/stt-event-name-alias
2 parents b9742e1 + 36fa8f3

37 files changed (+2845 / -1468 lines)
Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
+import asyncio
+
+from agents import Agent, Runner
+from agents.extensions.memory.sqlalchemy_session import SQLAlchemySession
+
+
+async def main():
+    # Create an agent
+    agent = Agent(
+        name="Assistant",
+        instructions="Reply very concisely.",
+    )
+
+    # Create a session instance with a session ID.
+    # This example uses an in-memory SQLite database.
+    # The `create_tables=True` flag is useful for development and testing.
+    session = SQLAlchemySession.from_url(
+        "conversation_123",
+        url="sqlite+aiosqlite:///:memory:",
+        create_tables=True,
+    )
+
+    print("=== SQLAlchemySession Example ===")
+    print("The agent will remember previous messages automatically.\n")
+
+    # First turn
+    print("User: What city is the Golden Gate Bridge in?")
+    result = await Runner.run(
+        agent,
+        "What city is the Golden Gate Bridge in?",
+        session=session,
+    )
+    print(f"Assistant: {result.final_output}\n")
+
+    # Second turn - the agent will remember the previous conversation
+    print("User: What state is it in?")
+    result = await Runner.run(
+        agent,
+        "What state is it in?",
+        session=session,
+    )
+    print(f"Assistant: {result.final_output}\n")
+
+    print("=== Conversation Complete ===")
+
+
+if __name__ == "__main__":
+    # To run this example, you need to install the sqlalchemy extras:
+    # pip install "agents[sqlalchemy]"
+    asyncio.run(main())
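
Note: the session URL above is the only in-memory piece; `SQLAlchemySession.from_url` should accept any async SQLAlchemy URL, so a file-backed database persists the history across restarts. A minimal sketch under that assumption (the conversations.db path is hypothetical):

# Hypothetical variant: a file-backed SQLite database instead of :memory:,
# so the conversation history survives process restarts.
session = SQLAlchemySession.from_url(
    "conversation_123",
    url="sqlite+aiosqlite:///./conversations.db",  # hypothetical local path
    create_tables=True,  # convenient in development; manage schema explicitly in production
)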

examples/financial_research_agent/agents/search_agent.py

Lines changed: 1 addition & 0 deletions
@@ -12,6 +12,7 @@
 
 search_agent = Agent(
     name="FinancialSearchAgent",
+    model="gpt-4.1",
     instructions=INSTRUCTIONS,
     tools=[WebSearchTool()],
     model_settings=ModelSettings(tool_choice="required"),

examples/handoffs/message_filter.py

Lines changed: 10 additions & 0 deletions
@@ -5,6 +5,7 @@
 
 from agents import Agent, HandoffInputData, Runner, function_tool, handoff, trace
 from agents.extensions import handoff_filters
+from agents.models import is_gpt_5_default
 
 
 @function_tool
@@ -14,6 +15,15 @@ def random_number_tool(max: int) -> int:
 
 
 def spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:
+    if is_gpt_5_default():
+        print("gpt-5 is enabled, so we're not filtering the input history")
+        # when using gpt-5, removing some of the items could break things, so we do this filtering only for other models
+        return HandoffInputData(
+            input_history=handoff_message_data.input_history,
+            pre_handoff_items=tuple(handoff_message_data.pre_handoff_items),
+            new_items=tuple(handoff_message_data.new_items),
+        )
+
     # First, we'll remove any tool-related messages from the message history
     handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)
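
Note: the filter above only runs once it is attached to a handoff. A minimal sketch of that wiring, assuming the SDK's `handoff(..., input_filter=...)` parameter (which matches the `HandoffInputData -> HandoffInputData` signature); the agent names here are hypothetical:

# Hypothetical wiring, reusing this file's imports: input_filter runs when the handoff fires.
spanish_agent = Agent(name="Spanish Assistant", instructions="Respond only in Spanish.")
triage_agent = Agent(
    name="Triage Assistant",
    instructions="Hand off to the Spanish assistant if the user speaks Spanish.",
    handoffs=[handoff(spanish_agent, input_filter=spanish_handoff_message_filter)],
)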

examples/handoffs/message_filter_streaming.py

Lines changed: 10 additions & 0 deletions
@@ -5,6 +5,7 @@
 
 from agents import Agent, HandoffInputData, Runner, function_tool, handoff, trace
 from agents.extensions import handoff_filters
+from agents.models import is_gpt_5_default
 
 
 @function_tool
@@ -14,6 +15,15 @@ def random_number_tool(max: int) -> int:
 
 
 def spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:
+    if is_gpt_5_default():
+        print("gpt-5 is enabled, so we're not filtering the input history")
+        # when using gpt-5, removing some of the items could break things, so we do this filtering only for other models
+        return HandoffInputData(
+            input_history=handoff_message_data.input_history,
+            pre_handoff_items=tuple(handoff_message_data.pre_handoff_items),
+            new_items=tuple(handoff_message_data.new_items),
+        )
+
     # First, we'll remove any tool-related messages from the message history
     handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)

examples/hosted_mcp/approvals.py

Lines changed: 4 additions & 1 deletion
@@ -44,7 +44,10 @@ async def main(verbose: bool, stream: bool):
            print(f"Got event of type {event.item.__class__.__name__}")
         print(f"Done streaming; final result: {result.final_output}")
     else:
-        res = await Runner.run(agent, "Which language is this repo written in?")
+        res = await Runner.run(
+            agent,
+            "Which language is this repo written in? Your MCP server should know what the repo is.",
+        )
         print(res.final_output)
 
     if verbose:

examples/hosted_mcp/simple.py

Lines changed: 4 additions & 1 deletion
@@ -29,7 +29,10 @@ async def main(verbose: bool, stream: bool):
            print(f"Got event of type {event.item.__class__.__name__}")
         print(f"Done streaming; final result: {result.final_output}")
     else:
-        res = await Runner.run(agent, "Which language is this repo written in?")
+        res = await Runner.run(
+            agent,
+            "Which language is this repo written in? Your MCP server should know what the repo is.",
+        )
         print(res.final_output)
         # The repository is primarily written in multiple languages, including Rust and TypeScript...

examples/realtime/app/server.py

Lines changed: 4 additions & 1 deletion
@@ -4,11 +4,12 @@
 import logging
 import struct
 from contextlib import asynccontextmanager
-from typing import TYPE_CHECKING, Any, assert_never
+from typing import TYPE_CHECKING, Any
 
 from fastapi import FastAPI, WebSocket, WebSocketDisconnect
 from fastapi.responses import FileResponse
 from fastapi.staticfiles import StaticFiles
+from typing_extensions import assert_never
 
 from agents.realtime import RealtimeRunner, RealtimeSession, RealtimeSessionEvent
 
@@ -111,6 +112,8 @@ async def _serialize_event(self, event: RealtimeSessionEvent) -> dict[str, Any]:
             }
         elif event.type == "error":
             base_event["error"] = str(event.error) if hasattr(event, "error") else "Unknown error"
+        elif event.type == "input_audio_timeout_triggered":
+            pass
         else:
             assert_never(event)
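
Note: moving `assert_never` to `typing_extensions` matters because `typing.assert_never` only exists on Python 3.11+; the backport behaves the same on older interpreters. The new no-op branch is what keeps the `assert_never(event)` exhaustiveness check passing. A minimal sketch of the pattern (the `EventType` union here is hypothetical):

from typing import Literal

from typing_extensions import assert_never

EventType = Literal["error", "input_audio_timeout_triggered"]  # hypothetical union

def describe(event: EventType) -> str:
    if event == "error":
        return "error event"
    elif event == "input_audio_timeout_triggered":
        return "input audio timeout"
    else:
        # Type checkers flag this call if a new EventType member is left unhandled.
        assert_never(event)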

examples/realtime/cli/demo.py

Lines changed: 1 addition & 1 deletion
@@ -52,7 +52,7 @@ def __init__(self) -> None:
         # Audio output state for callback system
         self.output_queue: queue.Queue[Any] = queue.Queue(maxsize=10)  # Buffer more chunks
         self.interrupt_event = threading.Event()
-        self.current_audio_chunk: np.ndarray | None = None  # type: ignore
+        self.current_audio_chunk: np.ndarray[Any, np.dtype[Any]] | None = None
         self.chunk_position = 0
 
     def _output_callback(self, outdata, frames: int, time, status) -> None:
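
Note: the annotation change drops the `# type: ignore` because `np.ndarray` is generic over shape and dtype, so the fully parameterized form type-checks cleanly. A minimal sketch, assuming numpy >= 1.22 and Python >= 3.10 for the `|` union syntax:

from typing import Any

import numpy as np

# Fully parameterized ndarray annotation; strict mypy/pyright accept it without ignores.
current_audio_chunk: np.ndarray[Any, np.dtype[Any]] | None = None
current_audio_chunk = np.zeros(1024, dtype=np.int16)  # e.g. one 16-bit PCM audio chunk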

examples/reasoning_content/main.py

Lines changed: 17 additions & 19 deletions
@@ -1,25 +1,26 @@
 """
 Example demonstrating how to use the reasoning content feature with models that support it.
 
-Some models, like deepseek-reasoner, provide a reasoning_content field in addition to the regular content.
+Some models, like gpt-5, provide a reasoning_content field in addition to the regular content.
 This example shows how to access and use this reasoning content from both streaming and non-streaming responses.
 
 To run this example, you need to:
 1. Set your OPENAI_API_KEY environment variable
-2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
+2. Use a model that supports reasoning content (e.g., gpt-5)
 """
 
 import asyncio
 import os
 from typing import Any, cast
 
 from openai.types.responses import ResponseOutputRefusal, ResponseOutputText
+from openai.types.shared.reasoning import Reasoning
 
 from agents import ModelSettings
 from agents.models.interface import ModelTracing
 from agents.models.openai_provider import OpenAIProvider
 
-MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "deepseek-reasoner"
+MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "gpt-5"
 
 
 async def stream_with_reasoning_content():
@@ -36,10 +37,11 @@ async def stream_with_reasoning_content():
     reasoning_content = ""
     regular_content = ""
 
+    output_text_already_started = False
     async for event in model.stream_response(
         system_instructions="You are a helpful assistant that writes creative content.",
         input="Write a haiku about recursion in programming",
-        model_settings=ModelSettings(),
+        model_settings=ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed")),
         tools=[],
         output_schema=None,
         handoffs=[],
@@ -48,18 +50,16 @@ async def stream_with_reasoning_content():
         prompt=None,
     ):
         if event.type == "response.reasoning_summary_text.delta":
-            print(
-                f"\033[33m{event.delta}\033[0m", end="", flush=True
-            )  # Yellow for reasoning content
+            # Yellow for reasoning content
+            print(f"\033[33m{event.delta}\033[0m", end="", flush=True)
             reasoning_content += event.delta
         elif event.type == "response.output_text.delta":
-            print(f"\033[32m{event.delta}\033[0m", end="", flush=True)  # Green for regular content
+            if not output_text_already_started:
+                print("\n")
+                output_text_already_started = True
+            # Green for regular content
+            print(f"\033[32m{event.delta}\033[0m", end="", flush=True)
             regular_content += event.delta
-
-    print("\n\nReasoning Content:")
-    print(reasoning_content)
-    print("\nRegular Content:")
-    print(regular_content)
     print("\n")
 
 
@@ -77,7 +77,7 @@ async def get_response_with_reasoning_content():
     response = await model.get_response(
         system_instructions="You are a helpful assistant that explains technical concepts clearly.",
         input="Explain the concept of recursion in programming",
-        model_settings=ModelSettings(),
+        model_settings=ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed")),
         tools=[],
         output_schema=None,
         handoffs=[],
@@ -102,12 +102,10 @@ async def get_response_with_reasoning_content():
             refusal_item = cast(Any, content_item)
             regular_content = refusal_item.refusal
 
-    print("\nReasoning Content:")
+    print("\n\n### Reasoning Content:")
     print(reasoning_content or "No reasoning content provided")
-
-    print("\nRegular Content:")
+    print("\n\n### Regular Content:")
     print(regular_content or "No regular content provided")
-
     print("\n")
 
 
@@ -118,7 +116,7 @@ async def main():
     except Exception as e:
         print(f"Error: {e}")
         print("\nNote: This example requires a model that supports reasoning content.")
-        print("You may need to use a specific model like deepseek-reasoner or similar.")
+        print("You may need to use a specific model like gpt-5 or similar.")
 
 
 if __name__ == "__main__":
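
Note: the recurring edit in this file is the `ModelSettings(reasoning=...)` argument; without a `Reasoning` config, a reasoning model may not emit `response.reasoning_summary_text.delta` events at all. A minimal sketch of just that object (per the OpenAI types, `effort` also accepts "low" and "high", and `summary` accepts "auto" and "concise"):

from openai.types.shared.reasoning import Reasoning

from agents import ModelSettings

# effort trades latency/cost for reasoning depth; summary controls how much of the
# reasoning is surfaced as summary-text events.
settings = ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed"))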

examples/reasoning_content/runner_example.py

Lines changed: 23 additions & 40 deletions
@@ -6,17 +6,18 @@
 
 To run this example, you need to:
 1. Set your OPENAI_API_KEY environment variable
-2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
+2. Use a model that supports reasoning content (e.g., gpt-5)
 """
 
 import asyncio
 import os
-from typing import Any
 
-from agents import Agent, Runner, trace
+from openai.types.shared.reasoning import Reasoning
+
+from agents import Agent, ModelSettings, Runner, trace
 from agents.items import ReasoningItem
 
-MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "deepseek-reasoner"
+MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or "gpt-5"
 
 
 async def main():
@@ -27,6 +28,7 @@ async def main():
         name="Reasoning Agent",
         instructions="You are a helpful assistant that explains your reasoning step by step.",
         model=MODEL_NAME,
+        model_settings=ModelSettings(reasoning=Reasoning(effort="medium", summary="detailed")),
     )
 
     # Example 1: Non-streaming response
@@ -35,53 +37,34 @@ async def main():
         result = await Runner.run(
             agent, "What is the square root of 841? Please explain your reasoning."
         )
-
         # Extract reasoning content from the result items
         reasoning_content = None
-        # RunResult has 'response' attribute which has 'output' attribute
-        for item in result.response.output:  # type: ignore
-            if isinstance(item, ReasoningItem):
-                reasoning_content = item.summary[0].text  # type: ignore
+        for item in result.new_items:
+            if isinstance(item, ReasoningItem) and len(item.raw_item.summary) > 0:
+                reasoning_content = item.raw_item.summary[0].text
                 break
 
-        print("\nReasoning Content:")
+        print("\n### Reasoning Content:")
         print(reasoning_content or "No reasoning content provided")
-
-        print("\nFinal Output:")
+        print("\n### Final Output:")
         print(result.final_output)
 
     # Example 2: Streaming response
     with trace("Reasoning Content - Streaming"):
         print("\n=== Example 2: Streaming response ===")
-        print("\nStreaming response:")
-
-        # Buffers to collect reasoning and regular content
-        reasoning_buffer = ""
-        content_buffer = ""
-
-        # RunResultStreaming is async iterable
         stream = Runner.run_streamed(agent, "What is 15 x 27? Please explain your reasoning.")
-
-        async for event in stream:  # type: ignore
-            if isinstance(event, ReasoningItem):
-                # This is reasoning content
-                reasoning_item: Any = event
-                reasoning_buffer += reasoning_item.summary[0].text
-                print(
-                    f"\033[33m{reasoning_item.summary[0].text}\033[0m", end="", flush=True
-                )  # Yellow for reasoning
-            elif hasattr(event, "text"):
-                # This is regular content
-                content_buffer += event.text
-                print(
-                    f"\033[32m{event.text}\033[0m", end="", flush=True
-                )  # Green for regular content
-
-        print("\n\nCollected Reasoning Content:")
-        print(reasoning_buffer)
-
-        print("\nCollected Final Answer:")
-        print(content_buffer)
+        output_text_already_started = False
+        async for event in stream.stream_events():
+            if event.type == "raw_response_event":
+                if event.data.type == "response.reasoning_summary_text.delta":
+                    print(f"\033[33m{event.data.delta}\033[0m", end="", flush=True)
+                elif event.data.type == "response.output_text.delta":
+                    if not output_text_already_started:
+                        print("\n")
+                        output_text_already_started = True
+                    print(f"\033[32m{event.data.delta}\033[0m", end="", flush=True)
+
+        print("\n")
 
 
 if __name__ == "__main__":
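
Note: both reasoning examples read `EXAMPLE_MODEL_NAME` before falling back to "gpt-5", so they can be pointed at another reasoning-capable model without editing the code; a hypothetical invocation:

# Hypothetical shell invocation from the repo root; substitute any reasoning-capable model id.
#   EXAMPLE_MODEL_NAME=o4-mini python examples/reasoning_content/runner_example.py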
