@@ -1,25 +1,25 @@
-
-from typing import Any, List
+from typing import Any

 import pytest

+# Types from the openai library used by the SDK
+from openai.types.responses import ResponseFunctionToolCall, ResponseOutputMessage
+
 # Core SDK Imports
 from agents.agent import Agent
-from agents.run import Runner
-from agents.lifecycle import AgentHooks
-from agents.tool import Tool, function_tool, FunctionTool
 from agents.items import ModelResponse
-from agents.usage import Usage, InputTokensDetails, OutputTokensDetails
+from agents.lifecycle import AgentHooks
 from agents.models.interface import Model
+from agents.run import Runner
+from agents.tool import Tool, function_tool
+from agents.usage import InputTokensDetails, OutputTokensDetails, Usage

-# Types from the openai library used by the SDK
-from openai.types.responses import ResponseFunctionToolCall, ResponseOutputMessage

 # --- 1. Spy Hook Implementation ---
 class LoggingAgentHooks(AgentHooks[Any]):
     def __init__(self):
         super().__init__()
-        self.called_hooks: List[str] = []
+        self.called_hooks: list[str] = []

     # Spy on the NEW hooks
     async def on_llm_start(self, *args, **kwargs):
@@ -41,9 +41,11 @@ async def on_tool_start(self, *args, **kwargs):
     async def on_tool_end(self, *args, **kwargs):
         self.called_hooks.append("on_tool_end")

+
 # --- 2. Mock Model and Tools ---
 class MockModel(Model):
     """A mock model that can be configured to either return a chat message or a tool call."""
+
     def __init__(self):
         self._call_count = 0
         self._should_call_tool = False
@@ -59,54 +61,77 @@ def configure_for_chat(self):

     async def get_response(self, *args, **kwargs) -> ModelResponse:
         self._call_count += 1
-        response_items: List[Any] = []
+        response_items: list[Any] = []

         if self._should_call_tool and self._call_count == 1:
             response_items.append(
-                ResponseFunctionToolCall(name=self._tool_to_call.name, arguments='{}', call_id="call123", type="function_call")
+                ResponseFunctionToolCall(
+                    name=self._tool_to_call.name,
+                    arguments="{}",
+                    call_id="call123",
+                    type="function_call",
+                )
             )
         else:
             response_items.append(
-                ResponseOutputMessage(id="msg1", content=[{"type":"output_text", "text":"Mock response", "annotations":[]}], role="assistant", status="completed", type="message")
+                ResponseOutputMessage(
+                    id="msg1",
+                    content=[{"type": "output_text", "text": "Mock response", "annotations": []}],
+                    role="assistant",
+                    status="completed",
+                    type="message",
+                )
             )
-
+
         mock_usage = Usage(
-            requests=1, input_tokens=10, output_tokens=10, total_tokens=20,
+            requests=1,
+            input_tokens=10,
+            output_tokens=10,
+            total_tokens=20,
             input_tokens_details=InputTokensDetails(cached_tokens=0),
-            output_tokens_details=OutputTokensDetails(reasoning_tokens=0)
+            output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
         )
         return ModelResponse(output=response_items, usage=mock_usage, response_id="resp123")

     async def stream_response(self, *args, **kwargs):
         final_response = await self.get_response(*args, **kwargs)
         from openai.types.responses import ResponseCompletedEvent
+
         class MockSDKResponse:
-            def __init__(self, id, output, usage): self.id, self.output, self.usage = id, output, usage
-        yield ResponseCompletedEvent(response=MockSDKResponse(final_response.response_id, final_response.output, final_response.usage), type="response_completed")
+            def __init__(self, id, output, usage):
+                self.id, self.output, self.usage = id, output, usage
+
+        yield ResponseCompletedEvent(
+            response=MockSDKResponse(
+                final_response.response_id, final_response.output, final_response.usage
+            ),
+            type="response_completed",
+        )
+

 @function_tool
 def mock_tool(a: int, b: int) -> int:
     """A mock tool for testing tool call hooks."""
     return a + b

+
 # --- 3. Pytest Fixtures for Test Setup ---
 @pytest.fixture
 def logging_hooks() -> LoggingAgentHooks:
     """Provides a fresh instance of LoggingAgentHooks for each test."""
     return LoggingAgentHooks()

+
 @pytest.fixture
 def chat_agent(logging_hooks: LoggingAgentHooks) -> Agent:
     """Provides an agent configured for a simple chat interaction."""
     mock_model = MockModel()
     mock_model.configure_for_chat()
     return Agent(
-        name="ChatAgent",
-        instructions="Test agent for chat.",
-        model=mock_model,
-        hooks=logging_hooks
+        name="ChatAgent", instructions="Test agent for chat.", model=mock_model, hooks=logging_hooks
     )

+
 @pytest.fixture
 def tool_agent(logging_hooks: LoggingAgentHooks) -> Agent:
     """Provides an agent configured to use a tool."""
@@ -117,21 +142,20 @@ def tool_agent(logging_hooks: LoggingAgentHooks) -> Agent:
         instructions="Test agent for tools.",
         model=mock_model,
         hooks=logging_hooks,
-        tools=[mock_tool]
+        tools=[mock_tool],
     )

+
 # --- 4. Test Cases Focused on New Hooks ---
 @pytest.mark.asyncio
-async def test_llm_hooks_fire_in_chat_scenario(
-    chat_agent: Agent, logging_hooks: LoggingAgentHooks
-):
+async def test_llm_hooks_fire_in_chat_scenario(chat_agent: Agent, logging_hooks: LoggingAgentHooks):
     """
     Tests that on_llm_start and on_llm_end fire correctly for a chat-only turn.
     """
     await Runner.run(chat_agent, "Hello")
-
+
     sequence = logging_hooks.called_hooks
-
+
     expected_sequence = [
         "on_start",
         "on_llm_start",
@@ -140,6 +164,7 @@ async def test_llm_hooks_fire_in_chat_scenario(
     ]
     assert sequence == expected_sequence

+
 @pytest.mark.asyncio
 async def test_llm_hooks_wrap_tool_hooks_in_tool_scenario(
     tool_agent: Agent, logging_hooks: LoggingAgentHooks
@@ -159,10 +184,11 @@ async def test_llm_hooks_wrap_tool_hooks_in_tool_scenario(
         "on_tool_end",
         "on_llm_start",
         "on_llm_end",
-        "on_end"
+        "on_end",
     ]
     assert sequence == expected_sequence

+
 @pytest.mark.asyncio
 async def test_no_hooks_run_if_hooks_is_none():
     """
@@ -171,13 +197,10 @@ async def test_no_hooks_run_if_hooks_is_none():
     mock_model = MockModel()
     mock_model.configure_for_chat()
     agent_no_hooks = Agent(
-        name="NoHooksAgent",
-        instructions="Test agent without hooks.",
-        model=mock_model,
-        hooks=None
+        name="NoHooksAgent", instructions="Test agent without hooks.", model=mock_model, hooks=None
     )
-
+
     try:
         await Runner.run(agent_no_hooks, "Hello")
     except Exception as e:
-        pytest.fail(f"Runner.run failed when agent.hooks was None: {e}")
+        pytest.fail(f"Runner.run failed when agent.hooks was None: {e}")
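
For readers skimming the diff, the sketch below shows what the new `on_llm_start`/`on_llm_end` hooks look like from a caller's point of view, outside the test suite. It is a minimal illustration based only on the API surface exercised above (`AgentHooks`, `Agent`, `Runner.run`, and the hook names): the hook signatures are left as `*args, **kwargs` exactly as in the test because the concrete parameters are not shown here, the agent name and instructions are made up, and actually running it would need a real model configuration (e.g. an OpenAI API key) rather than the `MockModel` used in the tests.

```python
# Minimal sketch (not part of the diff): subclassing AgentHooks to observe the
# new LLM lifecycle hooks on a real agent. Signatures mirror the *args/**kwargs
# form used in the test above; agent name and instructions are illustrative.
import asyncio
from typing import Any

from agents.agent import Agent
from agents.lifecycle import AgentHooks
from agents.run import Runner


class PrintingHooks(AgentHooks[Any]):
    async def on_llm_start(self, *args: Any, **kwargs: Any) -> None:
        print("LLM call starting")

    async def on_llm_end(self, *args: Any, **kwargs: Any) -> None:
        print("LLM call finished")


async def main() -> None:
    agent = Agent(
        name="DemoAgent",
        instructions="Answer briefly.",
        hooks=PrintingHooks(),
    )
    result = await Runner.run(agent, "Hello")
    print(result.final_output)


if __name__ == "__main__":
    asyncio.run(main())
```

As in the fixtures above, per-agent hooks are attached via the `hooks=` argument, so the same spy pattern used by `LoggingAgentHooks` carries over unchanged to non-test code.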