-
-from typing import Any, List
+from typing import Any
 
 import pytest
 
+# Types from the openai library used by the SDK
+from openai.types.responses import ResponseFunctionToolCall, ResponseOutputMessage
+
 # Core SDK Imports
 from agents.agent import Agent
-from agents.run import Runner
-from agents.lifecycle import AgentHooks
-from agents.tool import Tool, function_tool, FunctionTool
 from agents.items import ModelResponse
-from agents.usage import Usage, InputTokensDetails, OutputTokensDetails
+from agents.lifecycle import AgentHooks
 from agents.models.interface import Model
+from agents.run import Runner
+from agents.tool import Tool, function_tool
+from agents.usage import InputTokensDetails, OutputTokensDetails, Usage
 
-# Types from the openai library used by the SDK
-from openai.types.responses import ResponseFunctionToolCall, ResponseOutputMessage
 
 # --- 1. Spy Hook Implementation ---
 class LoggingAgentHooks(AgentHooks[Any]):
     def __init__(self):
         super().__init__()
-        self.called_hooks: List[str] = []
+        self.called_hooks: list[str] = []
 
     # Spy on the NEW hooks
     async def on_llm_start(self, *args, **kwargs):
@@ -41,9 +41,11 @@ async def on_tool_start(self, *args, **kwargs):
     async def on_tool_end(self, *args, **kwargs):
         self.called_hooks.append("on_tool_end")
 
+
 # --- 2. Mock Model and Tools ---
 class MockModel(Model):
     """A mock model that can be configured to either return a chat message or a tool call."""
+
     def __init__(self):
         self._call_count = 0
         self._should_call_tool = False
@@ -59,54 +61,77 @@ def configure_for_chat(self):
 
     async def get_response(self, *args, **kwargs) -> ModelResponse:
         self._call_count += 1
-        response_items: List[Any] = []
+        response_items: list[Any] = []
 
         if self._should_call_tool and self._call_count == 1:
             response_items.append(
-                ResponseFunctionToolCall(name=self._tool_to_call.name, arguments='{}', call_id="call123", type="function_call")
+                ResponseFunctionToolCall(
+                    name=self._tool_to_call.name,
+                    arguments="{}",
+                    call_id="call123",
+                    type="function_call",
+                )
             )
         else:
             response_items.append(
-                ResponseOutputMessage(id="msg1", content=[{"type":"output_text", "text":"Mock response", "annotations":[]}], role="assistant", status="completed", type="message")
+                ResponseOutputMessage(
+                    id="msg1",
+                    content=[{"type": "output_text", "text": "Mock response", "annotations": []}],
+                    role="assistant",
+                    status="completed",
+                    type="message",
+                )
             )
-
+
         mock_usage = Usage(
-            requests=1, input_tokens=10, output_tokens=10, total_tokens=20,
+            requests=1,
+            input_tokens=10,
+            output_tokens=10,
+            total_tokens=20,
             input_tokens_details=InputTokensDetails(cached_tokens=0),
-            output_tokens_details=OutputTokensDetails(reasoning_tokens=0)
+            output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
         )
         return ModelResponse(output=response_items, usage=mock_usage, response_id="resp123")
 
     async def stream_response(self, *args, **kwargs):
         final_response = await self.get_response(*args, **kwargs)
         from openai.types.responses import ResponseCompletedEvent
+
         class MockSDKResponse:
-            def __init__(self, id, output, usage): self.id, self.output, self.usage = id, output, usage
-        yield ResponseCompletedEvent(response=MockSDKResponse(final_response.response_id, final_response.output, final_response.usage), type="response_completed")
+            def __init__(self, id, output, usage):
+                self.id, self.output, self.usage = id, output, usage
+
+        yield ResponseCompletedEvent(
+            response=MockSDKResponse(
+                final_response.response_id, final_response.output, final_response.usage
+            ),
+            type="response_completed",
+        )
+
 
 @function_tool
 def mock_tool(a: int, b: int) -> int:
     """A mock tool for testing tool call hooks."""
     return a + b
 
+
 # --- 3. Pytest Fixtures for Test Setup ---
 @pytest.fixture
 def logging_hooks() -> LoggingAgentHooks:
     """Provides a fresh instance of LoggingAgentHooks for each test."""
     return LoggingAgentHooks()
 
+
 @pytest.fixture
 def chat_agent(logging_hooks: LoggingAgentHooks) -> Agent:
     """Provides an agent configured for a simple chat interaction."""
     mock_model = MockModel()
     mock_model.configure_for_chat()
     return Agent(
-        name="ChatAgent",
-        instructions="Test agent for chat.",
-        model=mock_model,
-        hooks=logging_hooks
+        name="ChatAgent", instructions="Test agent for chat.", model=mock_model, hooks=logging_hooks
     )
 
+
 @pytest.fixture
 def tool_agent(logging_hooks: LoggingAgentHooks) -> Agent:
     """Provides an agent configured to use a tool."""
@@ -117,21 +142,20 @@ def tool_agent(logging_hooks: LoggingAgentHooks) -> Agent:
         instructions="Test agent for tools.",
         model=mock_model,
         hooks=logging_hooks,
-        tools=[mock_tool]
+        tools=[mock_tool],
     )
 
+
 # --- 4. Test Cases Focused on New Hooks ---
 @pytest.mark.asyncio
-async def test_llm_hooks_fire_in_chat_scenario(
-    chat_agent: Agent, logging_hooks: LoggingAgentHooks
-):
+async def test_llm_hooks_fire_in_chat_scenario(chat_agent: Agent, logging_hooks: LoggingAgentHooks):
     """
     Tests that on_llm_start and on_llm_end fire correctly for a chat-only turn.
     """
     await Runner.run(chat_agent, "Hello")
-
+
     sequence = logging_hooks.called_hooks
-
+
     expected_sequence = [
         "on_start",
         "on_llm_start",
@@ -140,6 +164,7 @@ async def test_llm_hooks_fire_in_chat_scenario(
     ]
     assert sequence == expected_sequence
 
+
 @pytest.mark.asyncio
 async def test_llm_hooks_wrap_tool_hooks_in_tool_scenario(
     tool_agent: Agent, logging_hooks: LoggingAgentHooks
@@ -159,10 +184,11 @@ async def test_llm_hooks_wrap_tool_hooks_in_tool_scenario(
         "on_tool_end",
         "on_llm_start",
         "on_llm_end",
-        "on_end"
+        "on_end",
     ]
     assert sequence == expected_sequence
 
+
 @pytest.mark.asyncio
 async def test_no_hooks_run_if_hooks_is_none():
     """
@@ -171,13 +197,10 @@ async def test_no_hooks_run_if_hooks_is_none():
     mock_model = MockModel()
     mock_model.configure_for_chat()
     agent_no_hooks = Agent(
-        name="NoHooksAgent",
-        instructions="Test agent without hooks.",
-        model=mock_model,
-        hooks=None
+        name="NoHooksAgent", instructions="Test agent without hooks.", model=mock_model, hooks=None
     )
-
+
     try:
         await Runner.run(agent_no_hooks, "Hello")
     except Exception as e:
-        pytest.fail(f"Runner.run failed when agent.hooks was None: {e}")
+        pytest.fail(f"Runner.run failed when agent.hooks was None: {e}")