from typing import Any, List

import pytest

# Core SDK Imports
from agents.agent import Agent
from agents.run import Runner
from agents.lifecycle import AgentHooks
from agents.tool import Tool, function_tool
from agents.items import ModelResponse
from agents.usage import Usage, InputTokensDetails, OutputTokensDetails
from agents.models.interface import Model

# Types from the openai library used by the SDK
from openai.types.responses import ResponseFunctionToolCall, ResponseOutputMessage

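# These tests are async; they require the pytest-asyncio plugin, either via
# the explicit @pytest.mark.asyncio markers used below or asyncio_mode = "auto".
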
# --- 1. Spy Hook Implementation ---
class LoggingAgentHooks(AgentHooks[Any]):
    def __init__(self):
        super().__init__()
        self.called_hooks: List[str] = []

    # Spy on the NEW hooks
    async def on_llm_start(self, *args, **kwargs):
        self.called_hooks.append("on_llm_start")

    async def on_llm_end(self, *args, **kwargs):
        self.called_hooks.append("on_llm_end")

    # Spy on EXISTING hooks to serve as landmarks for sequence verification
    async def on_start(self, *args, **kwargs):
        self.called_hooks.append("on_start")

    async def on_end(self, *args, **kwargs):
        self.called_hooks.append("on_end")

    async def on_tool_start(self, *args, **kwargs):
        self.called_hooks.append("on_tool_start")

    async def on_tool_end(self, *args, **kwargs):
        self.called_hooks.append("on_tool_end")

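# Note: AgentHooks attaches to a single agent via Agent(hooks=...); the
# run-scoped equivalents live on RunHooks and are passed via Runner.run(hooks=...).
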
# --- 2. Mock Model and Tools ---
class MockModel(Model):
    """A mock model that can be configured to return either a chat message or a tool call."""

    def __init__(self):
        self._call_count = 0
        self._should_call_tool = False
        self._tool_to_call: Tool | None = None

    def configure_for_tool_call(self, tool: Tool):
        self._should_call_tool = True
        self._tool_to_call = tool

    def configure_for_chat(self):
        self._should_call_tool = False
        self._tool_to_call = None

    async def get_response(self, *args, **kwargs) -> ModelResponse:
        self._call_count += 1
        response_items: List[Any] = []

        if self._should_call_tool and self._call_count == 1:
            # First turn of a tool run: emit a tool call so the runner executes the tool.
            response_items.append(
                ResponseFunctionToolCall(
                    name=self._tool_to_call.name,
                    arguments="{}",
                    call_id="call123",
                    type="function_call",
                )
            )
        else:
            # Chat-only runs and follow-up turns: emit a plain assistant message.
            response_items.append(
                ResponseOutputMessage(
                    id="msg1",
                    content=[{"type": "output_text", "text": "Mock response", "annotations": []}],
                    role="assistant",
                    status="completed",
                    type="message",
                )
            )

        mock_usage = Usage(
            requests=1,
            input_tokens=10,
            output_tokens=10,
            total_tokens=20,
            input_tokens_details=InputTokensDetails(cached_tokens=0),
            output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
        )
        return ModelResponse(output=response_items, usage=mock_usage, response_id="resp123")

    async def stream_response(self, *args, **kwargs):
        final_response = await self.get_response(*args, **kwargs)
        from openai.types.responses import ResponseCompletedEvent

        class MockSDKResponse:
            """Minimal stand-in exposing only the attributes the runner reads."""

            def __init__(self, id, output, usage):
                self.id, self.output, self.usage = id, output, usage

        # model_construct skips pydantic validation so the lightweight stand-in
        # is accepted where a full Response object is expected. Note the event
        # type literal is "response.completed", not "response_completed".
        yield ResponseCompletedEvent.model_construct(
            response=MockSDKResponse(final_response.response_id, final_response.output, final_response.usage),
            type="response.completed",
        )

@function_tool
def mock_tool(a: int, b: int) -> int:
    """A mock tool for testing tool call hooks."""
    return a + b

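# Note: @function_tool wraps mock_tool in a FunctionTool whose .name defaults
# to the function name; that is what configure_for_tool_call reads when it
# builds the mock tool-call response.
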
# --- 3. Pytest Fixtures for Test Setup ---
@pytest.fixture
def logging_hooks() -> LoggingAgentHooks:
    """Provides a fresh instance of LoggingAgentHooks for each test."""
    return LoggingAgentHooks()

@pytest.fixture
def chat_agent(logging_hooks: LoggingAgentHooks) -> Agent:
    """Provides an agent configured for a simple chat interaction."""
    mock_model = MockModel()
    mock_model.configure_for_chat()
    return Agent(
        name="ChatAgent",
        instructions="Test agent for chat.",
        model=mock_model,
        hooks=logging_hooks,
    )

@pytest.fixture
def tool_agent(logging_hooks: LoggingAgentHooks) -> Agent:
    """Provides an agent configured to use a tool."""
    mock_model = MockModel()
    mock_model.configure_for_tool_call(mock_tool)
    return Agent(
        name="ToolAgent",
        instructions="Test agent for tools.",
        model=mock_model,
        hooks=logging_hooks,
        tools=[mock_tool],
    )

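# pytest caches function-scoped fixtures per test, so the logging_hooks
# instance injected into each agent fixture is the same object the test
# receives and asserts on.
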
# --- 4. Test Cases Focused on New Hooks ---
@pytest.mark.asyncio
async def test_llm_hooks_fire_in_chat_scenario(
    chat_agent: Agent, logging_hooks: LoggingAgentHooks
):
    """
    Tests that on_llm_start and on_llm_end fire correctly for a chat-only turn.
    """
    await Runner.run(chat_agent, "Hello")

    sequence = logging_hooks.called_hooks

    expected_sequence = [
        "on_start",
        "on_llm_start",
        "on_llm_end",
        "on_end",
    ]
    assert sequence == expected_sequence

@pytest.mark.asyncio
async def test_llm_hooks_wrap_tool_hooks_in_tool_scenario(
    tool_agent: Agent, logging_hooks: LoggingAgentHooks
):
    """
    Tests that on_llm_start and on_llm_end wrap the tool execution cycle.
    """
    await Runner.run(tool_agent, "Use your tool")

    sequence = logging_hooks.called_hooks

    expected_sequence = [
        "on_start",
        "on_llm_start",
        "on_llm_end",
        "on_tool_start",
        "on_tool_end",
        "on_llm_start",
        "on_llm_end",
        "on_end",
    ]
    assert sequence == expected_sequence

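# Hedged sketch, not part of the original suite: assumes Runner.run_streamed
# returns a streaming result whose stream_events() must be drained for the run
# (and MockModel.stream_response) to complete, and that the new LLM hooks fire
# identically in streaming mode.
@pytest.mark.asyncio
async def test_llm_hooks_fire_in_streaming_mode(
    chat_agent: Agent, logging_hooks: LoggingAgentHooks
):
    """Sketch: verifies the chat-turn hook sequence via the streaming path."""
    result = Runner.run_streamed(chat_agent, "Hello")
    async for _event in result.stream_events():
        pass  # Drain the stream; only the recorded hook order matters here.

    assert logging_hooks.called_hooks == [
        "on_start",
        "on_llm_start",
        "on_llm_end",
        "on_end",
    ]
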
@pytest.mark.asyncio
async def test_no_hooks_run_if_hooks_is_none():
    """
    Ensures that the agent runs without error when agent.hooks is None.
    """
    mock_model = MockModel()
    mock_model.configure_for_chat()
    agent_no_hooks = Agent(
        name="NoHooksAgent",
        instructions="Test agent without hooks.",
        model=mock_model,
        hooks=None,
    )

    try:
        await Runner.run(agent_no_hooks, "Hello")
    except Exception as e:
        pytest.fail(f"Runner.run failed when agent.hooks was None: {e}")