@@ -4,7 +4,7 @@
 import pytest
 
 from agents.agent import Agent
-from agents.items import ModelResponse, TResponseInputItem
+from agents.items import ItemHelpers, ModelResponse, TResponseInputItem
 from agents.lifecycle import AgentHooks
 from agents.run import Runner
 from agents.run_context import RunContextWrapper, TContext
@@ -63,7 +63,7 @@ async def on_llm_start(
 
     async def on_llm_end(
         self,
-        ccontext: RunContextWrapper[TContext],
+        context: RunContextWrapper[TContext],
         agent: Agent[TContext],
         response: ModelResponse,
     ) -> None:
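
The hooks.events assertions in the tests below depend on an AgentHooksForTests helper that this diff does not include. The following is a minimal sketch of what such a counting hooks class could look like, reusing the imports from the top of the file; the on_start, on_end, and on_llm_start signatures here are assumptions based on the SDK's AgentHooks lifecycle interface, not code from this PR:

from collections import defaultdict
from typing import Any

class AgentHooksForTests(AgentHooks):
    # Sketch only; the repo's actual fixture may differ. Counts every
    # lifecycle callback so tests can assert on the exact totals.
    def __init__(self) -> None:
        self.events: dict[str, int] = defaultdict(int)

    async def on_start(
        self, context: RunContextWrapper[TContext], agent: Agent[TContext]
    ) -> None:
        self.events["on_start"] += 1

    async def on_end(
        self, context: RunContextWrapper[TContext], agent: Agent[TContext], output: Any
    ) -> None:
        self.events["on_end"] += 1

    async def on_llm_start(
        self,
        context: RunContextWrapper[TContext],
        agent: Agent[TContext],
        system_prompt: str | None,
        input_items: list[TResponseInputItem],
    ) -> None:
        self.events["on_llm_start"] += 1

    async def on_llm_end(
        self,
        context: RunContextWrapper[TContext],
        agent: Agent[TContext],
        response: ModelResponse,
    ) -> None:
        self.events["on_llm_end"] += 1

A defaultdict(int) compares equal to a plain dict with the same items, so the literal-dict assertions in the tests work unchanged.
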
@@ -72,7 +72,7 @@ async def on_llm_end(
 
 # Example test using the above hooks:
 @pytest.mark.asyncio
-async def test_non_streamed_agent_hooks_with_llm():
+async def test_async_agent_hooks_with_llm():
     hooks = AgentHooksForTests()
     model = FakeModel()
     agent = Agent(
@@ -81,5 +81,49 @@ async def test_non_streamed_agent_hooks_with_llm():
     # Simulate a single LLM call producing an output:
     model.set_next_output([get_text_message("hello")])
     await Runner.run(agent, input="hello")
+    # Expect one on_start, one on_llm_start, one on_llm_end, and one on_end
+    assert hooks.events == {"on_start": 1, "on_llm_start": 1, "on_llm_end": 1, "on_end": 1}
+
+
+def test_sync_agent_hooks_with_llm():
+    hooks = AgentHooksForTests()
+    model = FakeModel()
+    agent = Agent(
+        name="A", model=model, tools=[get_function_tool("f", "res")], handoffs=[], hooks=hooks
+    )
+    # Simulate a single LLM call producing an output:
+    model.set_next_output([get_text_message("hello")])
+    Runner.run_sync(agent, input="hello")
+    # Expect one on_start, one on_llm_start, one on_llm_end, and one on_end
+    assert hooks.events == {"on_start": 1, "on_llm_start": 1, "on_llm_end": 1, "on_end": 1}
+
+
+@pytest.mark.asyncio
+async def test_streamed_agent_hooks_with_llm():
+    hooks = AgentHooksForTests()
+    model = FakeModel()
+    agent = Agent(
+        name="A", model=model, tools=[get_function_tool("f", "res")], handoffs=[], hooks=hooks
+    )
+    # Simulate a single LLM call producing an output:
+    model.set_next_output([get_text_message("hello")])
+    stream = Runner.run_streamed(agent, input="hello")
+
+    # Drain the stream, logging each non-raw event as it arrives:
+    async for event in stream.stream_events():
+        if event.type == "raw_response_event":
+            continue
+        if event.type == "agent_updated_stream_event":
+            print(f"[EVENT] agent_updated → {event.new_agent.name}")
+        elif event.type == "run_item_stream_event":
+            item = event.item
+            if item.type == "tool_call_item":
+                print("[EVENT] tool_call_item")
+            elif item.type == "tool_call_output_item":
+                print(f"[EVENT] tool_call_output_item → {item.output}")
+            elif item.type == "message_output_item":
+                text = ItemHelpers.text_message_output(item)
+                print(f"[EVENT] message_output_item → {text}")
+
     # Expect one on_start, one on_llm_start, one on_llm_end, and one on_end
     assert hooks.events == {"on_start": 1, "on_llm_start": 1, "on_llm_end": 1, "on_end": 1}
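
FakeModel, get_text_message, and get_function_tool also come from the repo's shared test helpers rather than this diff. The contract the tests rely on is small: set_next_output queues the output items the fake model will return on its next turn, so a run completes without any network call. A rough sketch of that queueing behavior, illustrative only since the real helper implements the SDK's full Model interface:

class FakeModelSketch:
    # Illustrative stand-in for the shared FakeModel helper: replays canned
    # output items instead of calling a real LLM.
    def __init__(self) -> None:
        self._turn_outputs: list[list[object]] = []

    def set_next_output(self, output: list[object]) -> None:
        # Items queued here (e.g. [get_text_message("hello")]) become the
        # output of the next simulated model turn.
        self._turn_outputs.append(output)

    def get_next_output(self) -> list[object]:
        return self._turn_outputs.pop(0) if self._turn_outputs else []

One design note: Runner.run_sync drives its own event loop internally, which is why test_sync_agent_hooks_with_llm is deliberately a plain def without the @pytest.mark.asyncio marker; calling run_sync from inside an already-running loop would fail.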