
Commit 83876d5

feat: add streaming and tests to new hooks

1 parent: e03a144

2 files changed: +61 −7 lines


src/agents/run.py

Lines changed: 13 additions & 4 deletions
@@ -935,14 +935,19 @@ async def _run_single_turn_streamed(
         input = ItemHelpers.input_to_new_input_list(streamed_result.input)
         input.extend([item.to_input_item() for item in streamed_result.new_items])
 
+        # THIS IS THE RESOLVED CONFLICT BLOCK
         filtered = await cls._maybe_filter_model_input(
             agent=agent,
             run_config=run_config,
             context_wrapper=context_wrapper,
             input_items=input,
             system_instructions=system_prompt,
         )
-
+
+        # Call hook just before the model is invoked, with the correct system_prompt.
+        if agent.hooks:
+            await agent.hooks.on_llm_start(context_wrapper, agent, filtered.instructions, filtered.input)
+
         # 1. Stream the output events
         async for event in model.stream_response(
             filtered.instructions,
@@ -979,6 +984,10 @@ async def _run_single_turn_streamed(
 
             streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
 
+        # Call hook just after the model response is finalized.
+        if agent.hooks:
+            await agent.hooks.on_llm_end(context_wrapper, agent, final_response)
+
         # 2. At this point, the streaming is complete for this turn of the agent loop.
         if not final_response:
             raise ModelBehaviorError("Model did not produce a final response!")
@@ -1257,8 +1266,8 @@ async def _get_new_response(
             await agent.hooks.on_llm_start(
                 context_wrapper,
                 agent,
-                system_prompt,
-                input
+                filtered.instructions,  # Use filtered instructions
+                filtered.input  # Use filtered input
             )
 
         new_response = await model.get_response(
@@ -1395,4 +1404,4 @@ async def _save_result_to_session(
 def _copy_str_or_list(input: str | list[TResponseInputItem]) -> str | list[TResponseInputItem]:
     if isinstance(input, str):
         return input
-    return input.copy()
+    return input.copy()
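
Taken together, the run.py changes make a streamed run fire on_llm_start (with the filtered system instructions and input) just before model.stream_response, and on_llm_end (with the finalized ModelResponse) once streaming completes, matching the non-streamed path. Below is a minimal sketch of a hooks class that would observe both calls; the class name, parameter names, and print statements are illustrative assumptions, while the method shapes follow the call sites above and the test file in this commit.

    from agents.agent import Agent
    from agents.items import ModelResponse
    from agents.lifecycle import AgentHooks
    from agents.run_context import RunContextWrapper, TContext


    class LoggingHooks(AgentHooks):
        # Called just before the model is invoked, on both the streamed and
        # non-streamed paths, with the filtered instructions and input.
        async def on_llm_start(
            self,
            context: RunContextWrapper[TContext],
            agent: Agent[TContext],
            system_prompt,  # filtered.instructions at the call site
            input_items,  # filtered.input at the call site
        ) -> None:
            print(f"[{agent.name}] LLM call starting")

        # Called once the model response is finalized, just after the
        # streaming event loop drains.
        async def on_llm_end(
            self,
            context: RunContextWrapper[TContext],
            agent: Agent[TContext],
            response: ModelResponse,
        ) -> None:
            print(f"[{agent.name}] LLM call finished")

Attached via Agent(..., hooks=LoggingHooks()), this would print once before and once after each model call in a run.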

tests/test_agent_llm_hooks.py

Lines changed: 48 additions & 3 deletions
@@ -4,7 +4,7 @@
 import pytest
 
 from agents.agent import Agent
-from agents.items import ModelResponse, TResponseInputItem
+from agents.items import ItemHelpers, ModelResponse, TResponseInputItem
 from agents.lifecycle import AgentHooks
 from agents.run import Runner
 from agents.run_context import RunContextWrapper, TContext
@@ -63,7 +63,7 @@ async def on_llm_start(
 
     async def on_llm_end(
         self,
-        ccontext: RunContextWrapper[TContext],
+        context: RunContextWrapper[TContext],
         agent: Agent[TContext],
         response: ModelResponse,
     ) -> None:
@@ -72,7 +72,7 @@ async def on_llm_end(
 
 # Example test using the above hooks:
 @pytest.mark.asyncio
-async def test_non_streamed_agent_hooks_with_llm():
+async def test_async_agent_hooks_with_llm():
     hooks = AgentHooksForTests()
     model = FakeModel()
     agent = Agent(
@@ -81,5 +81,50 @@ async def test_non_streamed_agent_hooks_with_llm():
     # Simulate a single LLM call producing an output:
     model.set_next_output([get_text_message("hello")])
     await Runner.run(agent, input="hello")
+    # Expect one on_start, one on_llm_start, one on_llm_end, and one on_end
+    assert hooks.events == {"on_start": 1, "on_llm_start": 1, "on_llm_end": 1, "on_end": 1}
+
+
+# test_sync_agent_hook_with_llm()
+def test_sync_agent_hook_with_llm():
+    hooks = AgentHooksForTests()
+    model = FakeModel()
+    agent = Agent(
+        name="A", model=model, tools=[get_function_tool("f", "res")], handoffs=[], hooks=hooks
+    )
+    # Simulate a single LLM call producing an output:
+    model.set_next_output([get_text_message("hello")])
+    Runner.run_sync(agent, input="hello")
+    # Expect one on_start, one on_llm_start, one on_llm_end, and one on_end
+    assert hooks.events == {"on_start": 1, "on_llm_start": 1, "on_llm_end": 1, "on_end": 1}
+
+
+# test_streamed_agent_hooks_with_llm():
+@pytest.mark.asyncio
+async def test_streamed_agent_hooks_with_llm():
+    hooks = AgentHooksForTests()
+    model = FakeModel()
+    agent = Agent(
+        name="A", model=model, tools=[get_function_tool("f", "res")], handoffs=[], hooks=hooks
+    )
+    # Simulate a single LLM call producing an output:
+    model.set_next_output([get_text_message("hello")])
+    stream = Runner.run_streamed(agent, input="hello")
+
+    async for event in stream.stream_events():
+        if event.type == "raw_response_event":
+            continue
+        if event.type == "agent_updated_stream_event":
+            print(f"[EVENT] agent_updated → {event.new_agent.name}")
+        elif event.type == "run_item_stream_event":
+            item = event.item
+            if item.type == "tool_call_item":
+                print("[EVENT] tool_call_item")
+            elif item.type == "tool_call_output_item":
+                print(f"[EVENT] tool_call_output_item → {item.output}")
+            elif item.type == "message_output_item":
+                text = ItemHelpers.text_message_output(item)
+                print(f"[EVENT] message_output_item → {text}")
+
     # Expect one on_start, one on_llm_start, one on_llm_end, and one on_end
     assert hooks.events == {"on_start": 1, "on_llm_start": 1, "on_llm_end": 1, "on_end": 1}
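
All three tests assert against hooks.events, a per-hook call counter on AgentHooksForTests, which the tests instantiate and which is presumably defined earlier in the test file, outside the hunks shown. A rough sketch of that counting pattern, assuming each overridden hook simply increments its own key:

    from collections import defaultdict

    from agents.lifecycle import AgentHooks


    class AgentHooksForTests(AgentHooks):
        def __init__(self) -> None:
            # Maps hook name -> number of times it fired, e.g. {"on_start": 1}.
            self.events: dict[str, int] = defaultdict(int)

        async def on_start(self, context, agent) -> None:
            self.events["on_start"] += 1

        async def on_end(self, context, agent, output) -> None:
            self.events["on_end"] += 1

        # on_llm_start and on_llm_end (whose signatures appear in the hunks
        # above) would increment their keys the same way.

With a standard pytest plus pytest-asyncio setup (implied by the @pytest.mark.asyncio markers), python -m pytest tests/test_agent_llm_hooks.py would exercise the async, sync, and streamed paths.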
