Commit 34e097d

style: Apply ruff formatting across project
1 parent 853b595 commit 34e097d

3 files changed: +67 −56 lines changed

src/agents/lifecycle.py

Lines changed: 8 additions & 11 deletions
@@ -1,6 +1,7 @@
-from typing import Any, Generic,List
-from .items import ModelResponse, TResponseInputItem
+from typing import Any, Generic
+
 from .agent import Agent
+from .items import ModelResponse, TResponseInputItem
 from .run_context import RunContextWrapper, TContext
 from .tool import Tool
@@ -10,28 +11,24 @@ class RunHooks(Generic[TContext]):
     override the methods you need.
     """
 
-    #Two new hook methods added to the RunHooks class to handle LLM start and end events.
-    #These methods allow you to perform actions just before and after the LLM call for an agent.
-    #This is useful for logging, monitoring, or modifying the context before and after the LLM call.
+    # Two new hook methods added to the RunHooks class to handle LLM start and end events.
+    # These methods allow you to perform actions just before and after the LLM call for an agent.
+    # This is useful for logging, monitoring, or modifying the context before and after the LLM call
     async def on_llm_start(
         self,
         context: RunContextWrapper[TContext],
         agent: Agent[TContext],
         system_prompt: str | None,
-        input_items: List[TResponseInputItem]
+        input_items: list[TResponseInputItem],
     ) -> None:
         """Called just before invoking the LLM for this agent."""
         pass
 
     async def on_llm_end(
-        self,
-        context: RunContextWrapper[TContext],
-        agent: Agent[TContext],
-        response: ModelResponse
+        self, context: RunContextWrapper[TContext], agent: Agent[TContext], response: ModelResponse
     ) -> None:
         """Called immediately after the LLM call returns for this agent."""
         pass
-
 
     async def on_agent_start(
         self, context: RunContextWrapper[TContext], agent: Agent[TContext]
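For reference, the two new hooks can be overridden in a custom hooks class. The following is a minimal illustrative sketch, not part of this commit: it assumes only the signatures shown in the diff above, and the class name and print-based logging are made up for the example.

# Minimal sketch (assumed usage, not from this commit): log every LLM call in a run.
from typing import Any

from agents.agent import Agent
from agents.items import ModelResponse, TResponseInputItem
from agents.lifecycle import RunHooks
from agents.run_context import RunContextWrapper


class LoggingRunHooks(RunHooks[Any]):
    """Prints a line just before and just after each LLM call."""

    async def on_llm_start(
        self,
        context: RunContextWrapper[Any],
        agent: Agent[Any],
        system_prompt: str | None,
        input_items: list[TResponseInputItem],
    ) -> None:
        print(f"[{agent.name}] LLM call starting with {len(input_items)} input item(s)")

    async def on_llm_end(
        self,
        context: RunContextWrapper[Any],
        agent: Agent[Any],
        response: ModelResponse,
    ) -> None:
        print(f"[{agent.name}] LLM call finished; usage: {response.usage}")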

src/agents/run.py

Lines changed: 2 additions & 11 deletions
@@ -1069,12 +1069,7 @@ async def _get_new_response(
         model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
         # If the agent has hooks, we need to call them before and after the LLM call
         if agent.hooks:
-            await agent.hooks.on_llm_start(
-                context_wrapper,
-                agent,
-                system_prompt,
-                input
-            )
+            await agent.hooks.on_llm_start(context_wrapper, agent, system_prompt, input)
 
         new_response = await model.get_response(
             system_instructions=system_prompt,
@@ -1091,11 +1086,7 @@ async def _get_new_response(
         )
         # If the agent has hooks, we need to call them after the LLM call
         if agent.hooks:
-            await agent.hooks.on_llm_end(
-                context_wrapper,
-                agent,
-                new_response
-            )
+            await agent.hooks.on_llm_end(context_wrapper, agent, new_response)
 
         context_wrapper.usage.add(new_response.usage)
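These call sites only run when an agent has hooks attached (the "if agent.hooks:" guard above). A rough wiring sketch follows; it mirrors the test fixtures below rather than this commit, the agent name and instructions are placeholders, and a default model configuration (e.g. an OpenAI API key) is assumed.

# Assumed wiring example, not from this commit: attach hooks to an agent and run it.
import asyncio
from typing import Any

from agents.agent import Agent
from agents.lifecycle import AgentHooks
from agents.run import Runner


class PrintHooks(AgentHooks[Any]):
    async def on_llm_start(self, *args, **kwargs):
        print("about to call the LLM")

    async def on_llm_end(self, *args, **kwargs):
        print("LLM call returned")


async def main() -> None:
    # agent.hooks is set here, so the guard in _get_new_response fires both hooks.
    agent = Agent(name="Assistant", instructions="Be helpful.", hooks=PrintHooks())
    await Runner.run(agent, "Hello")


asyncio.run(main())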

tests/test_agent_llm_hooks.py

Lines changed: 57 additions & 34 deletions
@@ -1,25 +1,25 @@
-
-from typing import Any, List
+from typing import Any
 
 import pytest
 
+# Types from the openai library used by the SDK
+from openai.types.responses import ResponseFunctionToolCall, ResponseOutputMessage
+
 # Core SDK Imports
 from agents.agent import Agent
-from agents.run import Runner
-from agents.lifecycle import AgentHooks
-from agents.tool import Tool, function_tool, FunctionTool
 from agents.items import ModelResponse
-from agents.usage import Usage, InputTokensDetails, OutputTokensDetails
+from agents.lifecycle import AgentHooks
 from agents.models.interface import Model
+from agents.run import Runner
+from agents.tool import Tool, function_tool
+from agents.usage import InputTokensDetails, OutputTokensDetails, Usage
 
-# Types from the openai library used by the SDK
-from openai.types.responses import ResponseFunctionToolCall, ResponseOutputMessage
 
 # --- 1. Spy Hook Implementation ---
 class LoggingAgentHooks(AgentHooks[Any]):
     def __init__(self):
         super().__init__()
-        self.called_hooks: List[str] = []
+        self.called_hooks: list[str] = []
 
     # Spy on the NEW hooks
     async def on_llm_start(self, *args, **kwargs):
@@ -41,9 +41,11 @@ async def on_tool_start(self, *args, **kwargs):
     async def on_tool_end(self, *args, **kwargs):
         self.called_hooks.append("on_tool_end")
 
+
 # --- 2. Mock Model and Tools ---
 class MockModel(Model):
     """A mock model that can be configured to either return a chat message or a tool call."""
+
     def __init__(self):
         self._call_count = 0
         self._should_call_tool = False
@@ -59,54 +61,77 @@ def configure_for_chat(self):
 
     async def get_response(self, *args, **kwargs) -> ModelResponse:
         self._call_count += 1
-        response_items: List[Any] = []
+        response_items: list[Any] = []
 
         if self._should_call_tool and self._call_count == 1:
             response_items.append(
-                ResponseFunctionToolCall(name=self._tool_to_call.name, arguments='{}', call_id="call123", type="function_call")
+                ResponseFunctionToolCall(
+                    name=self._tool_to_call.name,
+                    arguments="{}",
+                    call_id="call123",
+                    type="function_call",
+                )
             )
         else:
             response_items.append(
-                ResponseOutputMessage(id="msg1", content=[{"type":"output_text", "text":"Mock response", "annotations":[]}], role="assistant", status="completed", type="message")
+                ResponseOutputMessage(
+                    id="msg1",
+                    content=[{"type": "output_text", "text": "Mock response", "annotations": []}],
+                    role="assistant",
+                    status="completed",
+                    type="message",
+                )
             )
-
+
         mock_usage = Usage(
-            requests=1, input_tokens=10, output_tokens=10, total_tokens=20,
+            requests=1,
+            input_tokens=10,
+            output_tokens=10,
+            total_tokens=20,
             input_tokens_details=InputTokensDetails(cached_tokens=0),
-            output_tokens_details=OutputTokensDetails(reasoning_tokens=0)
+            output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
         )
         return ModelResponse(output=response_items, usage=mock_usage, response_id="resp123")
 
     async def stream_response(self, *args, **kwargs):
         final_response = await self.get_response(*args, **kwargs)
         from openai.types.responses import ResponseCompletedEvent
+
         class MockSDKResponse:
-            def __init__(self, id, output, usage): self.id, self.output, self.usage = id, output, usage
-        yield ResponseCompletedEvent(response=MockSDKResponse(final_response.response_id, final_response.output, final_response.usage), type="response_completed")
+            def __init__(self, id, output, usage):
+                self.id, self.output, self.usage = id, output, usage
+
+        yield ResponseCompletedEvent(
+            response=MockSDKResponse(
+                final_response.response_id, final_response.output, final_response.usage
+            ),
+            type="response_completed",
+        )
+
 
 @function_tool
 def mock_tool(a: int, b: int) -> int:
     """A mock tool for testing tool call hooks."""
     return a + b
 
+
 # --- 3. Pytest Fixtures for Test Setup ---
 @pytest.fixture
 def logging_hooks() -> LoggingAgentHooks:
     """Provides a fresh instance of LoggingAgentHooks for each test."""
     return LoggingAgentHooks()
 
+
 @pytest.fixture
 def chat_agent(logging_hooks: LoggingAgentHooks) -> Agent:
     """Provides an agent configured for a simple chat interaction."""
     mock_model = MockModel()
     mock_model.configure_for_chat()
     return Agent(
-        name="ChatAgent",
-        instructions="Test agent for chat.",
-        model=mock_model,
-        hooks=logging_hooks
+        name="ChatAgent", instructions="Test agent for chat.", model=mock_model, hooks=logging_hooks
     )
 
+
 @pytest.fixture
 def tool_agent(logging_hooks: LoggingAgentHooks) -> Agent:
     """Provides an agent configured to use a tool."""
@@ -117,21 +142,20 @@ def tool_agent(logging_hooks: LoggingAgentHooks) -> Agent:
         instructions="Test agent for tools.",
         model=mock_model,
         hooks=logging_hooks,
-        tools=[mock_tool]
+        tools=[mock_tool],
     )
 
+
 # --- 4. Test Cases Focused on New Hooks ---
 @pytest.mark.asyncio
-async def test_llm_hooks_fire_in_chat_scenario(
-    chat_agent: Agent, logging_hooks: LoggingAgentHooks
-):
+async def test_llm_hooks_fire_in_chat_scenario(chat_agent: Agent, logging_hooks: LoggingAgentHooks):
     """
     Tests that on_llm_start and on_llm_end fire correctly for a chat-only turn.
     """
     await Runner.run(chat_agent, "Hello")
-
+
     sequence = logging_hooks.called_hooks
-
+
     expected_sequence = [
         "on_start",
         "on_llm_start",
@@ -140,6 +164,7 @@ async def test_llm_hooks_fire_in_chat_scenario(
     ]
     assert sequence == expected_sequence
 
+
 @pytest.mark.asyncio
 async def test_llm_hooks_wrap_tool_hooks_in_tool_scenario(
     tool_agent: Agent, logging_hooks: LoggingAgentHooks
@@ -159,10 +184,11 @@ async def test_llm_hooks_wrap_tool_hooks_in_tool_scenario(
         "on_tool_end",
         "on_llm_start",
         "on_llm_end",
-        "on_end"
+        "on_end",
     ]
     assert sequence == expected_sequence
 
+
 @pytest.mark.asyncio
 async def test_no_hooks_run_if_hooks_is_none():
     """
@@ -171,13 +197,10 @@ async def test_no_hooks_run_if_hooks_is_none():
     mock_model = MockModel()
     mock_model.configure_for_chat()
     agent_no_hooks = Agent(
-        name="NoHooksAgent",
-        instructions="Test agent without hooks.",
-        model=mock_model,
-        hooks=None
+        name="NoHooksAgent", instructions="Test agent without hooks.", model=mock_model, hooks=None
     )
-
+
     try:
         await Runner.run(agent_no_hooks, "Hello")
     except Exception as e:
-        pytest.fail(f"Runner.run failed when agent.hooks was None: {e}")
+        pytest.fail(f"Runner.run failed when agent.hooks was None: {e}")
