Skip to content

Commit da63d3e

Browse files
authored
Add run_config inheritance for agent-as-tool functionality
1 parent 1cee869 commit da63d3e

File tree

1 file changed: +26 additions, −14 deletions

src/agents/agent.py

Lines changed: 26 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,8 @@ class Agent(Generic[TContext]):
108108
LLM knows what it does and when to invoke it.
109109
"""
110110

111-
handoffs: list[Agent[Any] | Handoff[TContext]] = field(default_factory=list)
111+
handoffs: list[Agent[Any] | Handoff[TContext]
112+
] = field(default_factory=list)
112113
"""Handoffs are sub-agents that the agent can delegate to. You can provide a list of handoffs,
113114
and the agent can choose to delegate to them if relevant. Allows for separation of concerns and
114115
modularity.
@@ -141,12 +142,14 @@ class Agent(Generic[TContext]):
141142
mcp_config: MCPConfig = field(default_factory=lambda: MCPConfig())
142143
"""Configuration for MCP servers."""
143144

144-
input_guardrails: list[InputGuardrail[TContext]] = field(default_factory=list)
145+
input_guardrails: list[InputGuardrail[TContext]
146+
] = field(default_factory=list)
145147
"""A list of checks that run in parallel to the agent's execution, before generating a
146148
response. Runs only if the agent is the first agent in the chain.
147149
"""
148150

149-
output_guardrails: list[OutputGuardrail[TContext]] = field(default_factory=list)
151+
output_guardrails: list[OutputGuardrail[TContext]
152+
] = field(default_factory=list)
150153
"""A list of checks that run on the final output of the agent, after generating a response.
151154
Runs only if the agent produces a final output.
152155
"""
@@ -165,7 +168,8 @@ class Agent(Generic[TContext]):
165168
"""
166169

167170
tool_use_behavior: (
168-
Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
171+
Literal["run_llm_again",
172+
"stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
169173
) = "run_llm_again"
170174
"""This lets you configure how tool use is handled.
171175
- "run_llm_again": The default behavior. Tools are run, and then the LLM receives the results
@@ -187,10 +191,6 @@ class Agent(Generic[TContext]):
187191
"""Whether to reset the tool choice to the default value after a tool has been called. Defaults
188192
to True. This ensures that the agent doesn't enter an infinite loop of tool usage."""
189193

190-
def __post_init__(self):
191-
if not isinstance(self.name, str):
192-
raise TypeError(f"Agent name must be a string, got {type(self.name).__name__}")
193-
194194
def clone(self, **kwargs: Any) -> Agent[TContext]:
195195
"""Make a copy of the agent, with the given arguments changed. For example, you could do:
196196
```
@@ -203,15 +203,16 @@ def as_tool(
203203
self,
204204
tool_name: str | None,
205205
tool_description: str | None,
206-
custom_output_extractor: Callable[[RunResult], Awaitable[str]] | None = None,
206+
custom_output_extractor: Callable[[
207+
RunResult], Awaitable[str]] | None = None,
207208
) -> Tool:
208209
"""Transform this agent into a tool, callable by other agents.
209210
210211
This is different from handoffs in two ways:
211212
1. In handoffs, the new agent receives the conversation history. In this tool, the new agent
212-
receives generated input.
213+
receives generated input.
213214
2. In handoffs, the new agent takes over the conversation. In this tool, the new agent is
214-
called as a tool, and the conversation is continued by the original agent.
215+
called as a tool, and the conversation is continued by the original agent.
215216
216217
Args:
217218
tool_name: The name of the tool. If not provided, the agent's name will be used.
@@ -222,16 +223,25 @@ def as_tool(
222223
"""
223224

224225
@function_tool(
225-
name_override=tool_name or _transforms.transform_string_function_style(self.name),
226+
name_override=tool_name or _transforms.transform_string_function_style(
227+
self.name),
226228
description_override=tool_description or "",
227229
)
228230
async def run_agent(context: RunContextWrapper, input: str) -> str:
229231
from .run import Runner
232+
from .tracing import get_current_trace
233+
234+
# Get the current run_config from the trace context if available
235+
run_config = None
236+
current_trace = get_current_trace()
237+
if current_trace and hasattr(current_trace, '_run_config'):
238+
run_config = current_trace._run_config
230239

231240
output = await Runner.run(
232241
starting_agent=self,
233242
input=input,
234243
context=context.context,
244+
run_config=run_config,
235245
)
236246
if custom_output_extractor:
237247
return await custom_output_extractor(output)
@@ -250,7 +260,8 @@ async def get_system_prompt(self, run_context: RunContextWrapper[TContext]) -> s
250260
else:
251261
return cast(str, self.instructions(run_context, self))
252262
elif self.instructions is not None:
253-
logger.error(f"Instructions must be a string or a function, got {self.instructions}")
263+
logger.error(
264+
f"Instructions must be a string or a function, got {self.instructions}")
254265

255266
return None
256267

@@ -264,7 +275,8 @@ async def get_mcp_tools(
264275
self, run_context: RunContextWrapper[TContext]
265276
) -> list[Tool]:
266277
"""Fetches the available tools from the MCP servers."""
267-
convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
278+
convert_schemas_to_strict = self.mcp_config.get(
279+
"convert_schemas_to_strict", False)
268280
return await MCPUtil.get_all_function_tools(
269281
self.mcp_servers, convert_schemas_to_strict, run_context, self
270282
)

0 commit comments

Comments (0)