Skip to content

Commit a490cf2

Browse files
authored
Add run_config inheritance for agent-as-tool functionality
1 parent da63d3e commit a490cf2

File tree

1 file changed

+8
-16
lines changed

1 file changed

+8
-16
lines changed

src/agents/agent.py

Lines changed: 8 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -108,8 +108,7 @@ class Agent(Generic[TContext]):
108108
LLM knows what it does and when to invoke it.
109109
"""
110110

111-
handoffs: list[Agent[Any] | Handoff[TContext]
112-
] = field(default_factory=list)
111+
handoffs: list[Agent[Any] | Handoff[TContext]] = field(default_factory=list)
113112
"""Handoffs are sub-agents that the agent can delegate to. You can provide a list of handoffs,
114113
and the agent can choose to delegate to them if relevant. Allows for separation of concerns and
115114
modularity.
@@ -142,14 +141,12 @@ class Agent(Generic[TContext]):
142141
mcp_config: MCPConfig = field(default_factory=lambda: MCPConfig())
143142
"""Configuration for MCP servers."""
144143

145-
input_guardrails: list[InputGuardrail[TContext]
146-
] = field(default_factory=list)
144+
input_guardrails: list[InputGuardrail[TContext]] = field(default_factory=list)
147145
"""A list of checks that run in parallel to the agent's execution, before generating a
148146
response. Runs only if the agent is the first agent in the chain.
149147
"""
150148

151-
output_guardrails: list[OutputGuardrail[TContext]
152-
] = field(default_factory=list)
149+
output_guardrails: list[OutputGuardrail[TContext]] = field(default_factory=list)
153150
"""A list of checks that run on the final output of the agent, after generating a response.
154151
Runs only if the agent produces a final output.
155152
"""
@@ -168,8 +165,7 @@ class Agent(Generic[TContext]):
168165
"""
169166

170167
tool_use_behavior: (
171-
Literal["run_llm_again",
172-
"stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
168+
Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
173169
) = "run_llm_again"
174170
"""This lets you configure how tool use is handled.
175171
- "run_llm_again": The default behavior. Tools are run, and then the LLM receives the results
@@ -203,8 +199,7 @@ def as_tool(
203199
self,
204200
tool_name: str | None,
205201
tool_description: str | None,
206-
custom_output_extractor: Callable[[
207-
RunResult], Awaitable[str]] | None = None,
202+
custom_output_extractor: Callable[[RunResult], Awaitable[str]] | None = None,
208203
) -> Tool:
209204
"""Transform this agent into a tool, callable by other agents.
210205
@@ -223,8 +218,7 @@ def as_tool(
223218
"""
224219

225220
@function_tool(
226-
name_override=tool_name or _transforms.transform_string_function_style(
227-
self.name),
221+
name_override=tool_name or _transforms.transform_string_function_style(self.name),
228222
description_override=tool_description or "",
229223
)
230224
async def run_agent(context: RunContextWrapper, input: str) -> str:
@@ -260,8 +254,7 @@ async def get_system_prompt(self, run_context: RunContextWrapper[TContext]) -> s
260254
else:
261255
return cast(str, self.instructions(run_context, self))
262256
elif self.instructions is not None:
263-
logger.error(
264-
f"Instructions must be a string or a function, got {self.instructions}")
257+
logger.error(f"Instructions must be a string or a function, got {self.instructions}")
265258

266259
return None
267260

@@ -275,8 +268,7 @@ async def get_mcp_tools(
275268
self, run_context: RunContextWrapper[TContext]
276269
) -> list[Tool]:
277270
"""Fetches the available tools from the MCP servers."""
278-
convert_schemas_to_strict = self.mcp_config.get(
279-
"convert_schemas_to_strict", False)
271+
convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
280272
return await MCPUtil.get_all_function_tools(
281273
self.mcp_servers, convert_schemas_to_strict, run_context, self
282274
)

0 commit comments

Comments (0)