@@ -108,7 +108,8 @@ class Agent(Generic[TContext]):
    LLM knows what it does and when to invoke it.
    """

-    handoffs: list[Agent[Any] | Handoff[TContext]] = field(default_factory=list)
+    handoffs: list[Agent[Any] | Handoff[TContext]
+                   ] = field(default_factory=list)
    """Handoffs are sub-agents that the agent can delegate to. You can provide a list of handoffs,
    and the agent can choose to delegate to them if relevant. Allows for separation of concerns and
    modularity.
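
For reference, a minimal sketch of how the re-wrapped `handoffs` field is populated by callers; the agent names and instructions below are illustrative, not part of this diff:

```python
from agents import Agent

# Illustrative sub-agents (names/instructions invented for this sketch).
billing_agent = Agent(name="Billing agent", instructions="Handle billing questions.")
refund_agent = Agent(name="Refund agent", instructions="Handle refund requests.")

# The parent agent may delegate to either sub-agent via the handoffs list.
triage_agent = Agent(
    name="Triage agent",
    instructions="Route the user to the right specialist.",
    handoffs=[billing_agent, refund_agent],
)
```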
@@ -141,12 +142,14 @@ class Agent(Generic[TContext]):
    mcp_config: MCPConfig = field(default_factory=lambda: MCPConfig())
    """Configuration for MCP servers."""

-    input_guardrails: list[InputGuardrail[TContext]] = field(default_factory=list)
+    input_guardrails: list[InputGuardrail[TContext]
+                           ] = field(default_factory=list)
    """A list of checks that run in parallel to the agent's execution, before generating a
    response. Runs only if the agent is the first agent in the chain.
    """

-    output_guardrails: list[OutputGuardrail[TContext]] = field(default_factory=list)
+    output_guardrails: list[OutputGuardrail[TContext]
+                            ] = field(default_factory=list)
    """A list of checks that run on the final output of the agent, after generating a response.
    Runs only if the agent produces a final output.
    """
@@ -165,7 +168,8 @@ class Agent(Generic[TContext]):
    """

    tool_use_behavior: (
-        Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
+        Literal["run_llm_again",
+                "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
    ) = "run_llm_again"
    """This lets you configure how tool use is handled.
    - "run_llm_again": The default behavior. Tools are run, and then the LLM receives the results
@@ -187,10 +191,6 @@ class Agent(Generic[TContext]):
    """Whether to reset the tool choice to the default value after a tool has been called. Defaults
    to True. This ensures that the agent doesn't enter an infinite loop of tool usage."""

-    def __post_init__(self):
-        if not isinstance(self.name, str):
-            raise TypeError(f"Agent name must be a string, got {type(self.name).__name__}")
-
    def clone(self, **kwargs: Any) -> Agent[TContext]:
        """Make a copy of the agent, with the given arguments changed. For example, you could do:
        ```
@@ -203,15 +203,16 @@ def as_tool(
        self,
        tool_name: str | None,
        tool_description: str | None,
-        custom_output_extractor: Callable[[RunResult], Awaitable[str]] | None = None,
+        custom_output_extractor: Callable[[
+            RunResult], Awaitable[str]] | None = None,
    ) -> Tool:
        """Transform this agent into a tool, callable by other agents.

        This is different from handoffs in two ways:
        1. In handoffs, the new agent receives the conversation history. In this tool, the new agent
-           receives generated input.
+        receives generated input.
        2. In handoffs, the new agent takes over the conversation. In this tool, the new agent is
-           called as a tool, and the conversation is continued by the original agent.
+        called as a tool, and the conversation is continued by the original agent.

        Args:
            tool_name: The name of the tool. If not provided, the agent's name will be used.
@@ -222,16 +223,25 @@ def as_tool(
        """

        @function_tool(
-            name_override=tool_name or _transforms.transform_string_function_style(self.name),
+            name_override=tool_name or _transforms.transform_string_function_style(
+                self.name),
            description_override=tool_description or "",
        )
        async def run_agent(context: RunContextWrapper, input: str) -> str:
            from .run import Runner
+            from .tracing import get_current_trace
+
+            # Get the current run_config from the trace context if available
+            run_config = None
+            current_trace = get_current_trace()
+            if current_trace and hasattr(current_trace, '_run_config'):
+                run_config = current_trace._run_config

            output = await Runner.run(
                starting_agent=self,
                input=input,
                context=context.context,
+                run_config=run_config,
            )
            if custom_output_extractor:
                return await custom_output_extractor(output)
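
Note that the new `run_agent` body reads the run configuration from a private `_run_config` attribute on the current trace, which this hunk only reads; presumably the rest of the PR attaches it when the outer run starts. A sketch of the intended end-to-end behavior, with invented agents and an assumed model name:

```python
import asyncio

from agents import Agent, RunConfig, Runner

summarizer = Agent(name="Summarizer", instructions="Summarize the input text.")
orchestrator = Agent(
    name="Orchestrator",
    instructions="Use the summarize tool to condense text when asked.",
    tools=[summarizer.as_tool(tool_name="summarize", tool_description="Summarize text.")],
)

async def main() -> None:
    # With this change, the RunConfig passed here is expected to be picked up from the
    # active trace and forwarded to the nested Runner.run() inside run_agent, so the
    # inner summarizer call uses the same run settings as the outer run.
    result = await Runner.run(
        orchestrator,
        input="Condense: The quick brown fox jumps over the lazy dog.",
        run_config=RunConfig(model="gpt-4o-mini"),
    )
    print(result.final_output)

asyncio.run(main())
```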
@@ -250,7 +260,8 @@ async def get_system_prompt(self, run_context: RunContextWrapper[TContext]) -> s
            else:
                return cast(str, self.instructions(run_context, self))
        elif self.instructions is not None:
-            logger.error(f"Instructions must be a string or a function, got {self.instructions}")
+            logger.error(
+                f"Instructions must be a string or a function, got {self.instructions}")

        return None

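The branch above accepts callable instructions with the signature `(run_context, agent)`; a minimal sketch of a dynamic-instructions function matching that call (the context attribute is invented for illustration):

```python
from agents import Agent, RunContextWrapper

def dynamic_instructions(ctx: RunContextWrapper, agent: Agent) -> str:
    # Mirrors the self.instructions(run_context, self) call in get_system_prompt.
    user_name = getattr(ctx.context, "user_name", "there")  # illustrative context attribute
    return f"You are {agent.name}. Greet {user_name} and answer concisely."

greeter = Agent(name="Greeter", instructions=dynamic_instructions)
```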
@@ -264,7 +275,8 @@ async def get_mcp_tools(
        self, run_context: RunContextWrapper[TContext]
    ) -> list[Tool]:
        """Fetches the available tools from the MCP servers."""
-        convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
+        convert_schemas_to_strict = self.mcp_config.get(
+            "convert_schemas_to_strict", False)
        return await MCPUtil.get_all_function_tools(
            self.mcp_servers, convert_schemas_to_strict, run_context, self
        )
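
For reference, a sketch of how `mcp_config` feeds the wrapped `convert_schemas_to_strict` lookup above; the MCP server import path and launch command are assumptions for illustration:

```python
from agents import Agent
from agents.mcp import MCPServerStdio  # assumed import path for the stdio MCP server helper

async def build_files_agent() -> Agent:
    # Illustrative MCP server launched over stdio; command/args are placeholders.
    fs_server = MCPServerStdio(
        params={"command": "npx", "args": ["-y", "@modelcontextprotocol/server-filesystem", "."]}
    )
    await fs_server.connect()

    # With convert_schemas_to_strict set, get_mcp_tools() asks MCPUtil to coerce the
    # servers' tool schemas into strict JSON schema mode where possible.
    return Agent(
        name="Files agent",
        instructions="Use the filesystem tools to answer questions about local files.",
        mcp_servers=[fs_server],
        mcp_config={"convert_schemas_to_strict": True},
    )
```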