@@ -101,7 +101,7 @@ async def get_mcp_tools(self, run_context: RunContextWrapper[TContext]) -> list[
             self.mcp_servers, convert_schemas_to_strict, run_context, self
         )
 
-    async def get_all_tools(self, run_context: RunContextWrapper[Any]) -> list[Tool]:
+    async def get_all_tools(self, run_context: RunContextWrapper[TContext]) -> list[Tool]:
         """All agent tools, including MCP tools and function tools."""
         mcp_tools = await self.get_mcp_tools(run_context)
 
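A minimal sketch of what the narrowed signature enables, assuming the usual top-level exports of the agents package; `UserInfo` and the `list_tools` call site are hypothetical:

```python
from dataclasses import dataclass

from agents import Agent, RunContextWrapper


@dataclass
class UserInfo:
    user_id: str


agent = Agent[UserInfo](name="Assistant")


async def list_tools(ctx: RunContextWrapper[UserInfo]) -> None:
    # With the change above, get_all_tools takes RunContextWrapper[TContext]
    # rather than RunContextWrapper[Any], so a context wrapper typed for this
    # agent's context checks cleanly under a static type checker.
    tools = await agent.get_all_tools(ctx)
    print([tool.name for tool in tools])
```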
@@ -201,14 +201,16 @@ class Agent(AgentBase, Generic[TContext]):
     tool_use_behavior: (
         Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
     ) = "run_llm_again"
-    """This lets you configure how tool use is handled.
+    """
+    This lets you configure how tool use is handled.
     - "run_llm_again": The default behavior. Tools are run, and then the LLM receives the results
         and gets to respond.
     - "stop_on_first_tool": The output of the first tool call is used as the final output. This
         means that the LLM does not process the result of the tool call.
-    - A list of tool names: The agent will stop running if any of the tools in the list are called.
-        The final output will be the output of the first matching tool call. The LLM does not
-        process the result of the tool call.
+    - A StopAtTools object: The agent will stop running if any of the tools listed in
+        `stop_at_tool_names` is called.
+        The final output will be the output of the first matching tool call.
+        The LLM does not process the result of the tool call.
     - A function: If you pass a function, it will be called with the run context and the list of
         tool results. It must return a `ToolsToFinalOutputResult`, which determines whether the tool
         calls result in a final output.
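As a usage sketch of the `StopAtTools` option described in the updated docstring, assuming `Agent`, `StopAtTools`, and `function_tool` are importable from the agents package; the weather tool is hypothetical:

```python
from agents import Agent, StopAtTools, function_tool


@function_tool
def get_weather(city: str) -> str:
    """Return a canned weather report for the given city."""
    return f"It is sunny in {city}."


# The run stops as soon as get_weather is called: its return value becomes the
# final output, and the LLM never processes the tool result.
agent = Agent(
    name="Weather agent",
    tools=[get_weather],
    tool_use_behavior=StopAtTools(stop_at_tool_names=["get_weather"]),
)
```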