From 1d735c15437b31671b68ea54dd1e1a0a3a4bf9f7 Mon Sep 17 00:00:00 2001 From: Muhammad Suhaib <154515857+MuhammedSuhaib@users.noreply.github.com> Date: Mon, 15 Sep 2025 14:52:26 +0500 Subject: [PATCH 1/3] docs: reformat run module docstrings to Google style for better mkdocstrings rendering MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On https://openai.github.io/openai-agents-python/ref/run, the docstrings for the run functions were previously rendered as raw, unstructured text, making them difficult to read. This PR reformats those docstrings into Google style, so they now render cleanly and are reader-friendly on the documentation site. Changes: Reformatted docstrings for run, run_sync, and run_streamed. Added consistent section headers (Args, Returns, Note). Adjusted indentation, spacing, and blank lines. Notes: ⚠️ No content was changed; aside from a missing closing parenthesis, an "it's"/"its" typo, and one comma splice fixed in passing, this is formatting only. Purpose: Improve readability and usability of the reference docs without altering semantics. --- src/agents/run.py | 276 +++++++++++++++++++++++++++++++--------------- 1 file changed, 187 insertions(+), 89 deletions(-) diff --git a/src/agents/run.py b/src/agents/run.py index 1027b2355..4cbbf4b7d 100644 --- a/src/agents/run.py +++ b/src/agents/run.py @@ -243,39 +243,53 @@ async def run( conversation_id: str | None = None, session: Session | None = None, ) -> RunResult: - """Run a workflow starting at the given agent. The agent will run in a loop until a final - output is generated. The loop runs like so: - 1. The agent is invoked with the given input. - 2. If there is a final output (i.e. the agent produces something of type - `agent.output_type`, the loop terminates. - 3. If there's a handoff, we run the loop again, with the new agent. - 4. Else, we run tool calls (if any), and re-run the loop. + """ + Run a workflow starting at the given agent. + + The agent will run in a loop until a final output is generated. The loop runs like so: + + 1. The agent is invoked with the given input. + 2. If there is a final output (i.e. the agent produces something of type + `agent.output_type`), the loop terminates. + 3. If there's a handoff, we run the loop again, with the new agent. + 4. Else, we run tool calls (if any), and re-run the loop. + In two cases, the agent may raise an exception: - 1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. - 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised. - Note that only the first agent's input guardrails are run. + + 1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. + 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered + exception is raised. + + Note: + Only the first agent's input guardrails are run. + Args: starting_agent: The starting agent to run. - input: The initial input to the agent. You can pass a single string for a user message, - or a list of input items. + input: The initial input to the agent. You can pass a single string for a + user message, or a list of input items. context: The context to run the agent with. - max_turns: The maximum number of turns to run the agent for. A turn is defined as one - AI invocation (including any tool calls that might occur). + max_turns: The maximum number of turns to run the agent for. A turn is + defined as one AI invocation (including any tool calls that might occur). hooks: An object that receives callbacks on various lifecycle events. run_config: Global settings for the entire agent run. 
- previous_response_id: The ID of the previous response, if using OpenAI models via the - Responses API, this allows you to skip passing in input from the previous turn. - conversation_id: The conversation ID (https://platform.openai.com/docs/guides/conversation-state?api-mode=responses). + previous_response_id: The ID of the previous response. If using OpenAI + models via the Responses API, this allows you to skip passing in input + from the previous turn. + conversation_id: The conversation ID + (https://platform.openai.com/docs/guides/conversation-state?api-mode=responses). If provided, the conversation will be used to read and write items. - Every agent will have access to the conversation history so far, - and it's output items will be written to the conversation. - We recommend only using this if you are exclusively using OpenAI models; - other model providers don't write to the Conversation object, - so you'll end up having partial conversations stored. + Every agent will have access to the conversation history so far, and + its output items will be written to the conversation. We recommend only + using this if you are exclusively using OpenAI models; other model + providers don't write to the Conversation object, so you'll end up + having partial conversations stored. + Returns: - A run result containing all the inputs, guardrail results and the output of the last - agent. Agents may perform handoffs, so we don't know the specific type of the output. + A run result containing all the inputs, guardrail results and the output of + the last agent. Agents may perform handoffs, so we don't know the specific + type of the output. """ + runner = DEFAULT_AGENT_RUNNER return await runner.run( starting_agent, @@ -303,36 +317,51 @@ def run_sync( conversation_id: str | None = None, session: Session | None = None, ) -> RunResult: - """Run a workflow synchronously, starting at the given agent. Note that this just wraps the - `run` method, so it will not work if there's already an event loop (e.g. inside an async - function, or in a Jupyter notebook or async context like FastAPI). For those cases, use - the `run` method instead. - The agent will run in a loop until a final output is generated. The loop runs like so: - 1. The agent is invoked with the given input. - 2. If there is a final output (i.e. the agent produces something of type - `agent.output_type`, the loop terminates. - 3. If there's a handoff, we run the loop again, with the new agent. - 4. Else, we run tool calls (if any), and re-run the loop. + """ + Run a workflow synchronously, starting at the given agent. + + Note: + This just wraps the `run` method, so it will not work if there's already an + event loop (e.g. inside an async function, or in a Jupyter notebook or async + context like FastAPI). For those cases, use the `run` method instead. + + The agent will run in a loop until a final output is generated. The loop runs: + + 1. The agent is invoked with the given input. + 2. If there is a final output (i.e. the agent produces something of type + `agent.output_type`), the loop terminates. + 3. If there's a handoff, we run the loop again, with the new agent. + 4. Else, we run tool calls (if any), and re-run the loop. + In two cases, the agent may raise an exception: - 1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. - 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised. - Note that only the first agent's input guardrails are run. + + 1. 
If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. + 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered + exception is raised. + + Note: + Only the first agent's input guardrails are run. + Args: starting_agent: The starting agent to run. - input: The initial input to the agent. You can pass a single string for a user message, - or a list of input items. + input: The initial input to the agent. You can pass a single string for a + user message, or a list of input items. context: The context to run the agent with. - max_turns: The maximum number of turns to run the agent for. A turn is defined as one - AI invocation (including any tool calls that might occur). + max_turns: The maximum number of turns to run the agent for. A turn is + defined as one AI invocation (including any tool calls that might occur). hooks: An object that receives callbacks on various lifecycle events. run_config: Global settings for the entire agent run. - previous_response_id: The ID of the previous response, if using OpenAI models via the - Responses API, this allows you to skip passing in input from the previous turn. + previous_response_id: The ID of the previous response, if using OpenAI + models via the Responses API, this allows you to skip passing in input + from the previous turn. conversation_id: The ID of the stored conversation, if any. + Returns: - A run result containing all the inputs, guardrail results and the output of the last - agent. Agents may perform handoffs, so we don't know the specific type of the output. + A run result containing all the inputs, guardrail results and the output of + the last agent. Agents may perform handoffs, so we don't know the specific + type of the output. """ + runner = DEFAULT_AGENT_RUNNER return runner.run_sync( starting_agent, @@ -359,33 +388,48 @@ def run_streamed( conversation_id: str | None = None, session: Session | None = None, ) -> RunResultStreaming: - """Run a workflow starting at the given agent in streaming mode. The returned result object - contains a method you can use to stream semantic events as they are generated. + """ + Run a workflow starting at the given agent in streaming mode. + + The returned result object contains a method you can use to stream semantic + events as they are generated. + The agent will run in a loop until a final output is generated. The loop runs like so: - 1. The agent is invoked with the given input. - 2. If there is a final output (i.e. the agent produces something of type - `agent.output_type`, the loop terminates. - 3. If there's a handoff, we run the loop again, with the new agent. - 4. Else, we run tool calls (if any), and re-run the loop. + + 1. The agent is invoked with the given input. + 2. If there is a final output (i.e. the agent produces something of type + `agent.output_type`), the loop terminates. + 3. If there's a handoff, we run the loop again, with the new agent. + 4. Else, we run tool calls (if any), and re-run the loop. + In two cases, the agent may raise an exception: - 1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. - 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised. - Note that only the first agent's input guardrails are run. + + 1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. + 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered + exception is raised. + + Note: + Only the first agent's input guardrails are run. 
+ Args: starting_agent: The starting agent to run. - input: The initial input to the agent. You can pass a single string for a user message, - or a list of input items. + input: The initial input to the agent. You can pass a single string for a + user message, or a list of input items. context: The context to run the agent with. - max_turns: The maximum number of turns to run the agent for. A turn is defined as one - AI invocation (including any tool calls that might occur). + max_turns: The maximum number of turns to run the agent for. A turn is + defined as one AI invocation (including any tool calls that might occur). hooks: An object that receives callbacks on various lifecycle events. run_config: Global settings for the entire agent run. - previous_response_id: The ID of the previous response, if using OpenAI models via the - Responses API, this allows you to skip passing in input from the previous turn. + previous_response_id: The ID of the previous response, if using OpenAI + models via the Responses API, this allows you to skip passing in input + from the previous turn. conversation_id: The ID of the stored conversation, if any. + Returns: - A result object that contains data about the run, as well as a method to stream events. + A result object that contains data about the run, as well as a method to + stream events. """ + runner = DEFAULT_AGENT_RUNNER return runner.run_streamed( starting_agent, @@ -440,7 +484,9 @@ async def run( disabled=run_config.tracing_disabled, ): current_turn = 0 - original_input: str | list[TResponseInputItem] = _copy_str_or_list(prepared_input) + original_input: str | list[TResponseInputItem] = _copy_str_or_list( + prepared_input + ) generated_items: list[RunItem] = [] model_responses: list[ModelResponse] = [] @@ -459,16 +505,22 @@ async def run( try: while True: - all_tools = await AgentRunner._get_all_tools(current_agent, context_wrapper) + all_tools = await AgentRunner._get_all_tools( + current_agent, context_wrapper + ) # Start an agent span if we don't have one. This span is ended if the current # agent changes, or if the agent loop ends. 
if current_span is None: handoff_names = [ h.agent_name - for h in await AgentRunner._get_handoffs(current_agent, context_wrapper) + for h in await AgentRunner._get_handoffs( + current_agent, context_wrapper + ) ] - if output_schema := AgentRunner._get_output_schema(current_agent): + if output_schema := AgentRunner._get_output_schema( + current_agent + ): output_type_name = output_schema.name() else: output_type_name = "str" @@ -541,7 +593,8 @@ async def run( if isinstance(turn_result.next_step, NextStepFinalOutput): output_guardrail_results = await self._run_output_guardrails( - current_agent.output_guardrails + (run_config.output_guardrails or []), + current_agent.output_guardrails + + (run_config.output_guardrails or []), current_agent, turn_result.next_step.output, context_wrapper, @@ -556,16 +609,22 @@ async def run( output_guardrail_results=output_guardrail_results, context_wrapper=context_wrapper, ) - await self._save_result_to_session(session, [], turn_result.new_step_items) + await self._save_result_to_session( + session, [], turn_result.new_step_items + ) return result elif isinstance(turn_result.next_step, NextStepHandoff): - current_agent = cast(Agent[TContext], turn_result.next_step.new_agent) + current_agent = cast( + Agent[TContext], turn_result.next_step.new_agent + ) current_span.finish(reset_current=True) current_span = None should_run_agent_start_hooks = True elif isinstance(turn_result.next_step, NextStepRunAgain): - await self._save_result_to_session(session, [], turn_result.new_step_items) + await self._save_result_to_session( + session, [], turn_result.new_step_items + ) else: raise AgentsException( f"Unknown next step type: {type(turn_result.next_step)}" @@ -703,7 +762,9 @@ async def _maybe_filter_model_input( effective_input: list[TResponseInputItem] = input_items if run_config.call_model_input_filter is None: - return ModelInputData(input=effective_input, instructions=effective_instructions) + return ModelInputData( + input=effective_input, instructions=effective_instructions + ) try: model_input = ModelInputData( @@ -716,13 +777,21 @@ async def _maybe_filter_model_input( context=context_wrapper.context, ) maybe_updated = run_config.call_model_input_filter(filter_payload) - updated = await maybe_updated if inspect.isawaitable(maybe_updated) else maybe_updated + updated = ( + await maybe_updated + if inspect.isawaitable(maybe_updated) + else maybe_updated + ) if not isinstance(updated, ModelInputData): - raise UserError("call_model_input_filter must return a ModelInputData instance") + raise UserError( + "call_model_input_filter must return a ModelInputData instance" + ) return updated except Exception as e: _error_tracing.attach_error_to_current_span( - SpanError(message="Error in call_model_input_filter", data={"error": str(e)}) + SpanError( + message="Error in call_model_input_filter", data={"error": str(e)} + ) ) raise @@ -792,7 +861,9 @@ async def _start_streaming( should_run_agent_start_hooks = True tool_use_tracker = AgentToolUseTracker() - streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent)) + streamed_result._event_queue.put_nowait( + AgentUpdatedStreamEvent(new_agent=current_agent) + ) try: # Prepare input with session if enabled @@ -850,7 +921,8 @@ async def _start_streaming( streamed_result._input_guardrails_task = asyncio.create_task( cls._run_input_guardrails_with_queue( starting_agent, - starting_agent.input_guardrails + (run_config.input_guardrails or []), + starting_agent.input_guardrails + + 
(run_config.input_guardrails or []), ItemHelpers.input_to_new_input_list(prepared_input), context_wrapper, streamed_result, @@ -898,12 +970,16 @@ async def _start_streaming( ) try: - output_guardrail_results = await streamed_result._output_guardrails_task + output_guardrail_results = ( + await streamed_result._output_guardrails_task + ) except Exception: # Exceptions will be checked in the stream_events loop output_guardrail_results = [] - streamed_result.output_guardrail_results = output_guardrail_results + streamed_result.output_guardrail_results = ( + output_guardrail_results + ) streamed_result.final_output = turn_result.next_step.output streamed_result.is_complete = True @@ -989,7 +1065,9 @@ async def _run_single_turn_streamed( handoffs = await cls._get_handoffs(agent, context_wrapper) model = cls._get_model(agent, run_config) model_settings = agent.model_settings.resolve(run_config.model_settings) - model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings) + model_settings = RunImpl.maybe_reset_tool_choice( + agent, tool_use_tracker, model_settings + ) final_response: ModelResponse | None = None @@ -1007,7 +1085,9 @@ async def _run_single_turn_streamed( # Call hook just before the model is invoked, with the correct system_prompt. await asyncio.gather( - hooks.on_llm_start(context_wrapper, agent, filtered.instructions, filtered.input), + hooks.on_llm_start( + context_wrapper, agent, filtered.instructions, filtered.input + ), ( agent.hooks.on_llm_start( context_wrapper, agent, filtered.instructions, filtered.input @@ -1131,8 +1211,12 @@ async def _run_single_turn_streamed( ] # Create filtered result and send to queue - filtered_result = _dc.replace(single_step_result, new_step_items=items_to_filter) - RunImpl.stream_step_result_to_queue(filtered_result, streamed_result._event_queue) + filtered_result = _dc.replace( + single_step_result, new_step_items=items_to_filter + ) + RunImpl.stream_step_result_to_queue( + filtered_result, streamed_result._event_queue + ) return single_step_result @classmethod @@ -1170,7 +1254,9 @@ async def _run_single_turn( output_schema = cls._get_output_schema(agent) handoffs = await cls._get_handoffs(agent, context_wrapper) input = ItemHelpers.input_to_new_input_list(original_input) - input.extend([generated_item.to_input_item() for generated_item in generated_items]) + input.extend( + [generated_item.to_input_item() for generated_item in generated_items] + ) new_response = await cls._get_new_response( agent, @@ -1232,10 +1318,14 @@ async def _get_single_step_result_from_response( # Send handoff items immediately for streaming, but avoid duplicates if event_queue is not None and processed_response.new_items: handoff_items = [ - item for item in processed_response.new_items if isinstance(item, HandoffCallItem) + item + for item in processed_response.new_items + if isinstance(item, HandoffCallItem) ] if handoff_items: - RunImpl.stream_step_items_to_queue(cast(list[RunItem], handoff_items), event_queue) + RunImpl.stream_step_items_to_queue( + cast(list[RunItem], handoff_items), event_queue + ) return await RunImpl.execute_tools_and_side_effects( agent=agent, @@ -1350,7 +1440,9 @@ async def _run_output_guardrails( guardrail_tasks = [ asyncio.create_task( - RunImpl.run_single_output_guardrail(guardrail, agent, agent_output, context) + RunImpl.run_single_output_guardrail( + guardrail, agent, agent_output, context + ) ) for guardrail in guardrails ] @@ -1403,11 +1495,15 @@ async def _get_new_response( model = cls._get_model(agent, 
run_config) model_settings = agent.model_settings.resolve(run_config.model_settings) - model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings) + model_settings = RunImpl.maybe_reset_tool_choice( + agent, tool_use_tracker, model_settings + ) # If we have run hooks, or if the agent has hooks, we need to call them before the LLM call await asyncio.gather( - hooks.on_llm_start(context_wrapper, agent, filtered.instructions, filtered.input), + hooks.on_llm_start( + context_wrapper, agent, filtered.instructions, filtered.input + ), ( agent.hooks.on_llm_start( context_wrapper, @@ -1569,7 +1665,9 @@ async def _save_result_to_session( _TOOL_CALL_TYPES: tuple[type, ...] = get_args(ToolCallItemTypes) -def _copy_str_or_list(input: str | list[TResponseInputItem]) -> str | list[TResponseInputItem]: +def _copy_str_or_list( + input: str | list[TResponseInputItem], +) -> str | list[TResponseInputItem]: if isinstance(input, str): return input return input.copy() From 9ae3c26ebfab5a2727ae5c5813dea63f4c9d2052 Mon Sep 17 00:00:00 2001 From: Muhammad Suhaib <154515857+MuhammedSuhaib@users.noreply.github.com> Date: Mon, 15 Sep 2025 20:49:15 +0500 Subject: [PATCH 2/3] style: revert incidental code re-wrapping from PATCH 1/3, keep docstring changes --- src/agents/run.py | 108 ++++++++++++---------------------------------- 1 file changed, 27 insertions(+), 81 deletions(-) diff --git a/src/agents/run.py b/src/agents/run.py index 4cbbf4b7d..f6b04e3aa 100644 --- a/src/agents/run.py +++ b/src/agents/run.py @@ -484,9 +484,7 @@ async def run( disabled=run_config.tracing_disabled, ): current_turn = 0 - original_input: str | list[TResponseInputItem] = _copy_str_or_list( - prepared_input - ) + original_input: str | list[TResponseInputItem] = _copy_str_or_list(prepared_input) generated_items: list[RunItem] = [] model_responses: list[ModelResponse] = [] @@ -505,22 +503,16 @@ async def run( try: while True: - all_tools = await AgentRunner._get_all_tools( - current_agent, context_wrapper - ) + all_tools = await AgentRunner._get_all_tools(current_agent, context_wrapper) # Start an agent span if we don't have one. This span is ended if the current # agent changes, or if the agent loop ends. 
if current_span is None: handoff_names = [ h.agent_name - for h in await AgentRunner._get_handoffs( - current_agent, context_wrapper - ) + for h in await AgentRunner._get_handoffs(current_agent, context_wrapper) ] - if output_schema := AgentRunner._get_output_schema( - current_agent - ): + if output_schema := AgentRunner._get_output_schema(current_agent): output_type_name = output_schema.name() else: output_type_name = "str" @@ -593,8 +585,7 @@ async def run( if isinstance(turn_result.next_step, NextStepFinalOutput): output_guardrail_results = await self._run_output_guardrails( - current_agent.output_guardrails - + (run_config.output_guardrails or []), + current_agent.output_guardrails + (run_config.output_guardrails or []), current_agent, turn_result.next_step.output, context_wrapper, @@ -609,22 +600,16 @@ async def run( output_guardrail_results=output_guardrail_results, context_wrapper=context_wrapper, ) - await self._save_result_to_session( - session, [], turn_result.new_step_items - ) + await self._save_result_to_session(session, [], turn_result.new_step_items) return result elif isinstance(turn_result.next_step, NextStepHandoff): - current_agent = cast( - Agent[TContext], turn_result.next_step.new_agent - ) + current_agent = cast(Agent[TContext], turn_result.next_step.new_agent) current_span.finish(reset_current=True) current_span = None should_run_agent_start_hooks = True elif isinstance(turn_result.next_step, NextStepRunAgain): - await self._save_result_to_session( - session, [], turn_result.new_step_items - ) + await self._save_result_to_session(session, [], turn_result.new_step_items) else: raise AgentsException( f"Unknown next step type: {type(turn_result.next_step)}" @@ -762,9 +747,7 @@ async def _maybe_filter_model_input( effective_input: list[TResponseInputItem] = input_items if run_config.call_model_input_filter is None: - return ModelInputData( - input=effective_input, instructions=effective_instructions - ) + return ModelInputData(input=effective_input, instructions=effective_instructions) try: model_input = ModelInputData( @@ -777,21 +760,13 @@ async def _maybe_filter_model_input( context=context_wrapper.context, ) maybe_updated = run_config.call_model_input_filter(filter_payload) - updated = ( - await maybe_updated - if inspect.isawaitable(maybe_updated) - else maybe_updated - ) + updated = await maybe_updated if inspect.isawaitable(maybe_updated) else maybe_updated if not isinstance(updated, ModelInputData): - raise UserError( - "call_model_input_filter must return a ModelInputData instance" - ) + raise UserError("call_model_input_filter must return a ModelInputData instance") return updated except Exception as e: _error_tracing.attach_error_to_current_span( - SpanError( - message="Error in call_model_input_filter", data={"error": str(e)} - ) + SpanError(message="Error in call_model_input_filter", data={"error": str(e)}) ) raise @@ -861,9 +836,7 @@ async def _start_streaming( should_run_agent_start_hooks = True tool_use_tracker = AgentToolUseTracker() - streamed_result._event_queue.put_nowait( - AgentUpdatedStreamEvent(new_agent=current_agent) - ) + streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent)) try: # Prepare input with session if enabled @@ -921,8 +894,7 @@ async def _start_streaming( streamed_result._input_guardrails_task = asyncio.create_task( cls._run_input_guardrails_with_queue( starting_agent, - starting_agent.input_guardrails - + (run_config.input_guardrails or []), + starting_agent.input_guardrails + 
(run_config.input_guardrails or []), ItemHelpers.input_to_new_input_list(prepared_input), context_wrapper, streamed_result, @@ -970,16 +942,12 @@ async def _start_streaming( ) try: - output_guardrail_results = ( - await streamed_result._output_guardrails_task - ) + output_guardrail_results = await streamed_result._output_guardrails_task except Exception: # Exceptions will be checked in the stream_events loop output_guardrail_results = [] - streamed_result.output_guardrail_results = ( - output_guardrail_results - ) + streamed_result.output_guardrail_results = output_guardrail_results streamed_result.final_output = turn_result.next_step.output streamed_result.is_complete = True @@ -1065,9 +1033,7 @@ async def _run_single_turn_streamed( handoffs = await cls._get_handoffs(agent, context_wrapper) model = cls._get_model(agent, run_config) model_settings = agent.model_settings.resolve(run_config.model_settings) - model_settings = RunImpl.maybe_reset_tool_choice( - agent, tool_use_tracker, model_settings - ) + model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings) final_response: ModelResponse | None = None @@ -1085,9 +1051,7 @@ async def _run_single_turn_streamed( # Call hook just before the model is invoked, with the correct system_prompt. await asyncio.gather( - hooks.on_llm_start( - context_wrapper, agent, filtered.instructions, filtered.input - ), + hooks.on_llm_start(context_wrapper, agent, filtered.instructions, filtered.input), ( agent.hooks.on_llm_start( context_wrapper, agent, filtered.instructions, filtered.input @@ -1211,12 +1175,8 @@ async def _run_single_turn_streamed( ] # Create filtered result and send to queue - filtered_result = _dc.replace( - single_step_result, new_step_items=items_to_filter - ) - RunImpl.stream_step_result_to_queue( - filtered_result, streamed_result._event_queue - ) + filtered_result = _dc.replace(single_step_result, new_step_items=items_to_filter) + RunImpl.stream_step_result_to_queue(filtered_result, streamed_result._event_queue) return single_step_result @classmethod @@ -1254,9 +1214,7 @@ async def _run_single_turn( output_schema = cls._get_output_schema(agent) handoffs = await cls._get_handoffs(agent, context_wrapper) input = ItemHelpers.input_to_new_input_list(original_input) - input.extend( - [generated_item.to_input_item() for generated_item in generated_items] - ) + input.extend([generated_item.to_input_item() for generated_item in generated_items]) new_response = await cls._get_new_response( agent, @@ -1318,14 +1276,10 @@ async def _get_single_step_result_from_response( # Send handoff items immediately for streaming, but avoid duplicates if event_queue is not None and processed_response.new_items: handoff_items = [ - item - for item in processed_response.new_items - if isinstance(item, HandoffCallItem) + item for item in processed_response.new_items if isinstance(item, HandoffCallItem) ] if handoff_items: - RunImpl.stream_step_items_to_queue( - cast(list[RunItem], handoff_items), event_queue - ) + RunImpl.stream_step_items_to_queue(cast(list[RunItem], handoff_items), event_queue) return await RunImpl.execute_tools_and_side_effects( agent=agent, @@ -1440,9 +1394,7 @@ async def _run_output_guardrails( guardrail_tasks = [ asyncio.create_task( - RunImpl.run_single_output_guardrail( - guardrail, agent, agent_output, context - ) + RunImpl.run_single_output_guardrail(guardrail, agent, agent_output, context) ) for guardrail in guardrails ] @@ -1495,15 +1447,11 @@ async def _get_new_response( model = cls._get_model(agent, 
run_config) model_settings = agent.model_settings.resolve(run_config.model_settings) - model_settings = RunImpl.maybe_reset_tool_choice( - agent, tool_use_tracker, model_settings - ) + model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings) # If we have run hooks, or if the agent has hooks, we need to call them before the LLM call await asyncio.gather( - hooks.on_llm_start( - context_wrapper, agent, filtered.instructions, filtered.input - ), + hooks.on_llm_start(context_wrapper, agent, filtered.instructions, filtered.input), ( agent.hooks.on_llm_start( context_wrapper, @@ -1665,9 +1613,7 @@ async def _save_result_to_session( _TOOL_CALL_TYPES: tuple[type, ...] = get_args(ToolCallItemTypes) -def _copy_str_or_list( - input: str | list[TResponseInputItem], -) -> str | list[TResponseInputItem]: +def _copy_str_or_list(input: str | list[TResponseInputItem]) -> str | list[TResponseInputItem]: if isinstance(input, str): return input return input.copy() From 2288fe48ff8fbaa1491192b74eb0b4c6ceb166f7 Mon Sep 17 00:00:00 2001 From: Suhaib Umair <154515857+MuhammedSuhaib@users.noreply.github.com> Date: Tue, 16 Sep 2025 15:01:49 +0500 Subject: [PATCH 3/3] Keep the correct grammar → its output items ✅ (not it's). MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/agents/run.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/agents/run.py b/src/agents/run.py index 0ae03b22a..42339eb50 100644 --- a/src/agents/run.py +++ b/src/agents/run.py @@ -279,7 +279,7 @@ async def run( (https://platform.openai.com/docs/guides/conversation-state?api-mode=responses). If provided, the conversation will be used to read and write items. Every agent will have access to the conversation history so far, - and it's output items will be written to the conversation. + and its output items will be written to the conversation. We recommend only using this if you are exclusively using OpenAI models; other model providers don't write to the Conversation object, so you'll end up having partial conversations stored. @@ -427,7 +427,7 @@ def run_streamed( from the previous turn. conversation_id: The ID of the stored conversation, if any. session: A session for automatic conversation history management. - + Returns: A result object that contains data about the run, as well as a method to stream events.
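-- 

Usage sketch for the entry points these docstrings document. This is a minimal, illustrative example rather than part of the patch series: it assumes the SDK's public `Agent` and `Runner` API, an `OPENAI_API_KEY` in the environment, and placeholder agent names and prompts.

import asyncio

from agents import Agent, Runner


async def main() -> None:
    # Placeholder agent; name and instructions are illustrative.
    agent = Agent(name="Assistant", instructions="Reply concisely.")

    # Non-streaming: Runner.run drives the loop the docstrings describe:
    # invoke the agent, stop on a final output, follow handoffs, or
    # execute tool calls and run the loop again.
    result = await Runner.run(agent, "Write a haiku about recursion.")
    print(result.final_output)

    # Streaming: run_streamed returns a RunResultStreaming whose
    # stream_events() method yields semantic events as they are generated.
    streamed = Runner.run_streamed(agent, "Now explain that haiku.")
    async for event in streamed.stream_events():
        if event.type == "agent_updated_stream_event":
            print("current agent:", event.new_agent.name)
    print(streamed.final_output)


# Runner.run_sync wraps Runner.run and, per its docstring, cannot be used
# where an event loop is already running (async code, notebooks, FastAPI).
asyncio.run(main())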