@@ -477,3 +477,167 @@ async def run_agent_streamed_auto_send(
             max_turns=max_turns,
             previous_response_id=previous_response_id,
         )
+
+    async def run(
+        self,
+        agent: Agent,
+        input: str | list[dict[str, Any]],
+        task_id: str,
+        *,
+        trace_id: str | None = None,
+        parent_span_id: str | None = None,
+        start_to_close_timeout: timedelta = timedelta(seconds=600),
+        heartbeat_timeout: timedelta = timedelta(seconds=600),
+        retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
+        max_turns: int | None = None,
+        mcp_server_params: list[StdioServerParameters] | None = None,
+        previous_response_id: str | None = None,
+    ) -> SerializableRunResultStreaming | RunResultStreaming:
496+ """
497+ Run an OpenAI Agent with automatic streaming to AgentEx UI.
498+
499+ This is a simple wrapper that lets you use standard OpenAI Agents SDK
500+ patterns while getting AgentEx features (streaming, tracing, TaskMessages).
501+
502+ Works everywhere: Temporal workflows, sync agents (FastACP), standalone scripts.
503+
504+ Example:
505+ from agents import Agent, function_tool, ModelSettings
506+ from openai.types.shared import Reasoning
507+
508+ @function_tool
509+ def get_weather(city: str) -> str:
510+ return f"Weather in {city}: Sunny"
511+
512+ agent = Agent(
513+ name="Weather Bot",
514+ instructions="Help with weather",
515+ model="gpt-4o",
516+ model_settings=ModelSettings(
517+ parallel_tool_calls=True,
518+ reasoning=Reasoning(effort="low", summary="auto")
519+ ),
520+ tools=[get_weather]
521+ )
522+
523+ result = await adk.providers.openai.run(
524+ agent=agent,
525+ input="What's the weather in Tokyo?",
526+ task_id=params.task.id,
527+ trace_id=params.task.id,
528+ parent_span_id=span.id,
529+ )
530+
531+ Args:
532+ agent: Standard OpenAI Agents SDK Agent object
533+ input: User message (str) or conversation history (list of dicts)
534+ task_id: AgentEx task ID for streaming
535+ trace_id: Optional trace ID (defaults to task_id)
536+ parent_span_id: Optional parent span for nested tracing
537+ start_to_close_timeout: Maximum time allowed for the operation
538+ heartbeat_timeout: Maximum time between heartbeats
539+ retry_policy: Policy for retrying failed operations
540+ max_turns: Max conversation turns (default from Runner)
541+ mcp_server_params: Optional MCP server configurations
542+ previous_response_id: For conversation continuity
543+
544+ Returns:
545+ RunResult with final_output and conversation history
546+ """
+        # 1. Normalize the input format.
+        if isinstance(input, str):
+            input_list = [{"role": "user", "content": input}]
+        else:
+            input_list = input
+
+        # 2. Extract agent properties.
+        agent_name = agent.name
+        agent_instructions = agent.instructions
+
+        # Extract the model name.
+        if isinstance(agent.model, str):
+            model = agent.model
+        else:
+            model = None  # Will use the default model.
+
+        # Extract model settings and convert them to a serializable format if needed.
+        model_settings = getattr(agent, 'model_settings', None)
+        if model_settings and not isinstance(model_settings, dict):
+            # Convert OpenAI SDK ModelSettings to the serializable activity format.
+            from agentex.lib.core.temporal.activities.adk.providers.openai_activities import ModelSettings as SerializableModelSettings
+
+            model_settings = SerializableModelSettings(
+                temperature=getattr(model_settings, 'temperature', None),
+                max_tokens=getattr(model_settings, 'max_tokens', None),
+                top_p=getattr(model_settings, 'top_p', None),
+                frequency_penalty=getattr(model_settings, 'frequency_penalty', None),
+                presence_penalty=getattr(model_settings, 'presence_penalty', None),
+                parallel_tool_calls=getattr(model_settings, 'parallel_tool_calls', None),
+                tool_choice=getattr(model_settings, 'tool_choice', None),
+                reasoning=getattr(model_settings, 'reasoning', None),
+                top_logprobs=getattr(model_settings, 'top_logprobs', None),
+                store=getattr(model_settings, 'store', None),
+                metadata=getattr(model_settings, 'metadata', None),
+                verbosity=getattr(model_settings, 'verbosity', None),
+                extra_headers=getattr(model_settings, 'extra_headers', None),
+                extra_query=getattr(model_settings, 'extra_query', None),
+                extra_body=getattr(model_settings, 'extra_body', None),
+                extra_args=getattr(model_settings, 'extra_args', None),
+            )
+
+        # Extract other properties and convert tools to a serializable format.
+        tools = agent.tools or []
+        if tools:
+            # Convert tools to ensure they are serializable.
+            converted_tools = []
+            for tool in tools:
+                if hasattr(tool, 'to_oai_function_tool'):
+                    # Already a custom tool with a conversion method.
+                    converted_tools.append(tool.to_oai_function_tool())
+                elif hasattr(tool, 'name') and hasattr(tool, 'description'):
+                    # Standard OpenAI SDK FunctionTool - convert to the serializable format.
+                    from agentex.lib.core.temporal.activities.adk.providers.openai_activities import FunctionTool as SerializableFunctionTool
+
+                    converted_tools.append(SerializableFunctionTool(
+                        name=tool.name,
+                        description=tool.description,
+                        params_json_schema=getattr(tool, 'params_json_schema', {}),
+                        strict_json_schema=getattr(tool, 'strict_json_schema', True),
+                        on_invoke_tool=getattr(tool, 'on_invoke_tool', None),
+                    ))
+                else:
+                    # Other tool types (WebSearchTool, FileSearchTool, etc.) pass through unchanged.
+                    converted_tools.append(tool)
+            tools = converted_tools
+
+        handoffs = agent.handoffs or []
+        handoff_description = getattr(agent, 'handoff_description', None)
+        output_type = getattr(agent, 'output_type', None)
+        tool_use_behavior = getattr(agent, 'tool_use_behavior', 'run_llm_again')
+        input_guardrails = getattr(agent, 'input_guardrails', None)
+        output_guardrails = getattr(agent, 'output_guardrails', None)
+
+        # 3. Call the existing service layer.
+        return await self.run_agent_streamed_auto_send(
+            task_id=task_id,
+            trace_id=trace_id,
+            parent_span_id=parent_span_id,
+            input_list=input_list,
+            mcp_server_params=mcp_server_params or [],
+            agent_name=agent_name,
+            agent_instructions=agent_instructions,
+            model=model,
+            model_settings=model_settings,
+            tools=tools,
+            handoff_description=handoff_description,
+            handoffs=handoffs,
+            output_type=output_type,
+            tool_use_behavior=tool_use_behavior,
+            start_to_close_timeout=start_to_close_timeout,
+            heartbeat_timeout=heartbeat_timeout,
+            retry_policy=retry_policy,
+            input_guardrails=input_guardrails,
+            output_guardrails=output_guardrails,
+            max_turns=max_turns,
+            previous_response_id=previous_response_id,
+        )
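
For callers that already manage their own conversation state, the same wrapper accepts a message list instead of a single string, and `previous_response_id` can carry continuity across turns. A minimal usage sketch, not part of the diff: it assumes `adk`, `agent`, and `params` are set up as in the docstring example, and `last_response_id` is a hypothetical value captured from an earlier run.

    # Sketch: passing conversation history instead of a single user string.
    # `last_response_id` is hypothetical; list input is passed through unchanged.
    history = [
        {"role": "user", "content": "What's the weather in Tokyo?"},
        {"role": "assistant", "content": "Weather in Tokyo: Sunny"},
        {"role": "user", "content": "And in Osaka?"},
    ]

    result = await adk.providers.openai.run(
        agent=agent,
        input=history,                           # list[dict] form, used as-is
        task_id=params.task.id,
        previous_response_id=last_response_id,   # optional: continue an earlier response thread
        max_turns=5,                             # cap the tool-calling loop for this request
    )
    print(result.final_output)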