
Commit 5e4a37b

Add simplified run() API for OpenAI Agents SDK integration
- New run() method accepts standard Agent objects from the OpenAI SDK
- Automatically converts ModelSettings and FunctionTool to a serializable format
- Provides clean DX matching the OpenAI Agents SDK with just a task_id param
- Works in Temporal workflows, sync agents (FastACP), and standalone scripts

Example usage:

    from agents import Agent, function_tool, ModelSettings

    @function_tool
    def my_tool(param: str) -> str:
        return f"Result: {param}"

    agent = Agent(
        name="My Agent",
        instructions="Help users",
        model="gpt-4o",
        model_settings=ModelSettings(parallel_tool_calls=True),
        tools=[my_tool],
    )

    result = await adk.providers.openai.run(
        agent=agent,
        input="Hello",
        task_id=task_id,
    )
1 parent 03e4078 commit 5e4a37b

File tree

1 file changed: +212 −0
  • src/agentex/lib/adk/providers/_modules/openai.py


src/agentex/lib/adk/providers/_modules/openai.py

Lines changed: 212 additions & 0 deletions
@@ -477,3 +477,215 @@ async def run_agent_streamed_auto_send(
            max_turns=max_turns,
            previous_response_id=previous_response_id,
        )

    async def run(
        self,
        agent: Agent,
        input: str | list[dict[str, Any]],
        task_id: str,
        *,
        trace_id: str | None = None,
        parent_span_id: str | None = None,
        start_to_close_timeout: timedelta = timedelta(seconds=600),
        heartbeat_timeout: timedelta = timedelta(seconds=600),
        retry_policy: RetryPolicy = DEFAULT_RETRY_POLICY,
        max_turns: int | None = None,
        mcp_server_params: list[StdioServerParameters] | None = None,
        previous_response_id: str | None = None,
    ) -> SerializableRunResultStreaming | RunResultStreaming:
        """
        Run an OpenAI Agent with automatic streaming to the AgentEx UI.

        This is a simple wrapper that lets you use standard OpenAI Agents SDK
        patterns while getting AgentEx features (streaming, tracing, TaskMessages).

        Works everywhere: Temporal workflows, sync agents (FastACP), standalone scripts.

        Example:
            from agents import Agent, function_tool, ModelSettings
            from openai.types.shared import Reasoning

            @function_tool
            def get_weather(city: str) -> str:
                return f"Weather in {city}: Sunny"

            agent = Agent(
                name="Weather Bot",
                instructions="Help with weather",
                model="gpt-4o",
                model_settings=ModelSettings(
                    parallel_tool_calls=True,
                    reasoning=Reasoning(effort="low", summary="auto"),
                ),
                tools=[get_weather],
            )

            result = await adk.providers.openai.run(
                agent=agent,
                input="What's the weather in Tokyo?",
                task_id=params.task.id,
                trace_id=params.task.id,
                parent_span_id=span.id,
            )

        Args:
            agent: Standard OpenAI Agents SDK Agent object
            input: User message (str) or conversation history (list of dicts)
            task_id: AgentEx task ID for streaming
            trace_id: Optional trace ID (defaults to task_id)
            parent_span_id: Optional parent span for nested tracing
            start_to_close_timeout: Maximum time allowed for the operation
            heartbeat_timeout: Maximum time between heartbeats
            retry_policy: Policy for retrying failed operations
            max_turns: Max conversation turns (default from Runner)
            mcp_server_params: Optional MCP server configurations
            previous_response_id: For conversation continuity

        Returns:
            RunResult with final_output and conversation history
        """
        # 1. Normalize input format
        if isinstance(input, str):
            input_list = [{"role": "user", "content": input}]
        else:
            input_list = input

        # 2. Extract agent properties
        agent_name = agent.name
        agent_instructions = agent.instructions

        # Extract model name
        if isinstance(agent.model, str):
            model = agent.model
        else:
            model = None  # Will use default

        # Extract model settings and convert to serializable format if needed
        model_settings = getattr(agent, 'model_settings', None)
        if model_settings and not isinstance(model_settings, dict):
            # Convert OpenAI SDK ModelSettings to serializable format
            from agentex.lib.core.temporal.activities.adk.providers.openai_activities import ModelSettings as SerializableModelSettings

            model_settings = SerializableModelSettings(
                temperature=getattr(model_settings, 'temperature', None),
                max_tokens=getattr(model_settings, 'max_tokens', None),
                top_p=getattr(model_settings, 'top_p', None),
                frequency_penalty=getattr(model_settings, 'frequency_penalty', None),
                presence_penalty=getattr(model_settings, 'presence_penalty', None),
                parallel_tool_calls=getattr(model_settings, 'parallel_tool_calls', None),
                tool_choice=getattr(model_settings, 'tool_choice', None),
                reasoning=getattr(model_settings, 'reasoning', None),
                store=getattr(model_settings, 'store', None),
                metadata=getattr(model_settings, 'metadata', None),
                extra_headers=getattr(model_settings, 'extra_headers', None),
                extra_body=getattr(model_settings, 'extra_body', None),
                extra_args=getattr(model_settings, 'extra_args', None),
            )

        # Extract other properties and convert tools to serializable format
        tools = agent.tools or []
        if tools:
            # Import all tool types we need
            from agents.tool import (
                FunctionTool as OAIFunctionTool,
                WebSearchTool as OAIWebSearchTool,
                FileSearchTool as OAIFileSearchTool,
                ComputerTool as OAIComputerTool,
                LocalShellTool as OAILocalShellTool,
                CodeInterpreterTool as OAICodeInterpreterTool,
                ImageGenerationTool as OAIImageGenerationTool,
            )
            from agentex.lib.core.temporal.activities.adk.providers.openai_activities import (
                FunctionTool as SerializableFunctionTool,
                WebSearchTool as SerializableWebSearchTool,
                FileSearchTool as SerializableFileSearchTool,
                ComputerTool as SerializableComputerTool,
                LocalShellTool as SerializableLocalShellTool,
                CodeInterpreterTool as SerializableCodeInterpreterTool,
                ImageGenerationTool as SerializableImageGenerationTool,
            )

            # Convert tools to ensure they're serializable for Temporal
            converted_tools = []
            for tool in tools:
                # If already a serializable wrapper, keep as-is
                if hasattr(tool, 'to_oai_function_tool'):
                    converted_tools.append(tool)
                # Convert OpenAI SDK tool types to serializable wrappers
                elif isinstance(tool, OAIFunctionTool):
                    # FunctionTool requires on_invoke_tool callable
                    if not hasattr(tool, 'on_invoke_tool') or tool.on_invoke_tool is None:
                        raise ValueError(f"FunctionTool '{tool.name}' missing required on_invoke_tool callable")
                    converted_tools.append(SerializableFunctionTool(
                        name=tool.name,
                        description=tool.description,
                        params_json_schema=tool.params_json_schema,
                        strict_json_schema=getattr(tool, 'strict_json_schema', True),
                        on_invoke_tool=tool.on_invoke_tool,
                    ))
                elif isinstance(tool, OAIWebSearchTool):
                    converted_tools.append(SerializableWebSearchTool(
                        user_location=getattr(tool, 'user_location', None),
                        search_context_size=getattr(tool, 'search_context_size', 'medium'),
                    ))
                elif isinstance(tool, OAIFileSearchTool):
                    converted_tools.append(SerializableFileSearchTool(
                        vector_store_ids=tool.vector_store_ids,
                        max_num_results=getattr(tool, 'max_num_results', None),
                        include_search_results=getattr(tool, 'include_search_results', False),
                        ranking_options=getattr(tool, 'ranking_options', None),
                        filters=getattr(tool, 'filters', None),
                    ))
                elif isinstance(tool, OAIComputerTool):
                    converted_tools.append(SerializableComputerTool(
                        computer=getattr(tool, 'computer', None),
                        on_safety_check=getattr(tool, 'on_safety_check', None),
                    ))
                elif isinstance(tool, OAILocalShellTool):
                    converted_tools.append(SerializableLocalShellTool(
                        executor=getattr(tool, 'executor', None),
                    ))
                elif isinstance(tool, OAICodeInterpreterTool):
                    converted_tools.append(SerializableCodeInterpreterTool(
                        tool_config=getattr(tool, 'tool_config', {"type": "code_interpreter"}),
                    ))
                elif isinstance(tool, OAIImageGenerationTool):
                    converted_tools.append(SerializableImageGenerationTool(
                        tool_config=getattr(tool, 'tool_config', {"type": "image_generation"}),
                    ))
                else:
                    # Unknown tool type - keep as-is and let downstream handle it
                    converted_tools.append(tool)
            tools = converted_tools

        handoffs = agent.handoffs or []
        handoff_description = getattr(agent, 'handoff_description', None)
        output_type = getattr(agent, 'output_type', None)
        tool_use_behavior = getattr(agent, 'tool_use_behavior', 'run_llm_again')
        input_guardrails = getattr(agent, 'input_guardrails', None)
        output_guardrails = getattr(agent, 'output_guardrails', None)

        # 3. Call the existing service layer
        return await self.run_agent_streamed_auto_send(
            task_id=task_id,
            trace_id=trace_id,
            parent_span_id=parent_span_id,
            input_list=input_list,
            mcp_server_params=mcp_server_params or [],
            agent_name=agent_name,
            agent_instructions=agent_instructions,
            model=model,
            model_settings=model_settings,
            tools=tools,
            handoff_description=handoff_description,
            handoffs=handoffs,
            output_type=output_type,
            tool_use_behavior=tool_use_behavior,
            start_to_close_timeout=start_to_close_timeout,
            heartbeat_timeout=heartbeat_timeout,
            retry_policy=retry_policy,
            input_guardrails=input_guardrails,
            output_guardrails=output_guardrails,
            max_turns=max_turns,
            previous_response_id=previous_response_id,
        )
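The same run() entry point also covers multi-turn use: input may be a list of role/content dicts (the same shape run() normalizes a plain string into), and previous_response_id carries continuity across runs. A minimal sketch, assuming an ACP handler where params.task.id is in scope and prior_response_id was saved from an earlier run (both of those names are illustrative, not part of this commit):

    from agents import Agent

    agent = Agent(
        name="Follow-up Bot",
        instructions="Answer follow-up questions",
        model="gpt-4o",
    )

    # Conversation history as role/content dicts, the list form accepted by run()
    history = [
        {"role": "user", "content": "What's the weather in Tokyo?"},
        {"role": "assistant", "content": "Sunny."},
        {"role": "user", "content": "And in Osaka?"},
    ]

    result = await adk.providers.openai.run(
        agent=agent,
        input=history,
        task_id=params.task.id,
        previous_response_id=prior_response_id,  # illustrative: links this run to an earlier response
    )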
