diff --git a/examples/tutorials/00_sync/010_multiturn/project/acp.py b/examples/tutorials/00_sync/010_multiturn/project/acp.py
index c8cc01c3..8fa07f7c 100644
--- a/examples/tutorials/00_sync/010_multiturn/project/acp.py
+++ b/examples/tutorials/00_sync/010_multiturn/project/acp.py
@@ -1,14 +1,17 @@
 import os
 from typing import Union, AsyncGenerator
 
+from agents import Agent, Runner, RunConfig
+
 from agentex.lib import adk
 from agentex.types import TextContent
 from agentex.lib.types.acp import SendMessageParams
+from agentex.lib.types.converters import convert_task_messages_to_oai_agents_inputs
 from agentex.lib.utils.model_utils import BaseModel
-from agentex.lib.types.llm_messages import LLMConfig, UserMessage, SystemMessage, AssistantMessage
 from agentex.lib.sdk.fastacp.fastacp import FastACP
 from agentex.types.task_message_update import TaskMessageUpdate
 from agentex.types.task_message_content import TaskMessageContent
+from agentex.lib.adk.providers._modules.sync_provider import SyncStreamingProvider
 
 # Create an ACP server
 acp = FastACP.create(
@@ -66,21 +69,27 @@ async def handle_message_send(
     task_messages = await adk.messages.list(task_id=params.task.id)
 
     #########################################################
-    # 3. Convert task messages to LLM messages.
+    # 3. Run the agent with the OpenAI Agents SDK
     #########################################################
 
-    # This might seem duplicative, but the split between TaskMessage and LLMMessage is intentional and important.
+    # Initialize the provider and run config to allow for tracing
+    provider = SyncStreamingProvider(
+        trace_id=params.task.id,
+    )
+
+    run_config = RunConfig(
+        model_provider=provider,
+    )
+
+    # Initialize the agent
+    test_agent = Agent(name="assistant", instructions=state.system_prompt, model=state.model)
+
+    # Convert task messages to OpenAI Agents SDK format
+    input_list = convert_task_messages_to_oai_agents_inputs(task_messages)
+
+    # Run the agent
+    result = await Runner.run(test_agent, input_list, run_config=run_config)
 
-    llm_messages = [
-        SystemMessage(content=state.system_prompt),
-        *[
-            UserMessage(content=getattr(message.content, "content", ""))
-            if getattr(message.content, "author", None) == "user"
-            else AssistantMessage(content=getattr(message.content, "content", ""))
-            for message in task_messages
-            if getattr(message.content, "type", None) == "text"
-        ],
-    ]
 
     # TaskMessages are messages that are sent between an Agent and a Client. They are fundamentally decoupled from messages sent to the LLM. This is because you may want to send additional metadata to allow the client to render the message on the UI differently.
@@ -94,25 +103,7 @@ async def handle_message_send(
     # - If using multiple LLMs, but one LLM's output should not be sent to the user (e.g., a critic model), you can leverage the State as an internal storage mechanism to store the critic model's conversation history. This is a powerful and flexible way to handle complex scenarios.
 
     #########################################################
-    # 4. Call an LLM to respond to the user's message.
+    # 4. Return the agent response to the client.
     #########################################################
 
-    # Call an LLM to respond to the user's message
-    chat_completion = await adk.providers.litellm.chat_completion(
-        llm_config=LLMConfig(model=state.model, messages=llm_messages),
-        trace_id=params.task.id,
-    )
-
-    #########################################################
-    # 5. Return the agent response to the client.
-    #########################################################
-
-    # The Agentex server automatically commits input and output messages to the database so you don't need to do this yourself, simply process the input content and return the output content.
-
-    # Return the agent response to the client
-    if chat_completion.choices[0].message:
-        content_str = chat_completion.choices[0].message.content or ""
-    else:
-        content_str = ""
-
-    return TextContent(author="agent", content=content_str)
+    return TextContent(author="agent", content=result.final_output)
diff --git a/examples/tutorials/00_sync/010_multiturn/pyproject.toml b/examples/tutorials/00_sync/010_multiturn/pyproject.toml
index 09c253f3..e21513a8 100644
--- a/examples/tutorials/00_sync/010_multiturn/pyproject.toml
+++ b/examples/tutorials/00_sync/010_multiturn/pyproject.toml
@@ -9,7 +9,7 @@ description = "An AgentEx agent"
 readme = "README.md"
 requires-python = ">=3.12"
 dependencies = [
-    "agentex-sdk",
+    "agentex-sdk==0.4.28",
     "scale-gp",
 ]
 
diff --git a/examples/tutorials/00_sync/020_streaming/project/acp.py b/examples/tutorials/00_sync/020_streaming/project/acp.py
index 18dd480a..aff8ea67 100644
--- a/examples/tutorials/00_sync/020_streaming/project/acp.py
+++ b/examples/tutorials/00_sync/020_streaming/project/acp.py
@@ -1,19 +1,19 @@
 import os
 from typing import Union, AsyncGenerator
 
+from agents import Agent, Runner, RunConfig
+
 from agentex.lib import adk
 from agentex.lib.types.acp import SendMessageParams
+from agentex.lib.types.converters import convert_task_messages_to_oai_agents_inputs
 from agentex.lib.utils.model_utils import BaseModel
-from agentex.lib.types.llm_messages import LLMConfig, UserMessage, SystemMessage, AssistantMessage
 from agentex.lib.sdk.fastacp.fastacp import FastACP
-from agentex.types.task_message_delta import TextDelta
-from agentex.types.task_message_update import (
-    TaskMessageUpdate,
-    StreamTaskMessageDone,
-    StreamTaskMessageFull,
-    StreamTaskMessageDelta,
-)
+from agentex.types.task_message_update import TaskMessageUpdate, StreamTaskMessageFull
 from agentex.types.task_message_content import TextContent, TaskMessageContent
+from agentex.lib.adk.providers._modules.sync_provider import (
+    SyncStreamingProvider,
+    convert_openai_to_agentex_events,
+)
 
 # Create an ACP server
 acp = FastACP.create(
@@ -69,40 +69,35 @@ async def handle_message_send(
     task_messages = await adk.messages.list(task_id=params.task.id)
 
-    llm_messages = [
-        SystemMessage(content=state.system_prompt),
-        *[
-            UserMessage(content=getattr(message.content, "content", ""))
-            if getattr(message.content, "author", None) == "user"
-            else AssistantMessage(content=getattr(message.content, "content", ""))
-            for message in task_messages
-            if message.content and getattr(message.content, "type", None) == "text"
-        ],
-    ]
-
-    #########################################################
-    # 4. Call an LLM to respond to the user's message and stream the response to the client.
-    #########################################################
+    # Initialize the provider to allow for tracing
+    provider = SyncStreamingProvider(
+        trace_id=params.task.id,
+    )
 
-    # Call an LLM to respond to the user's message
+    # Initialize the run config to allow for tracing and streaming
+    run_config = RunConfig(
+        model_provider=provider,
+    )
 
-    print(f"Calling LLM with model {state.model} and messages {llm_messages}")
-    # The Agentex server automatically commits input and output messages to the database so you don't need to do this yourself, simply process the input content and return the output content.
+    # Initialize the agent
+    test_agent = Agent(name="assistant", instructions=state.system_prompt, model=state.model)
+
+    # Convert task messages to OpenAI Agents SDK format
+    input_list = convert_task_messages_to_oai_agents_inputs(task_messages)
+
+    # Run the agent and stream the events
+    result = Runner.run_streamed(test_agent, input_list, run_config=run_config)
+
+    #########################################################
+    # 4. Stream the events to the client.
+    #########################################################
+    # Convert the OpenAI events to Agentex events so the client can render them,
+    # yielding each converted event back to the caller as it arrives
+    stream = result.stream_events()
+
+    # Yield the Agentex events to the client
+    async for agentex_event in convert_openai_to_agentex_events(stream):
+        yield agentex_event
 
-    message_index = 0
-    async for chunk in adk.providers.litellm.chat_completion_stream(
-        llm_config=LLMConfig(model=state.model, messages=llm_messages, stream=True),
-        trace_id=params.task.id,
-    ):
-        if chunk and chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
-            yield StreamTaskMessageDelta(
-                type="delta",
-                index=message_index,
-                delta=TextDelta(type="text", text_delta=chunk.choices[0].delta.content or ""),
-            )
-
-    yield StreamTaskMessageDone(
-        type="done",
-        index=message_index,
-    )
diff --git a/examples/tutorials/00_sync/020_streaming/pyproject.toml b/examples/tutorials/00_sync/020_streaming/pyproject.toml
index cbcb6777..6e95138e 100644
--- a/examples/tutorials/00_sync/020_streaming/pyproject.toml
+++ b/examples/tutorials/00_sync/020_streaming/pyproject.toml
@@ -9,7 +9,7 @@ description = "An AgentEx agent that does multiturn streaming chat"
 readme = "README.md"
 requires-python = ">=3.12"
 dependencies = [
-    "agentex-sdk",
+    "agentex-sdk==0.4.28",
     "scale-gp",
 ]
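
Note: both handlers now delegate message conversion to convert_task_messages_to_oai_agents_inputs
instead of building llm_messages by hand. As a rough guide to what that mapping involves, here is a
minimal sketch, assuming text-only task messages; the function name sketch_convert_task_messages is
hypothetical and this is not the library implementation:

    def sketch_convert_task_messages(task_messages) -> list[dict]:
        """Illustrative sketch: map Agentex TaskMessages to OpenAI Agents SDK input items."""
        inputs = []
        for message in task_messages:
            content = getattr(message, "content", None)
            # Keep only text content, mirroring the filter in the removed inline mapping
            if content is None or getattr(content, "type", None) != "text":
                continue
            # Messages authored by the user become user turns; everything else is
            # treated as an assistant turn, as the removed list comprehension did
            role = "user" if getattr(content, "author", None) == "user" else "assistant"
            inputs.append({"role": role, "content": getattr(content, "content", "")})
        return inputs

The resulting role/content items can be passed directly to Runner.run or Runner.run_streamed. The
system prompt also no longer needs to be prepended as a SystemMessage: it is supplied through the
Agent's instructions parameter instead.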