Skip to content

Commit 6c37392

Browse files
committed
Update sync tutorials to use new provider
1 parent 0bd8ae6 commit 6c37392

File tree

4 files changed

+55
-66
lines changed

4 files changed

+55
-66
lines changed

examples/tutorials/00_sync/010_multiturn/project/acp.py

Lines changed: 23 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,9 @@
99
from agentex.lib.sdk.fastacp.fastacp import FastACP
1010
from agentex.types.task_message_update import TaskMessageUpdate
1111
from agentex.types.task_message_content import TaskMessageContent
12+
from agents import Agent, Runner, RunConfig
13+
from agentex.lib.adk.providers._modules.sync_provider import SyncStreamingProvider
14+
from agentex.lib.types.converters import convert_task_messages_to_oai_agents_inputs
1215

1316
# Create an ACP server
1417
acp = FastACP.create(
@@ -66,21 +69,27 @@ async def handle_message_send(
6669
task_messages = await adk.messages.list(task_id=params.task.id)
6770

6871
#########################################################
69-
# 3. Convert task messages to LLM messages.
72+
# 3. Run the agent with OpenAI Agents SDK
7073
#########################################################
7174

72-
# This might seem duplicative, but the split between TaskMessage and LLMMessage is intentional and important.
75+
# Initialize the provider and run config to allow for tracing
76+
provider = SyncStreamingProvider(
77+
trace_id=params.task.id,
78+
)
79+
80+
run_config = RunConfig(
81+
model_provider=provider,
82+
)
83+
84+
# Initialize the agent
85+
test_agent = Agent(name="assistant", instructions=state.system_prompt, model=state.model)
86+
87+
# Convert task messages to OpenAI Agents SDK format
88+
input_list = convert_task_messages_to_oai_agents_inputs(task_messages)
89+
90+
# Run the agent
91+
result = await Runner.run(test_agent, input_list, run_config=run_config)
7392

74-
llm_messages = [
75-
SystemMessage(content=state.system_prompt),
76-
*[
77-
UserMessage(content=getattr(message.content, "content", ""))
78-
if getattr(message.content, "author", None) == "user"
79-
else AssistantMessage(content=getattr(message.content, "content", ""))
80-
for message in task_messages
81-
if getattr(message.content, "type", None) == "text"
82-
],
83-
]
8493

8594
# TaskMessages are messages that are sent between an Agent and a Client. They are fundamentally decoupled from messages sent to the LLM. This is because you may want to send additional metadata to allow the client to render the message on the UI differently.
8695

@@ -94,25 +103,7 @@ async def handle_message_send(
94103
# - If using multiple LLMs, but one LLM's output should not be sent to the user (i.e. a critic model), you can leverage the State as an internal storage mechanism to store the critic model's conversation history. This is a powerful and flexible way to handle complex scenarios.
95104

96105
#########################################################
97-
# 4. Call an LLM to respond to the user's message.
98-
#########################################################
99-
100-
# Call an LLM to respond to the user's message
101-
chat_completion = await adk.providers.litellm.chat_completion(
102-
llm_config=LLMConfig(model=state.model, messages=llm_messages),
103-
trace_id=params.task.id,
104-
)
105-
106-
#########################################################
107-
# 5. Return the agent response to the client.
106+
# 4. Return the agent response to the client.
108107
#########################################################
109108

110-
# The Agentex server automatically commits input and output messages to the database so you don't need to do this yourself, simply process the input content and return the output content.
111-
112-
# Return the agent response to the client
113-
if chat_completion.choices[0].message:
114-
content_str = chat_completion.choices[0].message.content or ""
115-
else:
116-
content_str = ""
117-
118-
return TextContent(author="agent", content=content_str)
109+
return TextContent(author="agent", content=result.final_output)

examples/tutorials/00_sync/010_multiturn/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ description = "An AgentEx agent"
99
readme = "README.md"
1010
requires-python = ">=3.12"
1111
dependencies = [
12-
"agentex-sdk",
12+
"agentex-sdk==0.4.28",
1313
"scale-gp",
1414
]
1515

examples/tutorials/00_sync/020_streaming/project/acp.py

Lines changed: 30 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,9 @@
1414
StreamTaskMessageDelta,
1515
)
1616
from agentex.types.task_message_content import TextContent, TaskMessageContent
17+
from agents import Agent, Runner, RunConfig
18+
from agentex.lib.adk.providers._modules.sync_provider import SyncStreamingProvider, convert_openai_to_agentex_events
19+
from agentex.lib.types.converters import convert_task_messages_to_oai_agents_inputs
1720

1821
# Create an ACP server
1922
acp = FastACP.create(
@@ -69,40 +72,35 @@ async def handle_message_send(
6972

7073
task_messages = await adk.messages.list(task_id=params.task.id)
7174

72-
llm_messages = [
73-
SystemMessage(content=state.system_prompt),
74-
*[
75-
UserMessage(content=getattr(message.content, "content", ""))
76-
if getattr(message.content, "author", None) == "user"
77-
else AssistantMessage(content=getattr(message.content, "content", ""))
78-
for message in task_messages
79-
if message.content and getattr(message.content, "type", None) == "text"
80-
],
81-
]
8275

83-
#########################################################
84-
# 4. Call an LLM to respond to the user's message and stream the response to the client.
85-
#########################################################
76+
# Initialize the provider and run config to allow for tracing
77+
provider = SyncStreamingProvider(
78+
trace_id=params.task.id,
79+
)
8680

87-
# Call an LLM to respond to the user's message
81+
# Initialize the run config to allow for tracing and streaming
82+
run_config = RunConfig(
83+
model_provider=provider,
84+
)
8885

89-
print(f"Calling LLM with model {state.model} and messages {llm_messages}")
9086

91-
# The Agentex server automatically commits input and output messages to the database so you don't need to do this yourself, simply process the input content and return the output content.
87+
test_agent = Agent(name="assistant", instructions=state.system_prompt, model=state.model)
88+
89+
# Convert task messages to OpenAI Agents SDK format
90+
input_list = convert_task_messages_to_oai_agents_inputs(task_messages)
91+
92+
# Run the agent and stream the events
93+
result = Runner.run_streamed(test_agent, input_list, run_config=run_config)
94+
95+
96+
#########################################################
97+
# 4. Stream the events to the client.
98+
#########################################################
99+
# Convert the OpenAI events to Agentex events
100+
# This is done by converting the OpenAI events to Agentex events and yielding them to the client
101+
stream = result.stream_events()
102+
103+
# Yield the Agentex events to the client
104+
async for agentex_event in convert_openai_to_agentex_events(stream):
105+
yield agentex_event
92106

93-
message_index = 0
94-
async for chunk in adk.providers.litellm.chat_completion_stream(
95-
llm_config=LLMConfig(model=state.model, messages=llm_messages, stream=True),
96-
trace_id=params.task.id,
97-
):
98-
if chunk and chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
99-
yield StreamTaskMessageDelta(
100-
type="delta",
101-
index=message_index,
102-
delta=TextDelta(type="text", text_delta=chunk.choices[0].delta.content or ""),
103-
)
104-
105-
yield StreamTaskMessageDone(
106-
type="done",
107-
index=message_index,
108-
)

examples/tutorials/00_sync/020_streaming/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ description = "An AgentEx agent that does multiturn streaming chat"
99
readme = "README.md"
1010
requires-python = ">=3.12"
1111
dependencies = [
12-
"agentex-sdk",
12+
"agentex-sdk==0.4.28",
1313
"scale-gp",
1414
]
1515

0 commit comments

Comments (0)