
Commit b723fbd

Python: Add mix agent orchestration sample (#12749)
### Motivation and Context

We don't have a sample showing how to use a mix of agents of various types in an orchestration. This PR adds one. A bug was also discovered in the OpenAIResponsesAgent while creating the sample.

### Description

1. Add a new sample showing how to use a mix of agents in the Handoff orchestration.
2. Fix a bug in the OpenAIResponsesAgent where the tools were not considering the tools in the kernel instance. A new test is added too.
3. Fix a bug in the AgentActorBase where the intermediate message callback was rejecting the ChatMessageContent type.

### Contribution Checklist

- [x] The code builds clean without any errors or warnings
- [x] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) and the [pre-submission formatting script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts) raises no violations
- [x] All unit tests pass, and I have added new tests where possible
- [x] I didn't break anyone 😄
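For orientation, here is a condensed sketch of the pattern the new sample demonstrates: any mix of `Agent` subclasses can be passed as `HandoffOrchestration` members. Agent construction, callbacks, and error handling are elided; names follow the full sample in the diff below.

```python
# Condensed from the sample added in this PR; the four agent arguments can be any mix
# of Agent subclasses (ChatCompletionAgent, AzureAssistantAgent, AzureResponsesAgent,
# AzureAIAgent, ...) constructed as shown in the full sample.
from semantic_kernel.agents import HandoffOrchestration, OrchestrationHandoffs
from semantic_kernel.agents.runtime import InProcessRuntime


async def run_handoff(support_agent, refund_agent, order_status_agent, order_return_agent, task: str) -> str:
    # Triage agent can hand off to the three specialists.
    handoffs = OrchestrationHandoffs().add_many(
        source_agent=support_agent.name,
        target_agents={
            refund_agent.name: "Transfer to this agent if the issue is refund related",
            order_status_agent.name: "Transfer to this agent if the issue is order status related",
            order_return_agent.name: "Transfer to this agent if the issue is order return related",
        },
    )
    orchestration = HandoffOrchestration(
        members=[support_agent, refund_agent, order_status_agent, order_return_agent],
        handoffs=handoffs,
    )
    runtime = InProcessRuntime()
    runtime.start()
    try:
        result = await orchestration.invoke(task=task, runtime=runtime)
        return await result.get()
    finally:
        await runtime.stop_when_idle()
```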
1 parent c79f5a5 commit b723fbd

File tree: 4 files changed, +297 −19 lines

Lines changed: 266 additions & 0 deletions
@@ -0,0 +1,266 @@
# Copyright (c) Microsoft. All rights reserved.

import asyncio

from azure.ai.projects.aio import AIProjectClient
from azure.identity.aio import DefaultAzureCredential

from semantic_kernel.agents import (
    Agent,
    AzureAIAgent,
    AzureAIAgentSettings,
    AzureAssistantAgent,
    ChatCompletionAgent,
    HandoffOrchestration,
    OrchestrationHandoffs,
)
from semantic_kernel.agents.open_ai.azure_responses_agent import AzureResponsesAgent
from semantic_kernel.agents.runtime import InProcessRuntime
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, AzureOpenAISettings
from semantic_kernel.contents import AuthorRole, ChatMessageContent, FunctionCallContent, FunctionResultContent
from semantic_kernel.functions import kernel_function

"""
The following sample replicates sample "step4_handoff.py" but uses different agent types.
The following agent types are used:
- ChatCompletionAgent: A Chat Completion agent that is backed by an Azure OpenAI service.
- AzureAssistantAgent: An Azure Assistant agent that is backed by the Azure OpenAI Assistant API.
- AzureAIAgent: An Azure AI agent that is backed by the Azure AI Agent (a.k.a. Foundry Agent) service.
- AzureResponsesAgent: An Azure Responses agent that is backed by the Azure OpenAI Responses API.

The Handoff orchestration doesn't support the following agent types:
- BedrockAgent
- CopilotStudioAgent
"""

azure_credential: DefaultAzureCredential | None = None
azure_ai_agent_client: AIProjectClient | None = None


async def init_azure_ai_agent_clients():
    global azure_credential, azure_ai_agent_client
    azure_credential = DefaultAzureCredential()
    azure_ai_agent_client = AzureAIAgent.create_client(credential=azure_credential)


async def close_azure_ai_agent_clients():
    global azure_credential, azure_ai_agent_client
    if azure_credential:
        await azure_credential.close()
    if azure_ai_agent_client:
        await azure_ai_agent_client.close()


class OrderStatusPlugin:
    @kernel_function
    def check_order_status(self, order_id: str) -> str:
        """Check the status of an order."""
        # Simulate checking the order status
        return f"Order {order_id} is shipped and will arrive in 2-3 days."


class OrderRefundPlugin:
    @kernel_function
    def process_refund(self, order_id: str, reason: str) -> str:
        """Process a refund for an order."""
        # Simulate processing a refund
        print(f"Processing refund for order {order_id} due to: {reason}")
        return f"Refund for order {order_id} has been processed successfully."


class OrderReturnPlugin:
    @kernel_function
    def process_return(self, order_id: str, reason: str) -> str:
        """Process a return for an order."""
        # Simulate processing a return
        print(f"Processing return for order {order_id} due to: {reason}")
        return f"Return for order {order_id} has been processed successfully."


async def get_agents() -> tuple[list[Agent], OrchestrationHandoffs]:
    """Return a list of agents that will participate in the Handoff orchestration and the handoff relationships.

    Feel free to add or remove agents and handoff connections.
    """
    # A Chat Completion agent that is backed by an Azure OpenAI service
    support_agent = ChatCompletionAgent(
        name="TriageAgent",
        description="A customer support agent that triages issues.",
        instructions="Handle customer requests.",
        service=AzureChatCompletion(),
    )

    # An Azure Assistant agent that is backed by the Azure OpenAI Assistant API
    azure_assistant_agent_client = AzureAssistantAgent.create_client()
    azure_assistant_agent_definition = await azure_assistant_agent_client.beta.assistants.create(
        model=AzureOpenAISettings().chat_deployment_name,
        description="A customer support agent that handles refunds.",
        instructions="Handle refund requests.",
        name="RefundAgent",
    )
    refund_agent = AzureAssistantAgent(
        client=azure_assistant_agent_client,
        definition=azure_assistant_agent_definition,
        plugins=[OrderRefundPlugin()],
    )

    # An Azure Responses agent that is backed by the Azure OpenAI Responses API
    azure_responses_agent_client = AzureResponsesAgent.create_client()
    order_status_agent = AzureResponsesAgent(
        ai_model_id=AzureOpenAISettings().responses_deployment_name,
        client=azure_responses_agent_client,
        instructions="Handle order status requests.",
        description="A customer support agent that checks order status.",
        name="OrderStatusAgent",
        plugins=[OrderStatusPlugin()],
    )

    # An Azure AI agent that is backed by the Azure AI Agent (a.k.a. Foundry Agent) service
    azure_ai_agent_definition = await azure_ai_agent_client.agents.create_agent(
        model=AzureAIAgentSettings().model_deployment_name,
        name="OrderReturnAgent",
        instructions="Handle order return requests.",
        description="A customer support agent that handles order returns.",
    )
    order_return_agent = AzureAIAgent(
        client=azure_ai_agent_client,
        definition=azure_ai_agent_definition,
        plugins=[OrderReturnPlugin()],
    )

    # Define the handoff relationships between agents
    handoffs = (
        OrchestrationHandoffs()
        .add_many(
            source_agent=support_agent.name,
            target_agents={
                refund_agent.name: "Transfer to this agent if the issue is refund related",
                order_status_agent.name: "Transfer to this agent if the issue is order status related",
                order_return_agent.name: "Transfer to this agent if the issue is order return related",
            },
        )
        .add(
            source_agent=refund_agent.name,
            target_agent=support_agent.name,
            description="Transfer to this agent if the issue is not refund related",
        )
        .add(
            source_agent=order_status_agent.name,
            target_agent=support_agent.name,
            description="Transfer to this agent if the issue is not order status related",
        )
        .add(
            source_agent=order_return_agent.name,
            target_agent=support_agent.name,
            description="Transfer to this agent if the issue is not order return related",
        )
    )

    return [support_agent, refund_agent, order_status_agent, order_return_agent], handoffs


def agent_response_callback(message: ChatMessageContent) -> None:
    """Observer function to print the messages from the agents.

    Please note that this function is called whenever the agent generates a response,
    including the internal processing messages (such as tool calls) that are not visible
    to other agents in the orchestration.
    """
    print(f"{message.name}: {message.content}")
    for item in message.items:
        if isinstance(item, FunctionCallContent):
            print(f"Calling '{item.name}' with arguments '{item.arguments}'")
        if isinstance(item, FunctionResultContent):
            print(f"Result from '{item.name}' is '{item.result}'")


def human_response_function() -> ChatMessageContent:
    """Prompt the user for input when the orchestration requests a human response."""
    user_input = input("User: ")
    return ChatMessageContent(role=AuthorRole.USER, content=user_input)


async def main():
    """Main function to run the agents."""
    # 0. Initialize the Azure AI agent clients
    await init_azure_ai_agent_clients()

    # 1. Create a handoff orchestration with multiple agents
    agents, handoffs = await get_agents()
    handoff_orchestration = HandoffOrchestration(
        members=agents,
        handoffs=handoffs,
        agent_response_callback=agent_response_callback,
        human_response_function=human_response_function,
    )

    # 2. Create a runtime and start it
    runtime = InProcessRuntime()
    runtime.start()

    try:
        # 3. Invoke the orchestration with a task and the runtime
        orchestration_result = await handoff_orchestration.invoke(
            task="Greet the customer who is reaching out for support.",
            runtime=runtime,
        )

        # 4. Wait for the results
        value = await orchestration_result.get()
        print(value)
    finally:
        # 5. Stop the runtime after the invocation is complete
        await runtime.stop_when_idle()

        # 6. Clean up the resources
        await close_azure_ai_agent_clients()

    """
    Sample output:
    TriageAgent: Hello! Thank you for reaching out for support. How can I assist you today?
    User: I'd like to track the status of my order
    TriageAgent:
    Calling 'Handoff-transfer_to_OrderStatusAgent' with arguments '{}'
    TriageAgent:
    Result from 'Handoff-transfer_to_OrderStatusAgent' is 'None'
    OrderStatusAgent: Could you please provide me with your order ID so I can check the status for you?
    User: My order ID is 123
    OrderStatusAgent:
    Calling 'OrderStatusPlugin-check_order_status' with arguments '{"order_id":"123"}'
    OrderStatusAgent:
    Result from 'OrderStatusPlugin-check_order_status' is 'Order 123 is shipped and will arrive in 2-3 days.'
    OrderStatusAgent: Your order with ID 123 has been shipped and is expected to arrive in 2-3 days. If you have any
    more questions, feel free to ask!
    User: I want to return another order of mine
    OrderStatusAgent: I can help you with that. Could you please provide me with the order ID of the order you want
    to return?
    User: Order ID 321
    OrderStatusAgent:
    Calling 'Handoff-transfer_to_TriageAgent' with arguments '{}'
    OrderStatusAgent:
    Result from 'Handoff-transfer_to_TriageAgent' is 'None'
    TriageAgent:
    Calling 'Handoff-transfer_to_OrderReturnAgent' with arguments '{}'
    TriageAgent:
    Result from 'Handoff-transfer_to_OrderReturnAgent' is 'None'
    OrderReturnAgent: Could you please provide me with the reason for the return for order ID 321?
    User: Broken item
    Processing return for order 321 due to: Broken item
    OrderReturnAgent:
    Calling 'OrderReturnPlugin-process_return' with arguments '{"order_id":"321","reason":"Broken item"}'
    OrderReturnAgent:
    Result from 'OrderReturnPlugin-process_return' is 'Return for order 321 has been processed successfully.'
    OrderReturnAgent: The return for order ID 321 has been processed successfully due to a broken item. If you need
    further assistance or have any other questions, feel free to let me know!
    User: No, bye
    Task is completed with summary: Processed the return request for order ID 321 due to a broken item.
    OrderReturnAgent:
    Calling 'Handoff-complete_task' with arguments '{"task_summary":"Processed the return request for order ID 321
    due to a broken item."}'
    OrderReturnAgent:
    Result from 'Handoff-complete_task' is 'None'
    """


if __name__ == "__main__":
    asyncio.run(main())

python/semantic_kernel/agents/open_ai/responses_agent_thread_actions.py

Lines changed: 1 addition & 1 deletion
@@ -1021,7 +1021,7 @@ def _get_tools(
 
         # TODO(evmattso): make sure to respect filters on FCB
         if kernel.plugins:
-            funcs = agent.kernel.get_full_list_of_function_metadata()
+            funcs = kernel.get_full_list_of_function_metadata()
             tools.extend([kernel_function_metadata_to_response_function_call_format(f) for f in funcs])
 
         return tools
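The one-line fix swaps `agent.kernel` for the `kernel` argument, so functions registered on the kernel actually passed to `_get_tools` are the ones turned into Responses API tools. A rough, self-contained sketch of what that metadata lookup sees; the `WeatherPlugin` here is hypothetical and only for illustration, not part of the PR:

```python
# Hypothetical plugin and kernel, only to illustrate the fix above: the metadata listed
# by get_full_list_of_function_metadata() on the passed-in kernel is what becomes tools.
from semantic_kernel import Kernel
from semantic_kernel.functions import kernel_function


class WeatherPlugin:
    """Hypothetical plugin used only for this illustration."""

    @kernel_function
    def get_weather(self, city: str) -> str:
        return f"Sunny in {city}."


kernel = Kernel()
kernel.add_plugin(WeatherPlugin(), plugin_name="weather")

# Each entry here is converted into a function-call tool definition by the fixed code path.
for f in kernel.get_full_list_of_function_metadata():
    print(f"{f.plugin_name}-{f.name}")
```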

python/semantic_kernel/agents/orchestration/agent_actor_base.py

Lines changed: 18 additions & 18 deletions
@@ -220,22 +220,22 @@ def _create_messages(self, additional_messages: DefaultTypeAlias | None = None)
         return [*base_messages, additional_messages]
 
     async def _handle_intermediate_message(self, message: ChatMessageContent) -> None:
-        """Handle intermediate messages from the agent.
-
-        This method is called with messages produced during streaming agent responses.
-        Although the parameter is typed as `ChatMessageContent` (to match the `invoke_stream` callback signature),
-        the actual object will always be a `StreamingChatMessageContent` (a subclass of `ChatMessageContent`).
-
-        The agent response callback expects a `ChatMessageContent`, so we can pass the message directly.
-        However, the streaming agent response callback specifically requires a `StreamingChatMessageContent`.
-        To avoid type errors from the static type checker due to down casting (from `ChatMessageContent` to
-        `StreamingChatMessageContent`), we check that the message is of the correct type before calling the callbacks.
-        Since it will always be a `StreamingChatMessageContent`, this check is safe.
-        """
-        if not isinstance(message, StreamingChatMessageContent):
-            raise TypeError(
-                f"Expected message to be of type 'StreamingChatMessageContent', "
-                f"but got '{type(message).__name__}' instead."
-            )
+        """Handle intermediate messages from the agent."""
         await self._call_agent_response_callback(message)
-        await self._call_streaming_agent_response_callback(message, is_final=True)
+        if isinstance(message, StreamingChatMessageContent):
+            await self._call_streaming_agent_response_callback(message, is_final=True)
+        else:
+            # Convert to StreamingChatMessageContent if needed
+            streaming_message = StreamingChatMessageContent(  # type: ignore[misc, call-overload]
+                role=message.role,
+                choice_index=0,
+                items=message.items,
+                content=message.content,
+                name=message.name,
+                inner_content=message.inner_content,
+                encoding=message.encoding,
+                finish_reason=message.finish_reason,
+                ai_model_id=message.ai_model_id,
+                metadata=message.metadata,
+            )
+            await self._call_streaming_agent_response_callback(streaming_message, is_final=True)
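The rewrite above stops rejecting plain `ChatMessageContent` (which non-streaming agents in a mixed orchestration can produce) and instead wraps it so the streaming callback still fires. A minimal standalone sketch of that conversion, assuming only the public `semantic_kernel.contents` types; this helper is illustrative and simplified, not part of the PR:

```python
from semantic_kernel.contents import AuthorRole, ChatMessageContent, StreamingChatMessageContent


def as_streaming(message: ChatMessageContent) -> StreamingChatMessageContent:
    """Pass streaming messages through; wrap plain messages so streaming callbacks still fire."""
    if isinstance(message, StreamingChatMessageContent):
        return message
    # Simplified version of the conversion performed in the fix above.
    return StreamingChatMessageContent(
        role=message.role,
        choice_index=0,
        content=message.content,
        name=message.name,
    )


# A non-streaming agent response is wrapped instead of raising a TypeError as before.
plain = ChatMessageContent(role=AuthorRole.ASSISTANT, content="Refund processed.", name="RefundAgent")
print(type(as_streaming(plain)).__name__)  # StreamingChatMessageContent
```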

python/tests/unit/agents/openai_responses/test_openai_responses_thread_actions.py

Lines changed: 12 additions & 0 deletions
@@ -366,3 +366,15 @@ async def mock_invoke_function_call(*args, **kwargs):
 
     assert len(collected_stream_messages) == 2, "Expected exactly two final messages after tool call."
     assert collected_stream_messages[0].role == AuthorRole.ASSISTANT
+
+
+def test_get_tools(mock_agent, kernel, custom_plugin_class):
+    kernel.add_plugin(custom_plugin_class)
+
+    tools = ResponsesAgentThreadActions._get_tools(
+        agent=mock_agent,
+        kernel=kernel,
+        function_choice_behavior=MagicMock(),
+    )
+
+    assert len(tools) == len(mock_agent.tools) + len(kernel.get_full_list_of_function_metadata())
