|
278 | 278 | "metadata": {}, |
279 | 279 | "outputs": [], |
280 | 280 | "source": [ |
281 | | - "# Copyright (c) Microsoft. All rights reserved.\n", |
282 | | - "\n", |
283 | | - "import asyncio\n", |
284 | | - "\n", |
285 | | - "from azure.ai.agents.models import McpTool\n", |
286 | | - "from azure.identity.aio import DefaultAzureCredential\n", |
287 | | - "\n", |
288 | | - "from semantic_kernel.agents import AzureAIAgent, AzureAIAgentSettings, AzureAIAgentThread\n", |
289 | | - "from semantic_kernel.contents import ChatMessageContent, FunctionCallContent, FunctionResultContent\n", |
290 | | - "\n", |
291 | | - "TASK = \"What's the current weather in Lisbon?\"\n", |
292 | | - "\n", |
293 | | - "async def handle_intermediate_messages(message: ChatMessageContent) -> None:\n", |
294 | | - " for item in message.items or []:\n", |
295 | | - " if isinstance(item, FunctionResultContent):\n", |
296 | | - " print(f\"Function Result:> {item.result} for function: {item.name}\")\n", |
297 | | - " elif isinstance(item, FunctionCallContent):\n", |
298 | | - " print(f\"Function Call:> {item.name} with arguments: {item.arguments}\")\n", |
299 | | - " else:\n", |
300 | | - " print(f\"{item}\")\n", |
301 | | - "\n", |
302 | | - "\n", |
303 | | - "async def main() -> None:\n", |
304 | | - " async with (\n", |
305 | | - " DefaultAzureCredential() as creds,\n", |
306 | | - " AzureAIAgent.create_client(credential=creds) as client,\n", |
307 | | - " ):\n", |
308 | | - " # 1. Define the MCP tool with the server URL\n", |
309 | | - " mcp_tool = McpTool(\n", |
310 | | - " server_label=\"weather\",\n", |
311 | | - " server_url=f\"{apim_resource_gateway_url}/weather-mcp/mcp\",\n", |
312 | | - " allowed_tools=[], # Specify allowed tools if needed\n", |
313 | | - " )\n", |
314 | | - "\n", |
315 | | - " # Optionally you may configure to require approval\n", |
316 | | - " # Allowed values are \"never\" or \"always\"\n", |
317 | | - " mcp_tool.set_approval_mode(\"never\")\n", |
318 | | - "\n", |
319 | | - " agent_settings = AzureAIAgentSettings(\n", |
320 | | - " model_deployment_name=models_config[0]['name']\n", |
321 | | - " )\n", |
322 | | - "\n", |
323 | | - " # 2. Create an agent with the MCP tool on the Azure AI agent service\n", |
324 | | - " agent_definition = await client.agents.create_agent(\n", |
325 | | - " model=agent_settings.model_deployment_name,\n", |
326 | | - " tools=mcp_tool.definitions,\n", |
327 | | - " instructions=\"You are a helpful agent that can use MCP tools to assist users.\",\n", |
328 | | - " )\n", |
329 | | - "\n", |
330 | | - " # 3. Create a Semantic Kernel agent for the Azure AI agent\n", |
331 | | - " agent = AzureAIAgent(\n", |
332 | | - " client=client,\n", |
333 | | - " definition=agent_definition,\n", |
334 | | - " )\n", |
335 | | - "\n", |
336 | | - " # 4. Create a thread for the agent\n", |
337 | | - " # If no thread is provided, a new thread will be\n", |
338 | | - " # created and returned with the initial response\n", |
339 | | - " thread: AzureAIAgentThread | None = None\n", |
340 | | - "\n", |
341 | | - " print(f\"# User: '{TASK}'\")\n", |
342 | | - " # 5. Invoke the agent for the specified thread for response\n", |
343 | | - " async for response in agent.invoke(\n", |
344 | | - " messages=TASK, thread=thread, on_intermediate_message=handle_intermediate_messages\n", |
345 | | - " ):\n", |
346 | | - " print(f\"# Agent: {response}\")\n", |
347 | | - "\n", |
348 | | - "\n", |
349 | | - "\n", |
350 | | - "if __name__ == \"__main__\":\n", |
351 | | - " asyncio.run(main())" |
| 281 | + "from azure.ai.agents.models import ListSortOrder, MessageTextContent, McpTool, RequiredMcpToolCall, SubmitToolApprovalAction, ToolApproval\n", |
| 282 | + "from azure.ai.projects import AIProjectClient\n", |
| 283 | + "from azure.identity import DefaultAzureCredential\n", |
| 284 | + "import time\n", |
| 285 | + "\n", |
| 286 | + "project_client = AIProjectClient(endpoint=foundry_project_endpoint,\n", |
| 287 | + " credential=DefaultAzureCredential())\n", |
| 288 | + "agents_client = project_client.agents\n", |
| 289 | + "\n", |
| 290 | + "# MCP tool definition\n", |
| 291 | + "mcp_tool = McpTool(\n", |
| 292 | + " server_label=\"weather\",\n", |
| 293 | + " server_url=f\"{apim_resource_gateway_url}/weather-mcp/sse\",\n", |
| 294 | + "    #allowed_tools=[], # Optional initial allow-list\n", |
| 295 | + ")\n", |
| 296 | + "\n", |
| 297 | + "#mcp_tool.update_headers({\"Authorization\": f\"Bearer {apim_subscription_key}\"})\n", |
| 298 | + "\n", |
| 299 | + "prompt = \"What's the weather in Lisbon, Cairo and London?\"\n", |
| 300 | + "\n", |
| 301 | + "# Agent creation\n", |
| 302 | + "agent = agents_client.create_agent(\n", |
| 303 | + " model=str(models_config[0].get('name')),\n", |
| 304 | + " name=\"agent-mcp\",\n", |
| 305 | + " instructions=\"You are a sarcastic AI agent. Use the tools provided to answer the user's questions. Be sure to cite your sources and answer in details.\",\n", |
| 306 | + " tools=mcp_tool.definitions\n", |
| 307 | + ")\n", |
| 308 | + "print(f\"🎉 Created agent, agent ID: {agent.id}\")\n", |
| 309 | + "print(f\"✨ MCP Server: {mcp_tool.server_label} at {mcp_tool.server_url}\")\n", |
| 310 | + "\n", |
| 311 | + "# Thread creation\n", |
| 312 | + "thread = agents_client.threads.create()\n", |
| 313 | + "print(f\"🧵 Created thread, thread ID: {thread.id}\")\n", |
| 314 | + "\n", |
| 315 | + "# Message creation\n", |
| 316 | + "message = agents_client.messages.create(\n", |
| 317 | + " thread_id=thread.id,\n", |
| 318 | + " role=\"user\",\n", |
| 319 | + " content=prompt,\n", |
| 320 | + ")\n", |
| 321 | + "print(f\"💬 Created message, message ID: {message.id}\")\n", |
| 322 | + "\n", |
| 323 | + "mcp_tool.set_approval_mode(\"never\") # Disable human approval\n", |
| 324 | + "\n", |
| 325 | + "# Run\n", |
| 326 | + "run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources)\n", |
| 327 | + "while run.status in [\"queued\", \"in_progress\", \"requires_action\"]:\n", |
| 328 | + " time.sleep(2)\n", |
| 329 | + " run = agents_client.runs.get(thread_id=thread.id, run_id=run.id)\n", |
| 330 | + " print(f\"⏳ Run status: {run.status}\")\n", |
| 331 | + "if run.status == \"failed\":\n", |
| 332 | + " print(f\"❌ Run error: {run.last_error}\")\n", |
| 333 | + "\n", |
| 334 | + "# Get Run steps\n", |
| 335 | + "run_steps = agents_client.run_steps.list(thread_id=thread.id, run_id=run.id)\n", |
| 336 | + "print()\n", |
| 337 | + "\n", |
| 338 | + "for step in run_steps:\n", |
| 339 | + " print(f\"🔄 Run step: {step.id}, status: {step.status}, type: {step.type}\")\n", |
| 340 | + " if step.type == \"tool_calls\":\n", |
| 341 | + " print(f\"🛠️ Tool call details:\")\n", |
| 342 | + " for tool_call in step.step_details.tool_calls:\n", |
| 343 | + " print(json.dumps(tool_call.as_dict(), indent=5))\n", |
| 344 | + "\n", |
| 345 | + "# Get the messages in the thread\n", |
| 346 | + "print(\"\\n📜 Messages in the thread:\")\n", |
| 347 | + "messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING)\n", |
| 348 | + "\n", |
| 349 | + "for item in messages:\n", |
| 350 | + " last_message_content = item.content[-1]\n", |
| 351 | + " if isinstance(last_message_content, MessageTextContent):\n", |
| 352 | + " print(f\"🗨️ {item.role}: {last_message_content.text.value}\")\n", |
| 353 | + "\n", |
| 354 | + "# Clean up resources\n", |
| 355 | + "# agents_client.delete_agent(agent.id)\n" |
352 | 356 | ] |
353 | 357 | }, |
354 | 358 | { |
|
516 | 520 | ], |
517 | 521 | "metadata": { |
518 | 522 | "kernelspec": { |
519 | | - "display_name": ".venv", |
| 523 | + "display_name": "myenv", |
520 | 524 | "language": "python", |
521 | 525 | "name": "python3" |
522 | 526 | }, |
|
530 | 534 | "name": "python", |
531 | 535 | "nbconvert_exporter": "python", |
532 | 536 | "pygments_lexer": "ipython3", |
533 | | - "version": "3.12.10" |
| 537 | + "version": "3.13.5" |
534 | 538 | } |
535 | 539 | }, |
536 | 540 | "nbformat": 4, |
|
0 commit comments