diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 55c6df140bf2..c7697fdf3c7e 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -13,7 +13,7 @@ * Added OpenAPI tool sample. See `sample_agent_openapi.py`. * Added OpenAPI with Project Connection sample. See `sample_agent_openapi_with_project_connection.py`. * Added SharePoint grounding tool sample. See `sample_agent_sharepoint.py`. - +* Improved MCP client sample showing direct MCP tool invocation. See `samples/mcp_client/sample_mcp_tool_async.py`. ## 2.0.0b2 (2025-11-14) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_mcp_tool_async_.py b/sdk/ai/azure-ai-projects/samples/agents/sample_mcp_tool_async_.py deleted file mode 100644 index 5754bef416b9..000000000000 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_mcp_tool_async_.py +++ /dev/null @@ -1,101 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to interact with the Foundry Project MCP tool. - -USAGE: - python sample_mcp_tool_async.py - - Before running the sample: - - pip install "azure-ai-projects>=2.0.0b1" azure-identity python-dotenv mcp - - Set these environment variables with your own values: - 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Microsoft Foundry portal. - 2) IMAGE_GEN_DEPLOYMENT_NAME - The deployment name of the image generation model, as found under the "Name" column in - the "Models + endpoints" tab in your Microsoft Foundry project. -""" - -import asyncio -import os -from dotenv import load_dotenv -from azure.ai.projects.aio import AIProjectClient -from azure.identity.aio import DefaultAzureCredential -from mcp import ClientSession -from mcp.client.streamable_http import streamablehttp_client - -load_dotenv() - -endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] - - -async def main(): - - async with ( - DefaultAzureCredential() as credential, - AIProjectClient(endpoint=endpoint, credential=credential) as project_client, - project_client.get_openai_client() as openai_client, - streamablehttp_client( - url=f"{endpoint}/mcp_tools?api-version=2025-05-15-preview", - headers={"Authorization": f"Bearer {(await credential.get_token('https://ai.azure.com')).token}"}, - ) as (read_stream, write_stream, _), - ): - # Create a session using the client streams - async with ClientSession(read_stream, write_stream) as session: - # Initialize the connection - await session.initialize() - # List available tools - tools = await session.list_tools() - print(f"Available tools: {[tool.name for tool in tools.tools]}") - - # For each tool, print its details - for tool in tools.tools: - print(f"\n\nTool Name: {tool.name}, Input Schema: {tool.inputSchema}") - - # Run the code interpreter tool - code_interpreter_result = await session.call_tool( - name="code_interpreter", - arguments={"code": "print('Hello from Microsoft Foundry MCP Code Interpreter tool!')"}, - ) - print(f"\n\nCode Interpreter Output: {code_interpreter_result.content}") - - # Run the image_generation tool - image_generation_result = await session.call_tool( - name="image_generation", - arguments={"prompt": "Draw a cute puppy riding a skateboard"}, - meta={"imagegen_model_deployment_name": os.getenv("IMAGE_GEN_DEPLOYMENT_NAME", "")}, - ) - 
print(f"\n\nImage Generation Output: {image_generation_result.content}") - - # Create a vector store - vector_store = await openai_client.vector_stores.create( - name="sample_vector_store", - ) - - vector_store_file = await openai_client.vector_stores.files.upload_and_poll( - vector_store_id=vector_store.id, - file=open( - os.path.abspath(os.path.join(os.path.dirname(__file__), "./assets/product_info.md")), - "rb", - ), - ) - - print(f"\n\nUploaded file, file ID: {vector_store_file.id} to vector store ID: {vector_store.id}") - - # Call the file_search tool - file_search_result = await session.call_tool( - name="file_search", - arguments={"queries": ["What feature does Smart Eyewear offer?"]}, - meta={"vector_store_ids": [vector_store.id]}, - ) - print(f"\n\nFile Search Output: {file_search_result.content}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py index c94ee85be4ae..b1fc008e5844 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/tools/sample_agent_ai_search.py @@ -24,9 +24,11 @@ 3) AI_SEARCH_PROJECT_CONNECTION_ID - The AI Search project connection ID, as found in the "Connections" tab in your Microsoft Foundry project. 4) AI_SEARCH_INDEX_NAME - The name of the AI Search index to use for searching. + 5) AI_SEARCH_USER_INPUT - (Optional) The question to ask. If not set, you will be prompted. """ import os +from typing import Optional from dotenv import load_dotenv from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient @@ -36,76 +38,101 @@ AzureAISearchToolResource, AISearchIndexResource, AzureAISearchQueryType, + AgentVersionObject, ) +# Global variables to be asserted after main execution +output: Optional[str] = None + load_dotenv() endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] -with ( - DefaultAzureCredential() as credential, - AIProjectClient(endpoint=endpoint, credential=credential) as project_client, - project_client.get_openai_client() as openai_client, -): - - # [START tool_declaration] - tool = AzureAISearchAgentTool( - azure_ai_search=AzureAISearchToolResource( - indexes=[ - AISearchIndexResource( - project_connection_id=os.environ["AI_SEARCH_PROJECT_CONNECTION_ID"], - index_name=os.environ["AI_SEARCH_INDEX_NAME"], - query_type=AzureAISearchQueryType.SIMPLE, + +def main() -> None: + global output + agent: Optional[AgentVersionObject] = None + + with ( + DefaultAzureCredential() as credential, + AIProjectClient(endpoint=endpoint, credential=credential) as project_client, + project_client.get_openai_client() as openai_client, + ): + try: + # [START tool_declaration] + tool = AzureAISearchAgentTool( + azure_ai_search=AzureAISearchToolResource( + indexes=[ + AISearchIndexResource( + project_connection_id=os.environ["AI_SEARCH_PROJECT_CONNECTION_ID"], + index_name=os.environ["AI_SEARCH_INDEX_NAME"], + query_type=AzureAISearchQueryType.SIMPLE, + ), + ] + ) + ) + # [END tool_declaration] + + agent = project_client.agents.create_version( + agent_name="MyAgent", + definition=PromptAgentDefinition( + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + instructions="""You are a helpful assistant. 
You must always provide citations for
+                        answers using the tool and render them as: `\u3010message_idx:search_idx\u2020source\u3011`.""",
+                    tools=[tool],
                 ),
-            ]
-        )
-    )
-    # [END tool_declaration]
-
-    agent = project_client.agents.create_version(
-        agent_name="MyAgent",
-        definition=PromptAgentDefinition(
-            model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
-            instructions="""You are a helpful assistant. You must always provide citations for
-                answers using the tool and render them as: `\u3010message_idx:search_idx\u2020source\u3011`.""",
-            tools=[tool],
-        ),
-        description="You are a helpful agent.",
-    )
-    print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})")
-
-    user_input = input("Enter your question (e.g., 'Tell me about mental health services'): \n")
-
-    stream_response = openai_client.responses.create(
-        stream=True,
-        tool_choice="required",
-        input=user_input,
-        extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
-    )
-
-    for event in stream_response:
-        if event.type == "response.created":
-            print(f"Follow-up response created with ID: {event.response.id}")
-        elif event.type == "response.output_text.delta":
-            print(f"Delta: {event.delta}")
-        elif event.type == "response.text.done":
-            print(f"\nFollow-up response done!")
-        elif event.type == "response.output_item.done":
-            if event.item.type == "message":
-                item = event.item
-                if item.content[-1].type == "output_text":
-                    text_content = item.content[-1]
-                    for annotation in text_content.annotations:
-                        if annotation.type == "url_citation":
-                            print(
-                                f"URL Citation: {annotation.url}, "
-                                f"Start index: {annotation.start_index}, "
-                                f"End index: {annotation.end_index}"
-                            )
-        elif event.type == "response.completed":
-            print(f"\nFollow-up completed!")
-            print(f"Full response: {event.response.output_text}")
-
-    print("\nCleaning up...")
-    project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version)
-    print("Agent deleted")
+                description="You are a helpful agent.",
+            )
+            print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})")
+
+            # Get user input from environment variable or prompt
+            user_input = os.environ.get("AI_SEARCH_USER_INPUT")
+            if not user_input:
+                user_input = input("Enter your question (e.g., 'Tell me about mental health services'): \n")
+
+            print(f"Question: {user_input}\n")
+
+            stream_response = openai_client.responses.create(
+                stream=True,
+                tool_choice="required",
+                input=user_input,
+                extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
+            )
+
+            output = None
+            for event in stream_response:
+                if event.type == "response.created":
+                    print(f"Follow-up response created with ID: {event.response.id}")
+                elif event.type == "response.output_text.delta":
+                    print(f"Delta: {event.delta}")
+                elif event.type == "response.text.done":
+                    print("\nFollow-up response done!")
+                elif event.type == "response.output_item.done":
+                    if event.item.type == "message":
+                        item = event.item
+                        if item.content[-1].type == "output_text":
+                            text_content = item.content[-1]
+                            for annotation in text_content.annotations:
+                                if annotation.type == "url_citation":
+                                    print(
+                                        f"URL Citation: {annotation.url}, "
+                                        f"Start index: {annotation.start_index}, "
+                                        f"End index: {annotation.end_index}"
+                                    )
+                elif event.type == "response.completed":
+                    output = event.response.output_text
+                    print("\nFollow-up completed!")
+                    print(f"Full response: {output}")
+        except Exception as e:
+            print(f"Error occurred: {e}")
+            raise
+        finally:
+            if isinstance(agent, 
AgentVersionObject): + print("\nCleaning up...") + project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) + print("Agent deleted") + + +if __name__ == "__main__": + main() + assert isinstance(output, str) and len(output) > 0, "Output should be a non-empty string" diff --git a/sdk/ai/azure-ai-projects/samples/mcp_client/assets/product_info.md b/sdk/ai/azure-ai-projects/samples/mcp_client/assets/product_info.md new file mode 100644 index 000000000000..48dfc503dd5a --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/mcp_client/assets/product_info.md @@ -0,0 +1,51 @@ +# Information about product item_number: 1 + +## Brand +Contoso Galaxy Innovations + +## Category +Smart Eyewear + +## Features +- Augmented Reality interface +- Voice-controlled AI agent +- HD video recording with 3D audio +- UV protection and blue light filtering +- Wireless charging with extended battery life + +## User Guide + +### 1. Introduction +Introduction to your new SmartView Glasses + +### 2. Product Overview +Overview of features and controls + +### 3. Sizing and Fit +Finding your perfect fit and style adjustments + +### 4. Proper Care and Maintenance +Cleaning and caring for your SmartView Glasses + +### 5. Break-in Period +Adjusting to the augmented reality experience + +### 6. Safety Tips +Safety guidelines for public and private spaces + +### 7. Troubleshooting +Quick fixes for common issues + +## Warranty Information +Two-year limited warranty on all electronic components + +## Contact Information +Customer Support at support@contoso-galaxy-innovations.com + +## Return Policy +30-day return policy with no questions asked + +## FAQ +- How to sync your SmartView Glasses with your devices +- Troubleshooting connection issues +- Customizing your augmented reality environment diff --git a/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py b/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py new file mode 100644 index 000000000000..c86d7c5c5524 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/mcp_client/sample_mcp_tool_async.py @@ -0,0 +1,141 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to directly interact with MCP (Model Context Protocol) tools + using the low-level MCP client library to connect to the Foundry Project's MCP tools API: + {AZURE_AI_PROJECT_ENDPOINT}/mcp_tools?api-version=2025-05-15-preview + + For agent-based MCP tool usage, see samples in samples/agents/tools/sample_agent_mcp.py + and related files in that directory. + + WORKFLOW: + This sample demonstrates a typical MCP client workflow: + 1. Establish connection to the Foundry Project MCP endpoint using ClientSession + 2. Initialize the session and discover available tools + 3. Invoke tools programmatically with specific arguments and metadata + 4. Process and save tool outputs (e.g., writing image generation results to a file) + 5. Chain multiple tool calls together (code interpreter → image generation → file search) + +USAGE: + python sample_mcp_tool_async.py + + Before running the sample: + + pip install "azure-ai-projects>=2.0.0b1" azure-identity python-dotenv mcp + + Set these environment variables with your own values: + 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Microsoft Foundry portal. 
+    2) IMAGE_GEN_DEPLOYMENT_NAME - The deployment name of the image generation model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Microsoft Foundry project.
+    3) (Optional) LOG_LEVEL - Logging level for HTTP client debugging. Valid values (standard logging level names):
+       - CRITICAL - Suppresses all logs except critical errors
+       - FATAL - Same as CRITICAL
+       - ERROR - Shows errors only
+       - WARNING (or WARN) - Shows warnings and errors
+       - INFO - Shows informational messages, warnings, and errors
+       - DEBUG - Shows detailed HTTP requests/responses and all other logs
+       - NOTSET - Defers to the parent logger configuration
+"""
+
+import asyncio
+import base64
+import os
+import logging
+from dotenv import load_dotenv
+from azure.ai.projects.aio import AIProjectClient
+from azure.identity.aio import DefaultAzureCredential
+from mcp import ClientSession
+from mcp.types import ImageContent
+from mcp.client.streamable_http import streamablehttp_client
+
+load_dotenv()
+
+# Configure logging level from environment variable
+# Set LOG_LEVEL=DEBUG to see detailed HTTP requests and responses
+log_level = os.getenv("LOG_LEVEL", "").upper()
+if log_level:
+    logging.basicConfig(level=getattr(logging, log_level, logging.CRITICAL))
+    # Enable httpx logging to see HTTP requests at the same level
+    logging.getLogger("httpx").setLevel(getattr(logging, log_level, logging.CRITICAL))
+
+endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
+
+
+async def main():
+
+    async with (
+        DefaultAzureCredential() as credential,
+        AIProjectClient(endpoint=endpoint, credential=credential) as project_client,
+        project_client.get_openai_client() as openai_client,
+        streamablehttp_client(
+            url=f"{endpoint}/mcp_tools?api-version=2025-05-15-preview",
+            headers={"Authorization": f"Bearer {(await credential.get_token('https://ai.azure.com')).token}"},
+        ) as (read_stream, write_stream, _),
+        ClientSession(read_stream, write_stream) as session,
+    ):
+
+        # Initialize the connection
+        await session.initialize()
+        # List available tools
+        tools = await session.list_tools()
+        print(f"Available tools: {[tool.name for tool in tools.tools]}")
+
+        # For each tool, print its details
+        for tool in tools.tools:
+            print(f"\n\nTool Name: {tool.name}, Input Schema: {tool.inputSchema}")
+
+        # Run the code interpreter tool
+        code_interpreter_result = await session.call_tool(
+            name="code_interpreter",
+            arguments={"code": "print('Hello from Microsoft Foundry MCP Code Interpreter tool!')"},
+        )
+        print(f"\n\nCode Interpreter Output: {code_interpreter_result.content}")
+
+        # Run the image_generation tool
+        image_generation_result = await session.call_tool(
+            name="image_generation",
+            arguments={"prompt": "Draw a cute puppy riding a skateboard"},
+            meta={"imagegen_model_deployment_name": os.getenv("IMAGE_GEN_DEPLOYMENT_NAME", "")},
+        )
+
+        # Save the image generation output to a file
+        if image_generation_result.content and isinstance(image_generation_result.content[0], ImageContent):
+            filename = "puppy.png"
+            file_path = os.path.abspath(filename)
+
+            with open(file_path, "wb") as f:
+                f.write(base64.b64decode(image_generation_result.content[0].data))
+            print(f"\nImage saved to: {file_path}")
+
+        # Create a vector store
+        vector_store = await openai_client.vector_stores.create(
+            name="sample_vector_store",
+        )
+
+        # Open the file in a context manager so the handle is closed once the upload completes
+        product_info_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "./assets/product_info.md"))
+        with open(product_info_path, "rb") as product_info_file:
+            vector_store_file = await openai_client.vector_stores.files.upload_and_poll(
+                vector_store_id=vector_store.id,
+                file=product_info_file,
+            )
+
+        print(f"\n\nUploaded file, file ID: {vector_store_file.id} to vector store ID: {vector_store.id}")
+
+        # Call the file_search tool
+        file_search_result = await session.call_tool(
+            name="file_search",
+            arguments={"queries": ["What feature does Smart Eyewear offer?"]},
+            meta={"vector_store_ids": [vector_store.id]},
+        )
+        print(f"\n\nFile Search Output: {file_search_result.content}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())