Skip to content

Commit fceab57

Browse files
authored
Migrate samples from agentsv2-preview repo (#43798)
* Migrate samples from agentsv2-preview repo * run black * update * fix * comment out some samples
1 parent 4653d95 commit fceab57

12 files changed

+533
-39
lines changed
Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,116 @@
1+
# # ------------------------------------
2+
# # Copyright (c) Microsoft Corporation.
3+
# # Licensed under the MIT License.
4+
# # ------------------------------------
5+
6+
# """
7+
# DESCRIPTION:
8+
# This sample demonstrates how to integrate memory into a prompt agent.
9+
# USAGE:
10+
# python sample_agent_memory.py
11+
12+
# Before running the sample:
13+
# pip install python-dotenv azure-identity azure-ai-projects>=2.0.0b1
14+
15+
# Deploy a chat model (e.g. gpt-4.1) and an embedding model (e.g. text-embedding-3-small).
16+
# Once you have deployed models, set the deployment name in the variables below.
17+
18+
# Set these environment variables with your own values:
19+
# 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
20+
# page of your Azure AI Foundry portal.
21+
# 2) AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model for the agent, as found under the "Name" column in
22+
# the "Models + endpoints" tab in your Azure AI Foundry project.
23+
# 3) AZURE_AI_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model for memory, as found under the "Name" column in
24+
# the "Models + endpoints" tab in your Azure AI Foundry project.
25+
# 4) AZURE_AI_EMBEDDING_MODEL_DEPLOYMENT_NAME - The deployment name of the embedding model for memory, as found under the
26+
# "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project.
27+
# """
28+
29+
# import os
# from time import sleep
30+
# from dotenv import load_dotenv
31+
# from azure.identity import DefaultAzureCredential
32+
# from azure.ai.projects import AIProjectClient
33+
# from azure.ai.projects.models import (
34+
# MemoryStoreDefaultDefinition,
35+
# MemoryStoreDefaultOptions,
36+
# MemorySearchOptions,
37+
# ResponsesUserMessageItemParam,
38+
# MemorySearchTool,
# AgentReference,
39+
# PromptAgentDefinition,
40+
# )
41+
42+
# load_dotenv()
43+
44+
# project_client = AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=DefaultAzureCredential())
45+
46+
# with project_client:
47+
48+
# openai_client = project_client.get_openai_client()
49+
50+
# # Create a memory store
51+
# definition = MemoryStoreDefaultDefinition(
52+
# chat_model=os.environ["AZURE_AI_CHAT_MODEL_DEPLOYMENT_NAME"],
53+
# embedding_model=os.environ["AZURE_AI_EMBEDDING_MODEL_DEPLOYMENT_NAME"],
54+
# )
55+
# memory_store = project_client.memory_stores.create(
56+
# name="my_memory_store",
57+
# description="Example memory store for conversations",
58+
# definition=definition,
59+
# )
60+
# print(f"Created memory store: {memory_store.name} ({memory_store.id}): {memory_store.description}")
61+
62+
# # Create a prompt agent with memory search tool
63+
# agent = project_client.agents.create_version(
64+
# agent_name="MyAgent",
65+
# definition=PromptAgentDefinition(
66+
# model=os.environ["AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME"],
67+
# instructions="You are a helpful assistant that answers general questions",
68+
# ),
69+
# tools=[
70+
# MemorySearchTool(
71+
# memory_store_name=memory_store.name,
72+
# scope="{{$userId}}",
73+
# update_delay=10, # Wait 10 seconds of inactivity before updating memories
74+
# # In a real application, set this to a higher value like 300 (5 minutes, default)
75+
# )
76+
# ],
77+
# )
78+
# print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})")
79+
80+
# # Create a conversation with the agent with memory tool enabled
81+
# conversation = openai_client.conversations.create()
82+
# print(f"Created conversation (id: {conversation.id})")
83+
84+
# # Create an agent response to initial user message
85+
# response = openai_client.responses.create(
86+
# conversation=conversation.id,
87+
# extra_body={"agent": AgentReference(name=agent.name).as_dict()},
88+
# input=[ResponsesUserMessageItemParam(content="I prefer dark roast coffee")],
89+
# )
90+
# print(f"Response output: {response.output_text}")
91+
92+
# # After an inactivity in the conversation, memories will be extracted from the conversation and stored
93+
# sleep(60)
94+
95+
# # Create a new conversation
96+
# new_conversation = openai_client.conversations.create()
97+
# print(f"Created new conversation (id: {new_conversation.id})")
98+
99+
# # Create an agent response with stored memories
100+
# new_response = openai_client.responses.create(
101+
# conversation=new_conversation.id,
102+
# extra_body={"agent": AgentReference(name=agent.name).as_dict()},
103+
# input=[ResponsesUserMessageItemParam(content="Please order my usual coffee")],
104+
# )
105+
# print(f"Response output: {new_response.output_text}")
106+
107+
# # Clean up
108+
# openai_client.conversations.delete(conversation.id)
109+
# openai_client.conversations.delete(new_conversation.id)
110+
# print("Conversations deleted")
111+
112+
# project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version)
113+
# print("Agent deleted")
114+
115+
# project_client.memory_stores.delete(memory_store.name)
116+
# print("Memory store deleted")
Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,106 @@
1+
# ------------------------------------
2+
# Copyright (c) Microsoft Corporation.
3+
# Licensed under the MIT License.
4+
# ------------------------------------
5+
6+
"""
7+
DESCRIPTION:
8+
This sample demonstrates how to interact with the Foundry Project MCP tool.
9+
USAGE:
10+
python sample_mcp_tool.py
11+
12+
Before running the sample:
13+
pip install python-dotenv azure-identity azure-ai-projects>=2.0.0b1 mcp
14+
15+
Set these environment variables with your own values:
16+
1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
17+
page of your Azure AI Foundry portal.
18+
2) IMAGE_GEN_DEPLOYMENT_NAME - The deployment name of the image generation model, as found under the "Name" column in
19+
the "Models + endpoints" tab in your Azure AI Foundry project.
20+
"""
21+
22+
import asyncio
23+
import os
24+
from dotenv import load_dotenv
25+
from azure.ai.projects.aio import AIProjectClient
26+
from azure.identity.aio import DefaultAzureCredential
27+
from mcp import ClientSession
28+
from mcp.client.streamable_http import streamablehttp_client
29+
30+
load_dotenv()
31+
32+
33+
async def main():
34+
credential = DefaultAzureCredential()
35+
try:
36+
# Fetch the Entra ID token with audience as https://ai.azure.com
37+
access_token = await credential.get_token("https://ai.azure.com")
38+
endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT", "").rstrip("/")
39+
async with streamablehttp_client(
40+
url=f"{endpoint}/mcp_tools?api-version=2025-05-15-preview",
41+
headers={"Authorization": f"Bearer {access_token.token}"},
42+
) as (read_stream, write_stream, _):
43+
# Create a session using the client streams
44+
async with ClientSession(read_stream, write_stream) as session:
45+
# Initialize the connection
46+
await session.initialize()
47+
# List available tools
48+
tools = await session.list_tools()
49+
print(f"Available tools: {[tool.name for tool in tools.tools]}")
50+
51+
# For each tool, print its details
52+
for tool in tools.tools:
53+
print(f"\n\nTool Name: {tool.name}, Input Schema: {tool.inputSchema}")
54+
55+
# Run the code interpreter tool
56+
code_interpreter_result = await session.call_tool(
57+
name="code_interpreter",
58+
arguments={"code": "print('Hello from Azure AI Foundry MCP Code Interpreter tool!')"},
59+
)
60+
print(f"\n\nCode Interpreter Output: {code_interpreter_result.content}")
61+
62+
# Run the image_generation tool
63+
image_generation_result = await session.call_tool(
64+
name="image_generation",
65+
arguments={"prompt": "Draw a cute puppy riding a skateboard"},
66+
meta={"imagegen_model_deployment_name": os.getenv("IMAGE_GEN_DEPLOYMENT_NAME", "")},
67+
)
68+
print(f"\n\nImage Generation Output: {image_generation_result.content}")
69+
70+
# Run the file_search tool
71+
# Create a project client
72+
project_client = AIProjectClient(
73+
endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
74+
credential=credential,
75+
api_version="2025-05-15-preview",
76+
)
77+
async with project_client:
78+
# Create a vector store
79+
openai_client = await project_client.get_openai_client()
80+
vector_store = await openai_client.vector_stores.create(
81+
name="sample_vector_store",
82+
)
83+
84+
vector_store_file = await openai_client.vector_stores.files.upload_and_poll(
85+
vector_store_id=vector_store.id,
86+
file=open(
87+
os.path.abspath(os.path.join(os.path.dirname(__file__), "./assets/product_info.md")),
88+
"rb",
89+
),
90+
)
91+
92+
print(f"\n\nUploaded file, file ID: {vector_store_file.id} to vector store ID: {vector_store.id}")
93+
94+
# Call the file_search tool
95+
file_search_result = await session.call_tool(
96+
name="file_search",
97+
arguments={"queries": ["What feature does Smart Eyewear offer?"]},
98+
meta={"vector_store_ids": [vector_store.id]},
99+
)
100+
print(f"\n\nFile Search Output: {file_search_result.content}")
101+
finally:
102+
await credential.close()
103+
104+
105+
if __name__ == "__main__":
    # Script entry point: drive the async sample on a fresh event loop.
    asyncio.run(main())
Lines changed: 141 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,141 @@
1+
# # ------------------------------------
2+
# # Copyright (c) Microsoft Corporation.
3+
# # Licensed under the MIT License.
4+
# # ------------------------------------
5+
6+
# """
7+
# DESCRIPTION:
8+
# This sample demonstrates how to interact with the memory store to add and retrieve memory.
9+
# USAGE:
10+
# python sample_memory_advanced.py
11+
12+
# Before running the sample:
13+
# pip install python-dotenv azure-identity azure-ai-projects>=2.0.0b1
14+
15+
# Deploy a chat model (e.g. gpt-4.1) and an embedding model (e.g. text-embedding-3-small).
16+
# Once you have deployed models, set the deployment name in the variables below.
17+
18+
# Set these environment variables with your own values:
19+
# 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
20+
# page of your Azure AI Foundry portal.
21+
# 2) AZURE_AI_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in
22+
# the "Models + endpoints" tab in your Azure AI Foundry project.
23+
# 3) AZURE_AI_EMBEDDING_MODEL_DEPLOYMENT_NAME - The deployment name of the embedding model, as found under the
24+
# "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project.
25+
# """
26+
27+
# import os
28+
# from dotenv import load_dotenv
29+
# from azure.identity import DefaultAzureCredential
30+
# from azure.ai.projects import AIProjectClient
31+
# from azure.ai.projects.models import (
32+
# MemoryStoreDefaultDefinition,
33+
# MemoryStoreDefaultOptions,
34+
# MemorySearchOptions,
35+
# ResponsesUserMessageItemParam,
36+
# ResponsesAssistantMessageItemParam,
37+
# MemorySearchTool,
38+
# PromptAgentDefinition,
39+
# )
40+
41+
# load_dotenv()
42+
43+
# project_client = AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=DefaultAzureCredential())
44+
45+
# with project_client:
46+
47+
# # Create memory store with advanced options
48+
# options = MemoryStoreDefaultOptions(
49+
# user_profile_enabled=True,
50+
# user_profile_details="Preferences and interests relevant to coffee expert agent",
51+
# chat_summary_enabled=True,
52+
# )
53+
# definition = MemoryStoreDefaultDefinition(
54+
# chat_model=os.environ["AZURE_AI_CHAT_MODEL_DEPLOYMENT_NAME"],
55+
# embedding_model=os.environ["AZURE_AI_EMBEDDING_MODEL_DEPLOYMENT_NAME"],
56+
# options=options,
57+
# )
58+
# memory_store = project_client.memory_stores.create(
59+
# name="my_memory_store_3",
60+
# description="Example memory store for conversations",
61+
# definition=definition,
62+
# )
63+
# print(f"Created memory store: {memory_store.name} ({memory_store.id}): {memory_store.description}")
64+
65+
# # Set scope to associate the memories with.
66+
# # You can also use "{{$userId}}" to take the oid of the request authentication header.
67+
# scope = "user_123"
68+
69+
# # Extract memories from messages and add them to the memory store
70+
# user_message = ResponsesUserMessageItemParam(
71+
# content="I prefer dark roast coffee and usually drink it in the morning"
72+
# )
73+
# update_poller = project_client.memory_stores.begin_update_memories(
74+
# name=memory_store.name,
75+
# scope=scope,
76+
# items=[user_message], # Pass conversation items that you want to add to memory
77+
# # update_delay=300 # Keep default inactivity delay before starting update
78+
# )
79+
# print(f"Scheduled memory update operation (Update ID: {update_poller.update_id}, Status: {update_poller.status()})")
80+
81+
# # Extend the previous update with another update and more messages
82+
# new_message = ResponsesUserMessageItemParam(content="I also like cappuccinos in the afternoon")
83+
# new_update_poller = project_client.memory_stores.begin_update_memories(
84+
# name=memory_store.name,
85+
# scope=scope,
86+
# items=[new_message],
87+
# previous_update_id=update_poller.update_id, # Extend from previous update ID
88+
# update_delay=0, # Trigger update immediately without waiting for inactivity
89+
# )
90+
# print(
91+
# f"Scheduled memory update operation (Update ID: {new_update_poller.update_id}, Status: {new_update_poller.status()})"
92+
# )
93+
94+
# # As first update has not started yet, the new update will cancel the first update and cover both sets of messages
95+
# print(
96+
# f"Superseded first memory update operation (Update ID: {update_poller.update_id}, Status: {update_poller.status()})"
97+
# )
98+
99+
# new_update_result = new_update_poller.result()
100+
# print(
101+
# f"Second update {new_update_poller.update_id} completed with {len(new_update_result.memory_operations)} memory operations"
102+
# )
103+
# for operation in new_update_result.memory_operations:
104+
# print(
105+
# f" - Operation: {operation.kind}, Memory ID: {operation.memory_item.memory_id}, Content: {operation.memory_item.content}"
106+
# )
107+
108+
# # Retrieve memories from the memory store
109+
# query_message = ResponsesUserMessageItemParam(content="What are my morning coffee preferences?")
110+
# search_response = project_client.memory_stores.search_memories(
111+
# name=memory_store.name, scope=scope, items=[query_message], options=MemorySearchOptions(max_memories=5)
112+
# )
113+
# print(f"Found {len(search_response.memories)} memories")
114+
# for memory in search_response.memories:
115+
# print(f" - Memory ID: {memory.memory_item.memory_id}, Content: {memory.memory_item.content}")
116+
117+
# # Perform another search using the previous search as context
118+
# agent_message = ResponsesAssistantMessageItemParam(
119+
# content="You previously indicated a preference for dark roast coffee in the morning."
120+
# )
121+
# followup_query = ResponsesUserMessageItemParam(
122+
# content="What about afternoon?" # Follow-up assuming context from previous messages
123+
# )
124+
# followup_search_response = project_client.memory_stores.search_memories(
125+
# name=memory_store.name,
126+
# scope=scope,
127+
# items=[agent_message, followup_query],
128+
# previous_search_id=search_response.search_id,
129+
# options=MemorySearchOptions(max_memories=5),
130+
# )
131+
# print(f"Found {len(followup_search_response.memories)} memories")
132+
# for memory in followup_search_response.memories:
133+
# print(f" - Memory ID: {memory.memory_item.memory_id}, Content: {memory.memory_item.content}")
134+
135+
# # Delete memories for the current scope
136+
# delete_scope_response = project_client.memory_stores.delete_scope(name=memory_store.name, scope=scope)
137+
# print(f"Deleted memories for scope '{scope}': {delete_scope_response.deleted}")
138+
139+
# # Delete memory store
140+
# delete_response = project_client.memory_stores.delete(memory_store.name)
141+
# print(f"Deleted: {delete_response.deleted}")

0 commit comments

Comments
 (0)