|
| 1 | +# # ------------------------------------ |
| 2 | +# # Copyright (c) Microsoft Corporation. |
| 3 | +# # Licensed under the MIT License. |
| 4 | +# # ------------------------------------ |
| 5 | + |
| 6 | +# """ |
| 7 | +# DESCRIPTION: |
| 8 | +# This sample demonstrates how to interact with the memory store to add and retrieve memory. |
| 9 | +# USAGE: |
| 10 | +# python sample_memory_advanced.py |
| 11 | + |
| 12 | +# Before running the sample: |
| 13 | +# pip install python-dotenv azure-identity azure-ai-projects>=2.0.0b1 |
| 14 | + |
| 15 | +# Deploy a chat model (e.g. gpt-4.1) and an embedding model (e.g. text-embedding-3-small). |
| 16 | +# Once you have deployed models, set the deployment name in the variables below. |
| 17 | + |
| 18 | +# Set these environment variables with your own values: |
| 19 | +# 1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview |
| 20 | +# page of your Azure AI Foundry portal. |
| 21 | +# 2) AZURE_AI_CHAT_MODEL_DEPLOYMENT_NAME - The deployment name of the chat model, as found under the "Name" column in |
| 22 | +# the "Models + endpoints" tab in your Azure AI Foundry project. |
| 23 | +# 3) AZURE_AI_EMBEDDING_MODEL_DEPLOYMENT_NAME - The deployment name of the embedding model, as found under the |
| 24 | +# "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. |
| 25 | +# """ |
| 26 | + |
| 27 | +# import os |
| 28 | +# from dotenv import load_dotenv |
| 29 | +# from azure.identity import DefaultAzureCredential |
| 30 | +# from azure.ai.projects import AIProjectClient |
| 31 | +# from azure.ai.projects.models import ( |
| 32 | +# MemoryStoreDefaultDefinition, |
| 33 | +# MemoryStoreDefaultOptions, |
| 34 | +# MemorySearchOptions, |
| 35 | +# ResponsesUserMessageItemParam, |
| 36 | +# ResponsesAssistantMessageItemParam, |
| 37 | +# MemorySearchTool, |
| 38 | +# PromptAgentDefinition, |
| 39 | +# ) |
| 40 | + |
| 41 | +# load_dotenv() |
| 42 | + |
| 43 | +# project_client = AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=DefaultAzureCredential()) |
| 44 | + |
| 45 | +# with project_client: |
| 46 | + |
| 47 | +# # Create memory store with advanced options |
| 48 | +# options = MemoryStoreDefaultOptions( |
| 49 | +# user_profile_enabled=True, |
| 50 | +# user_profile_details="Preferences and interests relevant to coffee expert agent", |
| 51 | +# chat_summary_enabled=True, |
| 52 | +# ) |
| 53 | +# definition = MemoryStoreDefaultDefinition( |
| 54 | +# chat_model=os.environ["AZURE_AI_CHAT_MODEL_DEPLOYMENT_NAME"], |
| 55 | +# embedding_model=os.environ["AZURE_AI_EMBEDDING_MODEL_DEPLOYMENT_NAME"], |
| 56 | +# options=options, |
| 57 | +# ) |
| 58 | +# memory_store = project_client.memory_stores.create( |
| 59 | +# name="my_memory_store_3", |
| 60 | +# description="Example memory store for conversations", |
| 61 | +# definition=definition, |
| 62 | +# ) |
| 63 | +# print(f"Created memory store: {memory_store.name} ({memory_store.id}): {memory_store.description}") |
| 64 | + |
| 65 | +# # Set scope to associate the memories with. |
| 66 | +# # You can also use "{{$userId}}" to take the oid of the request authentication header. |
| 67 | +# scope = "user_123" |
| 68 | + |
| 69 | +# # Extract memories from messages and add them to the memory store |
| 70 | +# user_message = ResponsesUserMessageItemParam( |
| 71 | +# content="I prefer dark roast coffee and usually drink it in the morning" |
| 72 | +# ) |
| 73 | +# update_poller = project_client.memory_stores.begin_update_memories( |
| 74 | +# name=memory_store.name, |
| 75 | +# scope=scope, |
| 76 | +# items=[user_message], # Pass conversation items that you want to add to memory |
| 77 | +# # update_delay=300 # Keep default inactivity delay before starting update |
| 78 | +# ) |
| 79 | +# print(f"Scheduled memory update operation (Update ID: {update_poller.update_id}, Status: {update_poller.status()})") |
| 80 | + |
| 81 | +# # Extend the previous update with another update and more messages |
| 82 | +# new_message = ResponsesUserMessageItemParam(content="I also like cappuccinos in the afternoon") |
| 83 | +# new_update_poller = project_client.memory_stores.begin_update_memories( |
| 84 | +# name=memory_store.name, |
| 85 | +# scope=scope, |
| 86 | +# items=[new_message], |
| 87 | +# previous_update_id=update_poller.update_id, # Extend from previous update ID |
| 88 | +# update_delay=0, # Trigger update immediately without waiting for inactivity |
| 89 | +# ) |
| 90 | +# print( |
| 91 | +# f"Scheduled memory update operation (Update ID: {new_update_poller.update_id}, Status: {new_update_poller.status()})" |
| 92 | +# ) |
| 93 | + |
| 94 | +# # As first update has not started yet, the new update will cancel the first update and cover both sets of messages |
| 95 | +# print( |
| 96 | +# f"Superseded first memory update operation (Update ID: {update_poller.update_id}, Status: {update_poller.status()})" |
| 97 | +# ) |
| 98 | + |
| 99 | +# new_update_result = new_update_poller.result() |
| 100 | +# print( |
| 101 | +# f"Second update {new_update_poller.update_id} completed with {len(new_update_result.memory_operations)} memory operations" |
| 102 | +# ) |
| 103 | +# for operation in new_update_result.memory_operations: |
| 104 | +# print( |
| 105 | +# f" - Operation: {operation.kind}, Memory ID: {operation.memory_item.memory_id}, Content: {operation.memory_item.content}" |
| 106 | +# ) |
| 107 | + |
| 108 | +# # Retrieve memories from the memory store |
| 109 | +# query_message = ResponsesUserMessageItemParam(content="What are my morning coffee preferences?") |
| 110 | +# search_response = project_client.memory_stores.search_memories( |
| 111 | +# name=memory_store.name, scope=scope, items=[query_message], options=MemorySearchOptions(max_memories=5) |
| 112 | +# ) |
| 113 | +# print(f"Found {len(search_response.memories)} memories") |
| 114 | +# for memory in search_response.memories: |
| 115 | +# print(f" - Memory ID: {memory.memory_item.memory_id}, Content: {memory.memory_item.content}") |
| 116 | + |
| 117 | +# # Perform another search using the previous search as context |
| 118 | +# agent_message = ResponsesAssistantMessageItemParam( |
| 119 | +# content="You previously indicated a preference for dark roast coffee in the morning." |
| 120 | +# ) |
| 121 | +# followup_query = ResponsesUserMessageItemParam( |
| 122 | +# content="What about afternoon?" # Follow-up assuming context from previous messages |
| 123 | +# ) |
| 124 | +# followup_search_response = project_client.memory_stores.search_memories( |
| 125 | +# name=memory_store.name, |
| 126 | +# scope=scope, |
| 127 | +# items=[agent_message, followup_query], |
| 128 | +# previous_search_id=search_response.search_id, |
| 129 | +# options=MemorySearchOptions(max_memories=5), |
| 130 | +# ) |
| 131 | +# print(f"Found {len(followup_search_response.memories)} memories") |
| 132 | +# for memory in followup_search_response.memories: |
| 133 | +# print(f" - Memory ID: {memory.memory_item.memory_id}, Content: {memory.memory_item.content}") |
| 134 | + |
| 135 | +# # Delete memories for the current scope |
| 136 | +# delete_scope_response = project_client.memory_stores.delete_scope(name=memory_store.name, scope=scope) |
| 137 | +# print(f"Deleted memories for scope '{scope}': {delete_scope_response.deleted}") |
| 138 | + |
| 139 | +# # Delete memory store |
| 140 | +# delete_response = project_client.memory_stores.delete(memory_store.name) |
| 141 | +# print(f"Deleted: {delete_response.deleted}") |
0 commit comments