
Commit 7637eff

Update 04_memory_tools to use built-in memory client tool schemas
Major changes:
- Use memory_client.get_all_memory_tool_schemas() instead of manually defining tools
- Use memory_client.resolve_function_call() to execute tool calls
- Switch from LangChain to the OpenAI client directly to show the standard pattern
- Demonstrate how the memory client provides ready-to-use tool schemas
- Show the proper tool call resolution pattern

This aligns with the memory server's built-in tool support and demonstrates the recommended integration pattern.
1 parent 0de2ddb commit 7637eff
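For readers skimming the diff, the integration pattern the commit describes comes down to two client calls: ask the memory client for ready-made OpenAI tool schemas, then hand any tool call the model emits back to the client to execute. A minimal sketch (not taken verbatim from the notebook), assuming memory_client is the configured MemoryAPIClient from the notebook's setup cell and that session_id and student_id are defined there; the run_memory_tool helper name is made up for this illustration:

    import json

    # Ready-made OpenAI tool schemas -- no hand-written tool definitions needed.
    # Assumes memory_client, session_id, and student_id exist as in the setup cell.
    memory_tool_schemas = memory_client.get_all_memory_tool_schemas()

    # Any tool call the model emits is executed by the memory client itself.
    async def run_memory_tool(tool_call):
        return await memory_client.resolve_function_call(
            function_name=tool_call.function.name,
            args=json.loads(tool_call.function.arguments),
            session_id=session_id,
            user_id=student_id,
        )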

File tree: 1 file changed (+150, -139 lines)

python-recipes/context-engineering/notebooks/section-3-memory/04_memory_tools.ipynb

Lines changed: 150 additions & 139 deletions
@@ -127,11 +127,10 @@
 "import os\n",
 "import asyncio\n",
 "from langchain_openai import ChatOpenAI\n",
-"from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage\n",
-"from langchain_core.tools import tool\n",
-"from pydantic import BaseModel, Field\n",
 "from typing import List, Optional\n",
 "from agent_memory_client import MemoryAPIClient as MemoryClient, MemoryClientConfig\n",
+"import json\n",
+"import asyncio\n",
 "\n",
 "# Initialize\n",
 "student_id = \"student_memory_tools\"\n",
@@ -145,8 +144,6 @@
 ")\n",
 "memory_client = MemoryClient(config=config)\n",
 "\n",
-"llm = ChatOpenAI(model=\"gpt-4o\", temperature=0.7)\n",
-"\n",
 "print(f\"✅ Setup complete for {student_id}\")"
 ]
 },
@@ -163,7 +160,9 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Tool 1: Store Memory"
+"### Getting Memory Tools from the Client\n",
+"\n",
+"The memory client provides built-in tool schemas that are ready to use with LLMs. You don't need to manually define tools - the client handles this for you!"
 ]
 },
 {
@@ -172,55 +171,24 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"class StoreMemoryInput(BaseModel):\n",
-" text: str = Field(description=\"The information to remember\")\n",
-" memory_type: str = Field(\n",
-" default=\"semantic\",\n",
-" description=\"Type of memory: 'semantic' for facts, 'episodic' for events\"\n",
-" )\n",
-" topics: List[str] = Field(\n",
-" default=[],\n",
-" description=\"Topics/tags for this memory (e.g., ['preferences', 'courses'])\"\n",
-" )\n",
+"# Get all memory tool schemas from the client\n",
+"# This includes: create_long_term_memory, search_long_term_memory, etc.\n",
+"memory_tool_schemas = memory_client.get_all_memory_tool_schemas()\n",
 "\n",
-"@tool(args_schema=StoreMemoryInput)\n",
-"async def store_memory(text: str, memory_type: str = \"semantic\", topics: List[str] = []) -> str:\n",
-" \"\"\"\n",
-" Store important information in long-term memory.\n",
-" \n",
-" Use this tool when:\n",
-" - Student shares preferences (e.g., \"I prefer online courses\")\n",
-" - Student states goals (e.g., \"I want to graduate in 2026\")\n",
-" - Student provides important facts (e.g., \"My major is Computer Science\")\n",
-" - You learn something that should be remembered for future sessions\n",
-" \n",
-" Do NOT use for:\n",
-" - Temporary conversation context (working memory handles this)\n",
-" - Trivial details\n",
-" - Information that changes frequently\n",
-" \n",
-" Examples:\n",
-" - text=\"Student prefers morning classes\", memory_type=\"semantic\", topics=[\"preferences\", \"schedule\"]\n",
-" - text=\"Student completed CS101 with grade A\", memory_type=\"episodic\", topics=[\"courses\", \"grades\"]\n",
-" \"\"\"\n",
-" try:\n",
-" await memory_client.create_long_term_memory([ClientMemoryRecord(\n",
-" text=text,\n",
-" memory_type=memory_type,\n",
-" topics=topics if topics else [\"general\"]\n",
-" )])\n",
-" return f\"✅ Stored memory: {text}\"\n",
-" except Exception as e:\n",
-" return f\"❌ Failed to store memory: {str(e)}\"\n",
-"\n",
-"print(\"✅ store_memory tool defined\")"
+"print(\"Available memory tools:\")\n",
+"for tool in memory_tool_schemas:\n",
+" print(f\" - {tool['function']['name']}: {tool['function']['description'][:80]}...\")\n",
+"\n",
+"print(\"\\n✅ Memory tool schemas loaded from client\")"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Tool 2: Search Memories"
+"### How Tool Resolution Works\n",
+"\n",
+"When the LLM calls a memory tool, you use `resolve_function_call()` to execute it:"
 ]
 },
 {
@@ -229,47 +197,58 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"class SearchMemoriesInput(BaseModel):\n",
-" query: str = Field(description=\"What to search for in memories\")\n",
-" limit: int = Field(default=5, description=\"Maximum number of memories to retrieve\")\n",
-"\n",
-"@tool(args_schema=SearchMemoriesInput)\n",
-"async def search_memories(query: str, limit: int = 5) -> str:\n",
-" \"\"\"\n",
-" Search for relevant memories using semantic search.\n",
-" \n",
-" Use this tool when:\n",
-" - You need to recall information about the student\n",
-" - Student asks \"What do you know about me?\"\n",
-" - You need context from previous sessions\n",
-" - Making personalized recommendations\n",
-" \n",
-" The search uses semantic matching, so natural language queries work well.\n",
-" \n",
-" Examples:\n",
-" - query=\"student preferences\" → finds preference-related memories\n",
-" - query=\"completed courses\" → finds course completion records\n",
-" - query=\"goals\" → finds student's stated goals\n",
-" \"\"\"\n",
-" try:\n",
-" memories = await memory_client.search_long_term_memory(\n",
-" text=query,\n",
-" limit=limit\n",
-" )\n",
-" \n",
-" if not memories:\n",
-" return \"No relevant memories found.\"\n",
-" \n",
-" result = f\"Found {len(memories)} relevant memories:\\n\\n\"\n",
-" for i, memory in enumerate(memories, 1):\n",
-" result += f\"{i}. {memory.text}\\n\"\n",
-" result += f\" Type: {memory.memory_type} | Topics: {', '.join(memory.topics)}\\n\\n\"\n",
-" \n",
-" return result\n",
-" except Exception as e:\n",
-" return f\"❌ Failed to search memories: {str(e)}\"\n",
+"import json\n",
+"\n",
+"# Example: LLM wants to store a memory\n",
+"# The LLM will call: create_long_term_memory with arguments\n",
+"\n",
+"# Simulate a tool call from the LLM\n",
+"example_tool_call = {\n",
+" \"name\": \"create_long_term_memory\",\n",
+" \"arguments\": json.dumps({\n",
+" \"memories\": [\n",
+" {\n",
+" \"text\": \"Student prefers morning classes\",\n",
+" \"memory_type\": \"semantic\",\n",
+" \"topics\": [\"preferences\", \"schedule\"]\n",
+" }\n",
+" ]\n",
+" })\n",
+"}\n",
+"\n",
+"# Resolve the tool call\n",
+"result = await memory_client.resolve_function_call(\n",
+" function_name=example_tool_call[\"name\"],\n",
+" args=json.loads(example_tool_call[\"arguments\"]),\n",
+" session_id=session_id,\n",
+" user_id=student_id\n",
+")\n",
+"\n",
+"print(f\"Tool call result: {result}\")\n",
+"print(\"\\n✅ Memory stored via tool call!\")\n",
+"\n",
+"# Similarly for search:\n",
+"search_tool_call = {\n",
+" \"name\": \"search_long_term_memory\",\n",
+" \"arguments\": json.dumps({\n",
+" \"text\": \"student preferences\",\n",
+" \"limit\": 5\n",
+" })\n",
+"}\n",
+"\n",
+"search_result = await memory_client.resolve_function_call(\n",
+" function_name=search_tool_call[\"name\"],\n",
+" args=json.loads(search_tool_call[\"arguments\"]),\n",
+" session_id=session_id,\n",
+" user_id=student_id\n",
+")\n",
+"\n",
+"print(f\"\\nSearch result: {search_result}\")\n",
+"print(\"\\n✅ Memories retrieved via tool call!\")\n",
 "\n",
-"print(\"✅ search_memories tool defined\")"
+"# The key insight: You don't need to manually implement tool logic!\n",
+"# The memory client handles everything via resolve_function_call()\n",
+" pass  # Just for demonstration"
 ]
 },
 {
@@ -287,15 +266,19 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# Configure agent with memory tools\n",
-"memory_tools = [store_memory, search_memories]\n",
-"llm_with_tools = llm.bind_tools(memory_tools)\n",
+"# Configure agent with memory tools from the client\n",
+"# Note: For LangChain, we need to convert OpenAI tool schemas to LangChain format\n",
+"# In production with OpenAI directly, you'd use memory_tool_schemas as-is\n",
+"\n",
+"# For this demo, we'll show the pattern with OpenAI's API directly\n",
+"import openai\n",
+"openai_client = openai.AsyncOpenAI()\n",
 "\n",
 "system_prompt = \"\"\"You are a class scheduling agent for Redis University.\n",
 "\n",
 "You have access to memory tools:\n",
-"- store_memory: Store important information about the student\n",
-"- search_memories: Search for information you've stored before\n",
+"- create_long_term_memory: Store important information about the student\n",
+"- search_long_term_memory: Search for information you've stored before\n",
 "\n",
 "Use these tools intelligently:\n",
 "- When students share preferences, goals, or important facts → store them\n",
@@ -328,39 +311,53 @@
 "user_message = \"I prefer online courses because I work part-time.\"\n",
 "\n",
 "messages = [\n",
-" SystemMessage(content=system_prompt),\n",
-" HumanMessage(content=user_message)\n",
+" {\"role\": \"system\", \"content\": system_prompt},\n",
+" {\"role\": \"user\", \"content\": user_message}\n",
 "]\n",
 "\n",
 "print(f\"\\n👤 User: {user_message}\")\n",
 "\n",
-"# First response - should call store_memory\n",
-"response = llm_with_tools.invoke(messages)\n",
+"# Call LLM with memory tools\n",
+"response = await openai_client.chat.completions.create(\n",
+" model=\"gpt-4o\",\n",
+" messages=messages,\n",
+" tools=memory_tool_schemas\n",
+")\n",
+"\n",
+"message = response.choices[0].message\n",
 "\n",
-"if response.tool_calls:\n",
+"if message.tool_calls:\n",
 " print(\"\\n🤖 Agent decision: Store this preference\")\n",
-" for tool_call in response.tool_calls:\n",
-" print(f\" Tool: {tool_call['name']}\")\n",
-" print(f\" Args: {tool_call['args']}\")\n",
+" for tool_call in message.tool_calls:\n",
+" print(f\" Tool: {tool_call.function.name}\")\n",
+" print(f\" Args: {tool_call.function.arguments}\")\n",
 " \n",
-" # Execute the tool\n",
-" if tool_call['name'] == 'store_memory':\n",
-" result = await store_memory.ainvoke(tool_call['args'])\n",
-" print(f\" Result: {result}\")\n",
-" \n",
-" # Add tool result to messages\n",
-" messages.append(response)\n",
-" messages.append(ToolMessage(\n",
-" content=result,\n",
-" tool_call_id=tool_call['id']\n",
-" ))\n",
+" # Resolve the tool call using the memory client\n",
+" result = await memory_client.resolve_function_call(\n",
+" function_name=tool_call.function.name,\n",
+" args=json.loads(tool_call.function.arguments),\n",
+" session_id=session_id,\n",
+" user_id=student_id\n",
+" )\n",
+" print(f\" Result: {result}\")\n",
+" \n",
+" # Add tool result to messages\n",
+" messages.append({\"role\": \"assistant\", \"content\": message.content or \"\", \"tool_calls\": [{\n",
+" \"id\": tool_call.id,\n",
+" \"type\": \"function\",\n",
+" \"function\": {\"name\": tool_call.function.name, \"arguments\": tool_call.function.arguments}\n",
+" }]})\n",
+" messages.append({\"role\": \"tool\", \"content\": str(result), \"tool_call_id\": tool_call.id})\n",
 " \n",
 " # Get final response\n",
-" final_response = llm_with_tools.invoke(messages)\n",
-" print(f\"\\n🤖 Agent: {final_response.content}\")\n",
+" final_response = await openai_client.chat.completions.create(\n",
+" model=\"gpt-4o\",\n",
+" messages=messages\n",
+" )\n",
+" print(f\"\\n🤖 Agent: {final_response.choices[0].message.content}\")\n",
 "else:\n",
-" print(f\"\\n🤖 Agent: {response.content}\")\n",
-" print(\"\\n⚠️ Agent didn't use store_memory tool\")\n",
+" print(f\"\\n🤖 Agent: {message.content}\")\n",
+" print(\"\\n⚠️ Agent didn't use memory tool\")\n",
 "\n",
 "print(\"\\n\" + \"=\" * 80)"
 ]
@@ -388,40 +385,54 @@
 "user_message = \"What courses would you recommend for me?\"\n",
 "\n",
 "messages = [\n",
-" SystemMessage(content=system_prompt),\n",
-" HumanMessage(content=user_message)\n",
+" {\"role\": \"system\", \"content\": system_prompt},\n",
+" {\"role\": \"user\", \"content\": user_message}\n",
 "]\n",
 "\n",
 "print(f\"\\n👤 User: {user_message}\")\n",
 "\n",
-"# First response - should call search_memories\n",
-"response = llm_with_tools.invoke(messages)\n",
+"# Call LLM with memory tools\n",
+"response = await openai_client.chat.completions.create(\n",
+" model=\"gpt-4o\",\n",
+" messages=messages,\n",
+" tools=memory_tool_schemas\n",
+")\n",
+"\n",
+"message = response.choices[0].message\n",
 "\n",
-"if response.tool_calls:\n",
+"if message.tool_calls:\n",
 " print(\"\\n🤖 Agent decision: Search for preferences first\")\n",
-" for tool_call in response.tool_calls:\n",
-" print(f\" Tool: {tool_call['name']}\")\n",
-" print(f\" Args: {tool_call['args']}\")\n",
+" for tool_call in message.tool_calls:\n",
+" print(f\" Tool: {tool_call.function.name}\")\n",
+" print(f\" Args: {tool_call.function.arguments}\")\n",
 " \n",
-" # Execute the tool\n",
-" if tool_call['name'] == 'search_memories':\n",
-" result = await search_memories.ainvoke(tool_call['args'])\n",
-" print(f\"\\n Retrieved memories:\")\n",
-" print(f\" {result}\")\n",
-" \n",
-" # Add tool result to messages\n",
-" messages.append(response)\n",
-" messages.append(ToolMessage(\n",
-" content=result,\n",
-" tool_call_id=tool_call['id']\n",
-" ))\n",
+" # Resolve the tool call using the memory client\n",
+" result = await memory_client.resolve_function_call(\n",
+" function_name=tool_call.function.name,\n",
+" args=json.loads(tool_call.function.arguments),\n",
+" session_id=session_id,\n",
+" user_id=student_id\n",
+" )\n",
+" print(f\"\\n Retrieved memories:\")\n",
+" print(f\" {result}\")\n",
+" \n",
+" # Add tool result to messages\n",
+" messages.append({\"role\": \"assistant\", \"content\": message.content or \"\", \"tool_calls\": [{\n",
+" \"id\": tool_call.id,\n",
+" \"type\": \"function\",\n",
+" \"function\": {\"name\": tool_call.function.name, \"arguments\": tool_call.function.arguments}\n",
+" }]})\n",
+" messages.append({\"role\": \"tool\", \"content\": str(result), \"tool_call_id\": tool_call.id})\n",
 " \n",
 " # Get final response\n",
-" final_response = llm_with_tools.invoke(messages)\n",
-" print(f\"\\n🤖 Agent: {final_response.content}\")\n",
+" final_response = await openai_client.chat.completions.create(\n",
+" model=\"gpt-4o\",\n",
+" messages=messages\n",
+" )\n",
+" print(f\"\\n🤖 Agent: {final_response.choices[0].message.content}\")\n",
 " print(\"\\n✅ Agent used memories to personalize recommendation!\")\n",
 "else:\n",
-" print(f\"\\n🤖 Agent: {response.content}\")\n",
+" print(f\"\\n🤖 Agent: {message.content}\")\n",
 " print(\"\\n⚠️ Agent didn't search memories\")\n",
 "\n",
 "print(\"\\n\" + \"=\" * 80)"

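Taken together, the new cells implement a single request/resolve round trip: get the tool schemas, call the model with them, let the memory client execute whatever the model asked for, then request the final answer. The consolidated sketch below is illustrative rather than copied from the notebook; the chat_with_memory helper name is invented here, and it assumes an OPENAI_API_KEY in the environment plus an already-configured memory client:

    import json
    import openai

    async def chat_with_memory(memory_client, system_prompt, user_message,
                               session_id, user_id, model="gpt-4o"):
        """One turn: send the user message, let the memory client run any
        tool calls the model makes, then ask for the final answer."""
        client = openai.AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
        tools = memory_client.get_all_memory_tool_schemas()
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_message},
        ]

        response = await client.chat.completions.create(
            model=model, messages=messages, tools=tools
        )
        message = response.choices[0].message
        if not message.tool_calls:
            return message.content

        # One assistant message carrying every tool call from this turn.
        messages.append({
            "role": "assistant",
            "content": message.content or "",
            "tool_calls": [
                {"id": tc.id, "type": "function",
                 "function": {"name": tc.function.name,
                              "arguments": tc.function.arguments}}
                for tc in message.tool_calls
            ],
        })

        # The memory client executes each call; report its result back to the model.
        for tc in message.tool_calls:
            result = await memory_client.resolve_function_call(
                function_name=tc.function.name,
                args=json.loads(tc.function.arguments),
                session_id=session_id,
                user_id=user_id,
            )
            messages.append({"role": "tool", "content": str(result),
                             "tool_call_id": tc.id})

        final = await client.chat.completions.create(model=model, messages=messages)
        return final.choices[0].message.content

Appending one assistant message that carries all of the turn's tool calls, followed by one tool message per call, keeps the transcript valid even if the model emits several calls at once; the notebook cells append per call, which is fine for the single-call demos they run.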