
Commit c4c7501

Keep LangChain/LangGraph pattern while using memory client tools
Updated 04_memory_tools to:
- Use LangChain tools (this is a LangChain/LangGraph course!)
- Wrap memory_client.resolve_function_call() in LangChain @tool decorators
- Use llm.bind_tools() and LangChain message types
- Show how to integrate memory client's built-in tools with LangChain

This gives users the best of both worlds:
- Familiar LangChain/LangGraph patterns
- Memory client's built-in tool implementations via resolve_function_call()
1 parent 7637eff commit c4c7501
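For readers skimming the diff below, here is a condensed sketch of the wrapper pattern this commit adopts. It is not a drop-in notebook cell: it assumes the notebook's earlier setup has already created `memory_client`, `session_id`, and `student_id`, and it mirrors the code added in the diff.

# Condensed sketch of the wrapper pattern the commit adds (not a drop-in cell).
# Assumes memory_client, session_id, and student_id exist from the notebook's setup.
from typing import List

from langchain_core.tools import tool


@tool
async def create_long_term_memory(memories: List[dict]) -> str:
    """Store important information in long-term memory."""
    # Delegate the actual work to the memory client's built-in tool implementation.
    result = await memory_client.resolve_function_call(
        function_name="create_long_term_memory",
        args={"memories": memories},
        session_id=session_id,
        user_id=student_id,
    )
    return f"Stored {len(memories)} memory(ies): {result}"

The LLM never talks to the memory server directly; the `@tool` wrapper exposes a LangChain-callable interface while `resolve_function_call()` handles storage.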

File tree

1 file changed
python-recipes/context-engineering/notebooks/section-3-memory/04_memory_tools.ipynb

Lines changed: 104 additions & 148 deletions
@@ -127,6 +127,9 @@
 "import os\n",
 "import asyncio\n",
 "from langchain_openai import ChatOpenAI\n",
+"from langchain_openai import ChatOpenAI\n",
+"from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage\n",
+"from langchain_core.tools import tool\n",
 "from typing import List, Optional\n",
 "from agent_memory_client import MemoryAPIClient as MemoryClient, MemoryClientConfig\n",
 "import json\n",
@@ -144,6 +147,8 @@
 ")\n",
 "memory_client = MemoryClient(config=config)\n",
 "\n",
+"llm = ChatOpenAI(model=\"gpt-4o\", temperature=0.7)\n",
+"\n",
 "print(f\"✅ Setup complete for {student_id}\")"
 ]
 },
@@ -172,83 +177,66 @@
 "outputs": [],
 "source": [
 "# Get all memory tool schemas from the client\n",
-"# This includes: create_long_term_memory, search_long_term_memory, etc.\n",
+"# The memory client provides OpenAI-format tool schemas\n",
 "memory_tool_schemas = memory_client.get_all_memory_tool_schemas()\n",
 "\n",
-"print(\"Available memory tools:\")\n",
-"for tool in memory_tool_schemas:\n",
-" print(f\" - {tool['function']['name']}: {tool['function']['description'][:80]}...\")\n",
+"print(\"Available memory tools from client:\")\n",
+"for tool_schema in memory_tool_schemas:\n",
+" print(f\" - {tool_schema['function']['name']}: {tool_schema['function']['description'][:80]}...\")\n",
+"\n",
+"# Create LangChain tools that wrap the memory client's resolve_function_call\n",
+"# This allows us to use LangChain's tool calling while leveraging the client's built-in tools\n",
+"\n",
+"@tool\n",
+"async def create_long_term_memory(memories: List[dict]) -> str:\n",
+" \"\"\"\n",
+" Store important information in long-term memory.\n",
+" \n",
+" Args:\n",
+" memories: List of memory objects with 'text', 'memory_type', 'topics', and 'entities'\n",
+" \n",
+" Use this when students share preferences, goals, or important facts.\n",
+" \"\"\"\n",
+" result = await memory_client.resolve_function_call(\n",
+" function_name=\"create_long_term_memory\",\n",
+" args={\"memories\": memories},\n",
+" session_id=session_id,\n",
+" user_id=student_id\n",
+" )\n",
+" return f\"✅ Stored {len(memories)} memory(ies): {result}\"\n",
+"\n",
+"@tool\n",
+"async def search_long_term_memory(text: str, limit: int = 5) -> str:\n",
+" \"\"\"\n",
+" Search for relevant memories using semantic search.\n",
+" \n",
+" Args:\n",
+" text: What to search for in memories\n",
+" limit: Maximum number of memories to retrieve (default: 5)\n",
+" \n",
+" Use this when you need to recall information about the student.\n",
+" \"\"\"\n",
+" result = await memory_client.resolve_function_call(\n",
+" function_name=\"search_long_term_memory\",\n",
+" args={\"text\": text, \"limit\": limit},\n",
+" session_id=session_id,\n",
+" user_id=student_id\n",
+" )\n",
+" return str(result)\n",
 "\n",
-"print(\"\\n✅ Memory tool schemas loaded from client\")"
+"print(\"\\n✅ LangChain tools created that wrap memory client's built-in tools\")"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### How Tool Resolution Works\n",
+"### Key Insight: Wrapping the Memory Client\n",
 "\n",
-"When the LLM calls a memory tool, you use `resolve_function_call()` to execute it:"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"import json\n",
-"\n",
-"# Example: LLM wants to store a memory\n",
-"# The LLM will call: create_long_term_memory with arguments\n",
-"\n",
-"# Simulate a tool call from the LLM\n",
-"example_tool_call = {\n",
-" \"name\": \"create_long_term_memory\",\n",
-" \"arguments\": json.dumps({\n",
-" \"memories\": [\n",
-" {\n",
-" \"text\": \"Student prefers morning classes\",\n",
-" \"memory_type\": \"semantic\",\n",
-" \"topics\": [\"preferences\", \"schedule\"]\n",
-" }\n",
-" ]\n",
-" })\n",
-"}\n",
-"\n",
-"# Resolve the tool call\n",
-"result = await memory_client.resolve_function_call(\n",
-" function_name=example_tool_call[\"name\"],\n",
-" args=json.loads(example_tool_call[\"arguments\"]),\n",
-" session_id=session_id,\n",
-" user_id=student_id\n",
-")\n",
-"\n",
-"print(f\"Tool call result: {result}\")\n",
-"print(\"\\n✅ Memory stored via tool call!\")\n",
-"\n",
-"# Similarly for search:\n",
-"search_tool_call = {\n",
-" \"name\": \"search_long_term_memory\",\n",
-" \"arguments\": json.dumps({\n",
-" \"text\": \"student preferences\",\n",
-" \"limit\": 5\n",
-" })\n",
-"}\n",
-"\n",
-"search_result = await memory_client.resolve_function_call(\n",
-" function_name=search_tool_call[\"name\"],\n",
-" args=json.loads(search_tool_call[\"arguments\"]),\n",
-" session_id=session_id,\n",
-" user_id=student_id\n",
-")\n",
-"\n",
-"print(f\"\\nSearch result: {search_result}\")\n",
-"print(\"\\n✅ Memories retrieved via tool call!\")\n",
-"\n",
-"# The key insight: You don't need to manually implement tool logic!\n",
-"# The memory client handles everything via resolve_function_call()\n",
-" pass # Just for demonstration"
+"Our LangChain tools are thin wrappers around `memory_client.resolve_function_call()`. This gives us:\n",
+"- LangChain's tool calling interface (familiar to LangGraph users)\n",
+"- Memory client's built-in tool implementations (no need to reimplement)\n",
+"- Best of both worlds!"
 ]
 },
 {
@@ -266,13 +254,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# Configure agent with memory tools from the client\n",
-"# Note: For LangChain, we need to convert OpenAI tool schemas to LangChain format\n",
-"# In production with OpenAI directly, you'd use memory_tool_schemas as-is\n",
-"\n",
-"# For this demo, we'll show the pattern with OpenAI's API directly\n",
-"import openai\n",
-"openai_client = openai.AsyncOpenAI()\n",
+"# Configure agent with our LangChain memory tools\n",
+"memory_tools = [create_long_term_memory, search_long_term_memory]\n",
+"llm_with_tools = llm.bind_tools(memory_tools)\n",
 "\n",
 "system_prompt = \"\"\"You are a class scheduling agent for Redis University.\n",
 "\n",
@@ -288,7 +272,7 @@
 "Be proactive about using memory to provide personalized service.\n",
 "\"\"\"\n",
 "\n",
-"print(\"✅ Agent configured with memory tools\")"
+"print(\"✅ Agent configured with LangChain memory tools\")"
 ]
 },
 {
@@ -311,52 +295,38 @@
 "user_message = \"I prefer online courses because I work part-time.\"\n",
 "\n",
 "messages = [\n",
-" {\"role\": \"system\", \"content\": system_prompt},\n",
-" {\"role\": \"user\", \"content\": user_message}\n",
+" SystemMessage(content=system_prompt),\n",
+" HumanMessage(content=user_message)\n",
 "]\n",
 "\n",
 "print(f\"\\n👤 User: {user_message}\")\n",
 "\n",
-"# Call LLM with memory tools\n",
-"response = await openai_client.chat.completions.create(\n",
-" model=\"gpt-4o\",\n",
-" messages=messages,\n",
-" tools=memory_tool_schemas\n",
-")\n",
-"\n",
-"message = response.choices[0].message\n",
+"# First response - should call create_long_term_memory\n",
+"response = llm_with_tools.invoke(messages)\n",
 "\n",
-"if message.tool_calls:\n",
+"if response.tool_calls:\n",
 " print(\"\\n🤖 Agent decision: Store this preference\")\n",
-" for tool_call in message.tool_calls:\n",
-" print(f\" Tool: {tool_call.function.name}\")\n",
-" print(f\" Args: {tool_call.function.arguments}\")\n",
+" for tool_call in response.tool_calls:\n",
+" print(f\" Tool: {tool_call['name']}\")\n",
+" print(f\" Args: {tool_call['args']}\")\n",
 " \n",
-" # Resolve the tool call using the memory client\n",
-" result = await memory_client.resolve_function_call(\n",
-" function_name=tool_call.function.name,\n",
-" args=json.loads(tool_call.function.arguments),\n",
-" session_id=session_id,\n",
-" user_id=student_id\n",
-" )\n",
-" print(f\" Result: {result}\")\n",
-" \n",
-" # Add tool result to messages\n",
-" messages.append({\"role\": \"assistant\", \"content\": message.content or \"\", \"tool_calls\": [{\n",
-" \"id\": tool_call.id,\n",
-" \"type\": \"function\",\n",
-" \"function\": {\"name\": tool_call.function.name, \"arguments\": tool_call.function.arguments}\n",
-" }]})\n",
-" messages.append({\"role\": \"tool\", \"content\": str(result), \"tool_call_id\": tool_call.id})\n",
+" # Execute the tool (LangChain handles calling our wrapped function)\n",
+" if tool_call['name'] == 'create_long_term_memory':\n",
+" result = await create_long_term_memory.ainvoke(tool_call['args'])\n",
+" print(f\" Result: {result}\")\n",
+" \n",
+" # Add tool result to messages\n",
+" messages.append(response)\n",
+" messages.append(ToolMessage(\n",
+" content=result,\n",
+" tool_call_id=tool_call['id']\n",
+" ))\n",
 " \n",
 " # Get final response\n",
-" final_response = await openai_client.chat.completions.create(\n",
-" model=\"gpt-4o\",\n",
-" messages=messages\n",
-" )\n",
-" print(f\"\\n🤖 Agent: {final_response.choices[0].message.content}\")\n",
+" final_response = llm_with_tools.invoke(messages)\n",
+" print(f\"\\n🤖 Agent: {final_response.content}\")\n",
 "else:\n",
-" print(f\"\\n🤖 Agent: {message.content}\")\n",
+" print(f\"\\n🤖 Agent: {response.content}\")\n",
 " print(\"\\n⚠️ Agent didn't use memory tool\")\n",
 "\n",
 "print(\"\\n\" + \"=\" * 80)"
@@ -385,54 +355,40 @@
 "user_message = \"What courses would you recommend for me?\"\n",
 "\n",
 "messages = [\n",
-" {\"role\": \"system\", \"content\": system_prompt},\n",
-" {\"role\": \"user\", \"content\": user_message}\n",
+" SystemMessage(content=system_prompt),\n",
+" HumanMessage(content=user_message)\n",
 "]\n",
 "\n",
 "print(f\"\\n👤 User: {user_message}\")\n",
 "\n",
-"# Call LLM with memory tools\n",
-"response = await openai_client.chat.completions.create(\n",
-" model=\"gpt-4o\",\n",
-" messages=messages,\n",
-" tools=memory_tool_schemas\n",
-")\n",
-"\n",
-"message = response.choices[0].message\n",
+"# First response - should call search_long_term_memory\n",
+"response = llm_with_tools.invoke(messages)\n",
 "\n",
-"if message.tool_calls:\n",
+"if response.tool_calls:\n",
 " print(\"\\n🤖 Agent decision: Search for preferences first\")\n",
-" for tool_call in message.tool_calls:\n",
-" print(f\" Tool: {tool_call.function.name}\")\n",
-" print(f\" Args: {tool_call.function.arguments}\")\n",
-" \n",
-" # Resolve the tool call using the memory client\n",
-" result = await memory_client.resolve_function_call(\n",
-" function_name=tool_call.function.name,\n",
-" args=json.loads(tool_call.function.arguments),\n",
-" session_id=session_id,\n",
-" user_id=student_id\n",
-" )\n",
-" print(f\"\\n Retrieved memories:\")\n",
-" print(f\" {result}\")\n",
+" for tool_call in response.tool_calls:\n",
+" print(f\" Tool: {tool_call['name']}\")\n",
+" print(f\" Args: {tool_call['args']}\")\n",
 " \n",
-" # Add tool result to messages\n",
-" messages.append({\"role\": \"assistant\", \"content\": message.content or \"\", \"tool_calls\": [{\n",
-" \"id\": tool_call.id,\n",
-" \"type\": \"function\",\n",
-" \"function\": {\"name\": tool_call.function.name, \"arguments\": tool_call.function.arguments}\n",
-" }]})\n",
-" messages.append({\"role\": \"tool\", \"content\": str(result), \"tool_call_id\": tool_call.id})\n",
+" # Execute the tool\n",
+" if tool_call['name'] == 'search_long_term_memory':\n",
+" result = await search_long_term_memory.ainvoke(tool_call['args'])\n",
+" print(f\"\\n Retrieved memories:\")\n",
+" print(f\" {result}\")\n",
+" \n",
+" # Add tool result to messages\n",
+" messages.append(response)\n",
+" messages.append(ToolMessage(\n",
+" content=result,\n",
+" tool_call_id=tool_call['id']\n",
+" ))\n",
 " \n",
 " # Get final response\n",
-" final_response = await openai_client.chat.completions.create(\n",
-" model=\"gpt-4o\",\n",
-" messages=messages\n",
-" )\n",
-" print(f\"\\n🤖 Agent: {final_response.choices[0].message.content}\")\n",
+" final_response = llm_with_tools.invoke(messages)\n",
+" print(f\"\\n🤖 Agent: {final_response.content}\")\n",
 " print(\"\\n✅ Agent used memories to personalize recommendation!\")\n",
 "else:\n",
-" print(f\"\\n🤖 Agent: {message.content}\")\n",
+" print(f\"\\n🤖 Agent: {response.content}\")\n",
 " print(\"\\n⚠️ Agent didn't search memories\")\n",
 "\n",
 "print(\"\\n\" + \"=\" * 80)"
