Commit ff2c4ac

Fix all save_working_memory calls to use put_working_memory
- Created script to automatically convert save_working_memory to put_working_memory
- Converts messages to MemoryMessage objects
- Creates WorkingMemory objects with proper structure
- Fixed remaining query= to text= in search calls
- Updated 4 notebooks with save_working_memory calls

All notebooks now use the correct MemoryAPIClient API.
1 parent 39290e6 commit ff2c4ac
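For reference, the change applied to each notebook cell looks roughly like the sketch below, assembled from the diffs that follow. It is a sketch only: the session_id, user_id, and messages values differ per notebook, the placeholder conversation is illustrative, and it assumes an already-initialized MemoryAPIClient instance named memory_client plus a notebook context where top-level await is allowed.

# Sketch of the conversion this commit applies (placeholder values, not from any one notebook).
# Assumes `memory_client` is an initialized MemoryAPIClient.
from agent_memory_client import WorkingMemory, MemoryMessage

session_id = "demo_session"  # placeholder
messages = [                 # plain dicts, as the notebooks built them before this commit
    {"role": "user", "content": "I prefer morning classes."},
    {"role": "assistant", "content": "Noted, I'll keep that in mind."},
]

# Old call (removed by this commit):
#   await memory_client.save_working_memory(
#       session_id=session_id,
#       messages=messages
#   )

# New call (added by this commit):
memory_messages = [MemoryMessage(**msg) for msg in messages]

working_memory = WorkingMemory(
    session_id=session_id,
    user_id="demo_user",
    messages=memory_messages,
    memories=[],
    data={}
)

await memory_client.put_working_memory(
    session_id=session_id,
    memory=working_memory,
    user_id="demo_user",
    model_name="gpt-4o"
)

The query= to text= rename mentioned in the commit message applies to the search calls and is not shown in this sketch.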

5 files changed: +292 -19 lines changed

python-recipes/context-engineering/notebooks/section-3-memory/01_working_memory_with_extraction_strategies.ipynb

Lines changed: 19 additions & 3 deletions
@@ -100,7 +100,7 @@
 "\n",
 "print(\"✅ Memory components imported successfully\")\n",
 "print(\"\\nNote: This notebook demonstrates working memory concepts.\")\n",
-"print(\"The MemoryClient provides working memory via save_working_memory() and get_working_memory()\")"
+"print(\"The MemoryClient provides working memory via put_working_memory() and get_or_create_working_memory()\")"
 ]
 },
 {
@@ -209,9 +209,25 @@
 "]\n",
 "\n",
 "# Save to working memory\n",
-"await memory_client.save_working_memory(\n",
+"from agent_memory_client import WorkingMemory, MemoryMessage\n",
+"\n",
+"# Convert messages to MemoryMessage format\n",
+"memory_messages = [MemoryMessage(**msg) for msg in messages]\n",
+"\n",
+"# Create WorkingMemory object\n",
+"working_memory = WorkingMemory(\n",
+" session_id=session_id,\n",
+" user_id=\"demo_user\",\n",
+" messages=memory_messages,\n",
+" memories=[],\n",
+" data={}\n",
+")\n",
+"\n",
+"await memory_client.put_working_memory(\n",
 " session_id=session_id,\n",
-" messages=messages\n",
+" memory=working_memory,\n",
+" user_id=\"demo_user\",\n",
+" model_name=\"gpt-4o\"\n",
 ")\n",
 "\n",
 "print(\"✅ Conversation saved to working memory\")\n",

python-recipes/context-engineering/notebooks/section-3-memory/03_memory_integration.ipynb

Lines changed: 54 additions & 12 deletions
@@ -188,12 +188,25 @@
 "\n",
 "# Step 4: Save working memory\n",
 "print(\"\\n4. Saving working memory...\")\n",
-"await memory_client.save_working_memory(\n",
+"from agent_memory_client import WorkingMemory, MemoryMessage\n",
+"\n",
+"# Convert messages to MemoryMessage format\n",
+"memory_messages = [MemoryMessage(**msg) for msg in []\n",
+"\n",
+"# Create WorkingMemory object\n",
+"working_memory = WorkingMemory(\n",
 " session_id=session_id_1,\n",
-" messages=[\n",
-" {\"role\": \"user\", \"content\": user_query},\n",
-" {\"role\": \"assistant\", \"content\": response.content}\n",
-" ]\n",
+" user_id=\"demo_user\",\n",
+" messages=memory_messages,\n",
+" memories=[],\n",
+" data={}\n",
+")\n",
+"\n",
+"await memory_client.put_working_memory(\n",
+" session_id=session_id_1,\n",
+" memory=working_memory,\n",
+" user_id=\"demo_user\",\n",
+" model_name=\"gpt-4o\"\n",
 ")\n",
 "print(\" ✅ Working memory saved\")\n",
 "print(\" ✅ Agent Memory Server will automatically extract important facts to long-term memory\")"
@@ -267,9 +280,25 @@
 " {\"role\": \"assistant\", \"content\": response.content}\n",
 "])\n",
 "\n",
-"await memory_client.save_working_memory(\n",
+"from agent_memory_client import WorkingMemory, MemoryMessage\n",
+"\n",
+"# Convert messages to MemoryMessage format\n",
+"memory_messages = [MemoryMessage(**msg) for msg in all_messages]\n",
+"\n",
+"# Create WorkingMemory object\n",
+"working_memory = WorkingMemory(\n",
+" session_id=session_id_1,\n",
+" user_id=\"demo_user\",\n",
+" messages=memory_messages,\n",
+" memories=[],\n",
+" data={}\n",
+")\n",
+"\n",
+"await memory_client.put_working_memory(\n",
 " session_id=session_id_1,\n",
-" messages=all_messages\n",
+" memory=working_memory,\n",
+" user_id=\"demo_user\",\n",
+" model_name=\"gpt-4o\"\n",
 ")\n",
 "print(\" ✅ Working memory saved with both turns\")\n",
 "print(\" ✅ Preferences will be extracted to long-term memory\")"
@@ -373,12 +402,25 @@
 "\n",
 "# Step 4: Save working memory\n",
 "print(\"\\n4. Saving working memory...\")\n",
-"await memory_client.save_working_memory(\n",
+"from agent_memory_client import WorkingMemory, MemoryMessage\n",
+"\n",
+"# Convert messages to MemoryMessage format\n",
+"memory_messages = [MemoryMessage(**msg) for msg in []\n",
+"\n",
+"# Create WorkingMemory object\n",
+"working_memory = WorkingMemory(\n",
 " session_id=session_id_2,\n",
-" messages=[\n",
-" {\"role\": \"user\", \"content\": user_query_3},\n",
-" {\"role\": \"assistant\", \"content\": response.content}\n",
-" ]\n",
+" user_id=\"demo_user\",\n",
+" messages=memory_messages,\n",
+" memories=[],\n",
+" data={}\n",
+")\n",
+"\n",
+"await memory_client.put_working_memory(\n",
+" session_id=session_id_2,\n",
+" memory=working_memory,\n",
+" user_id=\"demo_user\",\n",
+" model_name=\"gpt-4o\"\n",
 ")\n",
 "print(\" ✅ Working memory saved for new session\")"
 ]

python-recipes/context-engineering/notebooks/section-4-optimizations/01_context_window_management.ipynb

Lines changed: 18 additions & 2 deletions
@@ -331,9 +331,25 @@
 " {\"role\": \"assistant\", \"content\": response.content}\n",
 " ])\n",
 " \n",
-" await memory_client.save_working_memory(\n",
+" from agent_memory_client import WorkingMemory, MemoryMessage\n",
+" \n",
+" # Convert messages to MemoryMessage format\n",
+" memory_messages = [MemoryMessage(**msg) for msg in all_messages]\n",
+" \n",
+" # Create WorkingMemory object\n",
+" working_memory = WorkingMemory(\n",
+" session_id=session_id,\n",
+" user_id=\"demo_user\",\n",
+" messages=memory_messages,\n",
+" memories=[],\n",
+" data={}\n",
+" )\n",
+" \n",
+" await memory_client.put_working_memory(\n",
 " session_id=session_id,\n",
-" messages=all_messages\n",
+" memory=working_memory,\n",
+" user_id=\"demo_user\",\n",
+" model_name=\"gpt-4o\"\n",
 " )\n",
 " \n",
 " return response.content, len(all_messages)\n",

python-recipes/context-engineering/notebooks/section-4-optimizations/03_grounding_with_memory.ipynb

Lines changed: 18 additions & 2 deletions
@@ -183,9 +183,25 @@
 " {\"role\": \"user\" if isinstance(m, HumanMessage) else \"assistant\", \"content\": m.content}\n",
 " for m in conversation_history\n",
 " ]\n",
-" await memory_client.save_working_memory(\n",
+" from agent_memory_client import WorkingMemory, MemoryMessage\n",
+" \n",
+" # Convert messages to MemoryMessage format\n",
+" memory_messages = [MemoryMessage(**msg) for msg in messages_to_save]\n",
+" \n",
+" # Create WorkingMemory object\n",
+" working_memory = WorkingMemory(\n",
+" session_id=session_id,\n",
+" user_id=\"demo_user\",\n",
+" messages=memory_messages,\n",
+" memories=[],\n",
+" data={}\n",
+" )\n",
+" \n",
+" await memory_client.put_working_memory(\n",
 " session_id=session_id,\n",
-" messages=messages_to_save\n",
+" memory=working_memory,\n",
+" user_id=\"demo_user\",\n",
+" model_name=\"gpt-4o\"\n",
 " )\n",
 " \n",
 " return response.content, conversation_history\n",
Lines changed: 183 additions & 0 deletions
@@ -0,0 +1,183 @@
#!/usr/bin/env python3
"""
Fix save_working_memory calls in notebooks to use put_working_memory.
"""

import json
import sys
from pathlib import Path


def fix_save_working_memory_call(cell_source):
    """
    Replace save_working_memory calls with put_working_memory.

    Converts:
        await memory_client.save_working_memory(
            session_id=session_id,
            messages=messages
        )

    To:
        from agent_memory_client import WorkingMemory, MemoryMessage

        memory_messages = [MemoryMessage(**msg) for msg in messages]
        working_memory = WorkingMemory(
            session_id=session_id,
            user_id=user_id,
            messages=memory_messages,
            memories=[],
            data={}
        )

        await memory_client.put_working_memory(
            session_id=session_id,
            memory=working_memory,
            user_id=user_id,
            model_name="gpt-4o"
        )
    """
    source_text = ''.join(cell_source)

    # Skip if this is just documentation
    if 'save_working_memory()' in source_text and ('print(' in source_text or 'MemoryClient provides' in source_text):
        # Just update the documentation text
        new_source = []
        for line in cell_source:
            line = line.replace('save_working_memory()', 'put_working_memory()')
            line = line.replace('get_working_memory()', 'get_or_create_working_memory()')
            new_source.append(line)
        return new_source

    # Check if this cell has an actual save_working_memory call
    if 'await memory_client.save_working_memory(' not in source_text:
        return cell_source

    new_source = []
    in_save_call = False
    save_indent = ''
    session_id_var = 'session_id'
    messages_var = 'messages'
    user_id_var = 'user_id'

    # First pass: find the variables used
    for line in cell_source:
        if 'await memory_client.save_working_memory(' in line:
            save_indent = line[:len(line) - len(line.lstrip())]
            in_save_call = True
        elif in_save_call:
            if 'session_id=' in line:
                session_id_var = line.split('session_id=')[1].split(',')[0].split(')')[0].strip()
            elif 'messages=' in line:
                messages_var = line.split('messages=')[1].split(',')[0].split(')')[0].strip()
            if ')' in line:
                in_save_call = False

    # Check if user_id is defined in the cell
    if 'user_id' not in source_text:
        # Try to find student_id or demo_student
        if 'student_id' in source_text:
            user_id_var = 'student_id'
        elif 'demo_student' in source_text:
            user_id_var = '"demo_student_working_memory"'
        else:
            user_id_var = '"demo_user"'

    # Second pass: replace the call
    in_save_call = False
    skip_lines = 0

    for i, line in enumerate(cell_source):
        if skip_lines > 0:
            skip_lines -= 1
            continue

        if 'await memory_client.save_working_memory(' in line:
            # Add imports if not already present
            if 'from agent_memory_client import WorkingMemory' not in source_text:
                new_source.append(f'{save_indent}from agent_memory_client import WorkingMemory, MemoryMessage\n')
                new_source.append(f'{save_indent}\n')

            # Add conversion code
            new_source.append(f'{save_indent}# Convert messages to MemoryMessage format\n')
            new_source.append(f'{save_indent}memory_messages = [MemoryMessage(**msg) for msg in {messages_var}]\n')
            new_source.append(f'{save_indent}\n')
            new_source.append(f'{save_indent}# Create WorkingMemory object\n')
            new_source.append(f'{save_indent}working_memory = WorkingMemory(\n')
            new_source.append(f'{save_indent} session_id={session_id_var},\n')
            new_source.append(f'{save_indent} user_id={user_id_var},\n')
            new_source.append(f'{save_indent} messages=memory_messages,\n')
            new_source.append(f'{save_indent} memories=[],\n')
            new_source.append(f'{save_indent} data={{}}\n')
            new_source.append(f'{save_indent})\n')
            new_source.append(f'{save_indent}\n')
            new_source.append(f'{save_indent}await memory_client.put_working_memory(\n')
            new_source.append(f'{save_indent} session_id={session_id_var},\n')
            new_source.append(f'{save_indent} memory=working_memory,\n')
            new_source.append(f'{save_indent} user_id={user_id_var},\n')
            new_source.append(f'{save_indent} model_name="gpt-4o"\n')
            new_source.append(f'{save_indent})\n')

            # Skip the rest of the save_working_memory call
            in_save_call = True
        elif in_save_call:
            if ')' in line:
                in_save_call = False
            # Skip this line (part of old call)
        else:
            new_source.append(line)

    return new_source


def fix_notebook(notebook_path: Path) -> bool:
    """Fix a single notebook."""
    print(f"Processing: {notebook_path}")

    with open(notebook_path, 'r') as f:
        nb = json.load(f)

    modified = False

    for cell in nb['cells']:
        if cell['cell_type'] == 'code':
            original_source = cell['source'][:]
            cell['source'] = fix_save_working_memory_call(cell['source'])

            if cell['source'] != original_source:
                modified = True

    if modified:
        with open(notebook_path, 'w') as f:
            json.dump(nb, f, indent=2, ensure_ascii=False)
            f.write('\n')
        print(f" ✅ Updated {notebook_path.name}")
        return True
    else:
        print(f" ⏭️ No changes needed for {notebook_path.name}")
        return False


def main():
    notebooks_dir = Path(__file__).parent.parent / 'notebooks'

    # Find all notebooks with save_working_memory
    patterns = [
        'section-3-memory/*.ipynb',
        'section-4-optimizations/*.ipynb'
    ]

    total_updated = 0

    for pattern in patterns:
        for notebook_path in notebooks_dir.glob(pattern):
            if fix_notebook(notebook_path):
                total_updated += 1

    print(f"\n✅ Updated {total_updated} notebooks")
    return 0


if __name__ == '__main__':
    sys.exit(main())
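As a quick sanity check of the cell-level rewrite implemented above, the conversion function can be exercised directly on a small list of source lines. This usage sketch is not part of the commit; the sample cell below is made up, and it assumes fix_save_working_memory_call from the script above is already defined in the session.

# Illustrative sample cell (not taken from the notebooks): the old call pattern the script targets.
sample_cell = [
    "# Save to working memory\n",
    "await memory_client.save_working_memory(\n",
    "    session_id=session_id,\n",
    "    messages=messages\n",
    ")\n",
]

# Print the rewritten cell source; it should now build a WorkingMemory
# object and call put_working_memory instead of save_working_memory.
for line in fix_save_working_memory_call(sample_cell):
    print(line, end="")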
