Commit 4f26734

Support an optional user ID everywhere
1 parent 067f157 commit 4f26734

File tree

17 files changed: +217 -68 lines changed

CLAUDE.md

Lines changed: 1 addition & 0 deletions
@@ -16,6 +16,7 @@ uv run ruff check # Run linting
 uv run ruff format # Format code
 uv run pytest # Run tests
 uv run pytest tests/ # Run specific test directory
+uv run pytest --run-api-tests # Run all tests, including API tests
 uv add <dependency> # Add a dependency to pyproject.toml and update lock file
 uv remove <dependency> # Remove a dependency from pyproject.toml and update lock file
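
Note: --run-api-tests is a custom pytest flag, so it only works because the test suite registers it (normally in conftest.py via pytest_addoption). The sketch below shows the usual wiring for such a flag; the hook names are real pytest hooks, but the marker name requires_api_keys and the skip logic are illustrative, not taken from this repo.

import pytest


def pytest_addoption(parser):
    # Register the flag so `uv run pytest --run-api-tests` is accepted.
    parser.addoption(
        "--run-api-tests",
        action="store_true",
        default=False,
        help="Also run tests that call external APIs",
    )


def pytest_collection_modifyitems(config, items):
    # Without the flag, skip tests carrying the (hypothetical) marker.
    if config.getoption("--run-api-tests"):
        return
    skip_api = pytest.mark.skip(reason="needs --run-api-tests")
    for item in items:
        if "requires_api_keys" in item.keywords:
            item.add_marker(skip_api)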

agent-memory-client/agent_memory_client/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 memory management capabilities for AI agents and applications.
 """
 
-__version__ = "0.9.0b3"
+__version__ = "0.9.0b4"
 
 from .client import MemoryAPIClient, MemoryClientConfig, create_memory_client
 from .exceptions import (

agent-memory-client/agent_memory_client/client.py

Lines changed: 40 additions & 13 deletions
@@ -166,7 +166,11 @@ async def health_check(self) -> HealthCheckResponse:
             raise
 
     async def list_sessions(
-        self, limit: int = 20, offset: int = 0, namespace: str | None = None
+        self,
+        limit: int = 20,
+        offset: int = 0,
+        namespace: str | None = None,
+        user_id: str | None = None,
     ) -> SessionListResponse:
         """
         List available sessions with optional pagination and namespace filtering.
@@ -175,6 +179,7 @@ async def list_sessions(
             limit: Maximum number of sessions to return (default: 20)
             offset: Offset for pagination (default: 0)
             namespace: Optional namespace filter
+            user_id: Optional user ID filter
 
         Returns:
             SessionListResponse containing session IDs and total count
@@ -188,6 +193,9 @@ async def list_sessions(
         elif self.config.default_namespace is not None:
             params["namespace"] = self.config.default_namespace
 
+        if user_id is not None:
+            params["user_id"] = user_id
+
         try:
             response = await self._client.get("/v1/working-memory/", params=params)
             response.raise_for_status()
@@ -199,6 +207,7 @@ async def list_sessions(
     async def get_working_memory(
         self,
         session_id: str,
+        user_id: str | None = None,
         namespace: str | None = None,
         window_size: int | None = None,
         model_name: ModelNameLiteral | None = None,
@@ -209,6 +218,7 @@ async def get_working_memory(
 
         Args:
             session_id: The session ID to retrieve working memory for
+            user_id: The user ID to retrieve working memory for
             namespace: Optional namespace for the session
             window_size: Optional number of messages to include
             model_name: Optional model name to determine context window size
@@ -304,14 +314,15 @@ async def put_working_memory(
             raise
 
     async def delete_working_memory(
-        self, session_id: str, namespace: str | None = None
+        self, session_id: str, namespace: str | None = None, user_id: str | None = None
     ) -> AckResponse:
         """
         Delete working memory for a session.
 
         Args:
             session_id: The session ID to delete memory for
             namespace: Optional namespace for the session
+            user_id: Optional user ID for the session
 
         Returns:
             AckResponse indicating success
@@ -322,6 +333,9 @@ async def delete_working_memory(
         elif self.config.default_namespace is not None:
             params["namespace"] = self.config.default_namespace
 
+        if user_id is not None:
+            params["user_id"] = user_id
+
         try:
             response = await self._client.delete(
                 f"/v1/working-memory/{session_id}", params=params
@@ -571,7 +585,7 @@ async def search_long_term_memory(
 
             print(f"Found {results.total} memories")
             for memory in results.memories:
-                print(f"- {memory.text[:100]}... (distance: {memory.distance})")
+                print(f"- {memory.text[:100]}... (distance: {memory.dist})")
             ```
         """
         # Convert dictionary filters to their proper filter objects if needed
@@ -651,11 +665,12 @@ async def search_memory_tool(
         user_id: str | None = None,
     ) -> dict[str, Any]:
         """
-        Simplified memory search designed for LLM tool use.
+        Simplified long-term memory search designed for LLM tool use.
 
         This method provides a streamlined interface for LLMs to search
         long-term memory with common parameters and user-friendly output.
-        Perfect for exposing as a tool to LLM frameworks.
+        Perfect for exposing as a tool to LLM frameworks. Note: This only
+        searches long-term memory, not working memory.
 
         Args:
             query: The search query text
@@ -664,6 +679,7 @@ async def search_memory_tool(
             memory_type: Optional memory type ("episodic", "semantic", "message")
             max_results: Maximum results to return (default: 5)
             min_relevance: Optional minimum relevance score (0.0-1.0)
+            user_id: Optional user ID to filter memories by
 
         Returns:
             Dict with 'memories' list and 'summary' for LLM consumption
@@ -729,8 +745,8 @@ async def search_memory_tool(
                         "created_at": memory.created_at.isoformat()
                         if memory.created_at
                         else None,
-                        "relevance_score": 1.0 - memory.distance
-                        if hasattr(memory, "distance") and memory.distance is not None
+                        "relevance_score": 1.0 - memory.dist
+                        if hasattr(memory, "dist") and memory.dist is not None
                         else None,
                     }
                 )
@@ -784,7 +800,7 @@ async def handle_tool_calls(client, tool_calls):
             "type": "function",
             "function": {
                 "name": "search_memory",
-                "description": "Search long-term memory for relevant information based on a query. Use this when you need to recall past conversations, user preferences, or previously stored information.",
+                "description": "Search long-term memory for relevant information based on a query. Use this when you need to recall past conversations, user preferences, or previously stored information. Note: This searches only long-term memory, not current working memory.",
                 "parameters": {
                     "type": "object",
                     "properties": {
@@ -820,6 +836,10 @@ async def handle_tool_calls(client, tool_calls):
                             "maximum": 1.0,
                             "description": "Optional minimum relevance score (0.0-1.0, higher = more relevant)",
                         },
+                        "user_id": {
+                            "type": "string",
+                            "description": "Optional user ID to filter memories by (e.g., 'user123')",
+                        },
                     },
                     "required": ["query"],
                 },
@@ -864,6 +884,7 @@ async def get_working_memory_tool(
             result = await self.get_working_memory(
                 session_id=session_id,
                 namespace=namespace or self.config.default_namespace,
+                user_id=user_id,
             )
 
             # Format for LLM consumption
@@ -2037,18 +2058,24 @@ async def append_messages_to_working_memory(
         # Convert existing messages to dict format if they're objects
         converted_existing_messages = []
         for msg in existing_messages:
-            if hasattr(msg, "model_dump"):
-                converted_existing_messages.append(msg.model_dump())
-            elif hasattr(msg, "role") and hasattr(msg, "content"):
+            if hasattr(msg, "model_dump") and callable(
+                getattr(msg, "model_dump", None)
+            ):
+                converted_existing_messages.append(msg.model_dump())  # type: ignore
+            elif (
+                hasattr(msg, "role")
+                and hasattr(msg, "content")
+                and not isinstance(msg, dict)
+            ):
                 converted_existing_messages.append(
-                    {"role": msg.role, "content": msg.content}
+                    {"role": msg.role, "content": msg.content}  # type: ignore
                 )
             elif isinstance(msg, dict):
                 # Message is already a dictionary, use as-is
                 converted_existing_messages.append(msg)
             else:
                 # Fallback for any other message type - convert to string content
-                converted_existing_messages.append(  # type: ignore
+                converted_existing_messages.append(
                     {"role": "user", "content": str(msg)}
                 )
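
Taken together, the client changes thread an optional user_id through list_sessions, get_working_memory, delete_working_memory, and the tool helpers, where it is forwarded as a user_id query parameter. A minimal usage sketch, assuming a server at http://localhost:8000 and that create_memory_client (re-exported from agent_memory_client, see __init__.py above) is an awaitable factory taking a base URL; the exact signature and response attributes should be checked against the package rather than read off this diff.

import asyncio

from agent_memory_client import create_memory_client


async def main() -> None:
    client = await create_memory_client("http://localhost:8000")

    # Only sessions belonging to this user are returned.
    sessions = await client.list_sessions(limit=10, offset=0, user_id="user123")
    print(sessions)

    # Working-memory reads and deletes can now be scoped the same way.
    memory = await client.get_working_memory(session_id="chat-1", user_id="user123")
    print(len(memory.messages))
    await client.delete_working_memory(session_id="chat-1", user_id="user123")


asyncio.run(main())

Because every new parameter defaults to None, omitting user_id keeps the previous behavior, so existing callers are unaffected.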

agent_memory_server/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
 """Redis Agent Memory Server - A memory system for conversational AI."""
 
-__version__ = "0.9.0b3"
+__version__ = "0.9.0b4"

agent_memory_server/api.py

Lines changed: 16 additions & 7 deletions
@@ -188,7 +188,7 @@ async def list_sessions(
     Get a list of session IDs, with optional pagination.
 
     Args:
-        options: Query parameters (page, size, namespace)
+        options: Query parameters (limit, offset, namespace, user_id)
 
     Returns:
         List of session IDs
@@ -200,6 +200,7 @@ async def list_sessions(
         limit=options.limit,
         offset=options.offset,
         namespace=options.namespace,
+        user_id=options.user_id,
     )
 
     return SessionListResponse(
@@ -211,8 +212,8 @@ async def list_sessions(
 @router.get("/v1/working-memory/{session_id}", response_model=WorkingMemoryResponse)
 async def get_working_memory(
     session_id: str,
+    user_id: str | None = None,
     namespace: str | None = None,
-    window_size: int = settings.window_size,  # Deprecated: kept for backward compatibility
     model_name: ModelNameLiteral | None = None,
     context_window_max: int | None = None,
     current_user: UserInfo = Depends(get_current_user),
@@ -225,8 +226,8 @@ async def get_working_memory(
 
     Args:
         session_id: The session ID
+        user_id: The user ID to retrieve working memory for
         namespace: The namespace to use for the session
-        window_size: DEPRECATED - The number of messages to include (kept for backward compatibility)
         model_name: The client's LLM model name (will determine context window size if provided)
         context_window_max: Direct specification of the context window max tokens (overrides model_name)
 
@@ -240,6 +241,7 @@ async def get_working_memory(
         session_id=session_id,
         namespace=namespace,
         redis_client=redis,
+        user_id=user_id,
     )
 
     if not working_mem:
@@ -249,6 +251,7 @@ async def get_working_memory(
             memories=[],
             session_id=session_id,
             namespace=namespace,
+            user_id=user_id,
         )
 
     # Apply token-based truncation if we have messages and model info
@@ -266,17 +269,14 @@ async def get_working_memory(
                 break
         working_mem.messages = truncated_messages
 
-    # Fallback to message-count truncation for backward compatibility
-    elif len(working_mem.messages) > window_size:
-        working_mem.messages = working_mem.messages[-window_size:]
-
     return working_mem
 
 
 @router.put("/v1/working-memory/{session_id}", response_model=WorkingMemoryResponse)
 async def put_working_memory(
     session_id: str,
     memory: WorkingMemory,
+    user_id: str | None = None,
     model_name: ModelNameLiteral | None = None,
     context_window_max: int | None = None,
     background_tasks=Depends(get_background_tasks),
@@ -291,6 +291,7 @@ async def put_working_memory(
     Args:
         session_id: The session ID
         memory: Working memory to save
+        user_id: Optional user ID for the session (overrides user_id in memory object)
         model_name: The client's LLM model name for context window determination
         context_window_max: Direct specification of context window max tokens
         background_tasks: DocketBackgroundTasks instance (injected automatically)
@@ -303,6 +304,10 @@ async def put_working_memory(
     # Ensure session_id matches
     memory.session_id = session_id
 
+    # Override user_id if provided as query parameter
+    if user_id is not None:
+        memory.user_id = user_id
+
     # Validate that all structured memories have id (if any)
     for mem in memory.memories:
         if not mem.id:
@@ -359,6 +364,7 @@ async def put_working_memory(
 @router.delete("/v1/working-memory/{session_id}", response_model=AckResponse)
 async def delete_working_memory(
     session_id: str,
+    user_id: str | None = None,
     namespace: str | None = None,
     current_user: UserInfo = Depends(get_current_user),
 ):
@@ -369,6 +375,7 @@ async def delete_working_memory(
 
     Args:
         session_id: The session ID
+        user_id: Optional user ID for the session
         namespace: Optional namespace for the session
 
     Returns:
@@ -379,6 +386,7 @@ async def delete_working_memory(
     # Delete unified working memory
     await working_memory.delete_working_memory(
         session_id=session_id,
+        user_id=user_id,
         namespace=namespace,
         redis_client=redis,
     )
@@ -558,6 +566,7 @@ async def memory_prompt(
     working_mem = await working_memory.get_working_memory(
         session_id=params.session.session_id,
         namespace=params.session.namespace,
+        user_id=params.session.user_id,
         redis_client=redis,
     )
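
At the HTTP level the new parameter is just an optional user_id query parameter on the working-memory routes; on PUT it additionally overrides memory.user_id in the request body. A rough httpx sketch of the request shapes, assuming a server at http://localhost:8000 and no auth; deployments that enforce authentication through get_current_user would also need an Authorization header.

import asyncio

import httpx


async def main() -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as http:
        # GET /v1/working-memory/ - list sessions, filtered by user.
        r = await http.get("/v1/working-memory/", params={"limit": 20, "user_id": "user123"})
        r.raise_for_status()

        # GET /v1/working-memory/{session_id} - fetch one session's working memory.
        r = await http.get("/v1/working-memory/chat-1", params={"user_id": "user123"})
        r.raise_for_status()

        # DELETE /v1/working-memory/{session_id} - user_id and namespace are both optional.
        r = await http.delete("/v1/working-memory/chat-1", params={"user_id": "user123"})
        r.raise_for_status()


asyncio.run(main())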

agent_memory_server/config.py

Lines changed: 3 additions & 1 deletion
@@ -20,7 +20,6 @@ def load_yaml_settings():
 class Settings(BaseSettings):
     redis_url: str = "redis://localhost:6379"
     long_term_memory: bool = True
-    window_size: int = 20
     openai_api_key: str | None = None
     anthropic_api_key: str | None = None
     generation_model: str = "gpt-4o-mini"
@@ -66,6 +65,9 @@ class Settings(BaseSettings):
     auth0_client_id: str | None = None
     auth0_client_secret: str | None = None
 
+    # Working memory settings
+    window_size: int = 20  # Default number of recent messages to return
+
     # Other Application settings
     log_level: Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] = "INFO"
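
window_size is not removed, only relocated into a dedicated "Working memory settings" group, even though the deprecated message-count fallback in the GET endpoint above is gone. Since Settings subclasses pydantic's BaseSettings, the value can typically still be overridden from the environment; a sketch, assuming the default field-name-to-env-var mapping with no custom prefix (the project also loads YAML settings via load_yaml_settings, which may take precedence).

import os

os.environ["WINDOW_SIZE"] = "50"  # assumed env var name; BaseSettings matches field names case-insensitively by default

from agent_memory_server.config import Settings  # noqa: E402

settings = Settings()
print(settings.window_size)  # expected to print 50 under the assumptions above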
