
Commit 4f1cd40

Grab bag of updates, fixes
1 parent 3126c08 commit 4f1cd40

25 files changed: +3011, -470 lines

agent-memory-client/agent_memory_client/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 memory management capabilities for AI agents and applications.
 """

-__version__ = "0.9.0b4"
+__version__ = "0.9.0b5"

 from .client import MemoryAPIClient, MemoryClientConfig, create_memory_client
 from .exceptions import (

agent-memory-client/agent_memory_client/client.py

Lines changed: 49 additions & 16 deletions
@@ -6,7 +6,7 @@

 import asyncio
 import re
-from collections.abc import AsyncIterator
+from collections.abc import AsyncIterator, Sequence
 from typing import TYPE_CHECKING, Any, Literal, TypedDict

 if TYPE_CHECKING:
@@ -416,7 +416,7 @@ async def set_working_memory_data(
     async def add_memories_to_working_memory(
         self,
         session_id: str,
-        memories: list[ClientMemoryRecord | MemoryRecord],
+        memories: Sequence[ClientMemoryRecord | MemoryRecord],
         namespace: str | None = None,
         replace: bool = False,
     ) -> WorkingMemoryResponse:
@@ -482,7 +482,7 @@ async def add_memories_to_working_memory(
         return await self.put_working_memory(session_id, working_memory)

     async def create_long_term_memory(
-        self, memories: list[ClientMemoryRecord | MemoryRecord]
+        self, memories: Sequence[ClientMemoryRecord | MemoryRecord]
     ) -> AckResponse:
         """
         Create long-term memories for later retrieval.
@@ -541,6 +541,29 @@ async def create_long_term_memory(
             self._handle_http_error(e.response)
             raise

+    async def delete_long_term_memories(self, memory_ids: Sequence[str]) -> AckResponse:
+        """
+        Delete long-term memories.
+
+        Args:
+            memory_ids: List of memory IDs to delete
+
+        Returns:
+            AckResponse indicating success
+        """
+        params = {"memory_ids": list(memory_ids)}
+
+        try:
+            response = await self._client.delete(
+                "/v1/long-term-memory",
+                params=params,
+            )
+            response.raise_for_status()
+            return AckResponse(**response.json())
+        except httpx.HTTPStatusError as e:
+            self._handle_http_error(e.response)
+            raise
+
     async def search_long_term_memory(
         self,
         text: str,
@@ -666,8 +689,8 @@ async def search_long_term_memory(
     async def search_memory_tool(
         self,
         query: str,
-        topics: list[str] | None = None,
-        entities: list[str] | None = None,
+        topics: Sequence[str] | None = None,
+        entities: Sequence[str] | None = None,
         memory_type: str | None = None,
         max_results: int = 5,
         min_relevance: float | None = None,
@@ -940,8 +963,8 @@ async def add_memory_tool(
         session_id: str,
         text: str,
         memory_type: str,
-        topics: list[str] | None = None,
-        entities: list[str] | None = None,
+        topics: Sequence[str] | None = None,
+        entities: Sequence[str] | None = None,
         namespace: str | None = None,
         user_id: str | None = None,
     ) -> dict[str, Any]:
@@ -1172,7 +1195,7 @@ def get_update_memory_data_tool_schema(cls) -> dict[str, Any]:
         }

     @classmethod
-    def get_all_memory_tool_schemas(cls) -> list[dict[str, Any]]:
+    def get_all_memory_tool_schemas(cls) -> Sequence[dict[str, Any]]:
         """
         Get all memory-related tool schemas for easy LLM integration.

@@ -1200,7 +1223,7 @@ def get_all_memory_tool_schemas(cls) -> list[dict[str, Any]]:
         ]

     @classmethod
-    def get_all_memory_tool_schemas_anthropic(cls) -> list[dict[str, Any]]:
+    def get_all_memory_tool_schemas_anthropic(cls) -> Sequence[dict[str, Any]]:
         """
         Get all memory-related tool schemas in Anthropic format.

@@ -1470,11 +1493,11 @@ async def resolve_tool_call(

     async def resolve_tool_calls(
         self,
-        tool_calls: list[dict[str, Any]],
+        tool_calls: Sequence[dict[str, Any]],
         session_id: str,
         namespace: str | None = None,
         user_id: str | None = None,
-    ) -> list[ToolCallResolutionResult]:
+    ) -> Sequence[ToolCallResolutionResult]:
         """
         Resolve multiple tool calls from any LLM provider format.

@@ -1713,11 +1736,11 @@ async def _resolve_update_memory_data(

     async def resolve_function_calls(
         self,
-        function_calls: list[dict[str, Any]],
+        function_calls: Sequence[dict[str, Any]],
         session_id: str,
         namespace: str | None = None,
         user_id: str | None = None,
-    ) -> list[ToolCallResolutionResult]:
+    ) -> Sequence[ToolCallResolutionResult]:
         """
         Resolve multiple function calls in batch.

@@ -1765,7 +1788,7 @@ async def resolve_function_calls(
     async def promote_working_memories_to_long_term(
         self,
         session_id: str,
-        memory_ids: list[str] | None = None,
+        memory_ids: Sequence[str] | None = None,
         namespace: str | None = None,
     ) -> AckResponse:
         """
@@ -1805,10 +1828,10 @@ async def promote_working_memories_to_long_term(

     async def bulk_create_long_term_memories(
         self,
-        memory_batches: list[list[ClientMemoryRecord | MemoryRecord]],
+        memory_batches: Sequence[Sequence[ClientMemoryRecord | MemoryRecord]],
         batch_size: int = 100,
         delay_between_batches: float = 0.1,
-    ) -> list[AckResponse]:
+    ) -> Sequence[AckResponse]:
         """
         Create multiple batches of memories with proper rate limiting.

@@ -2104,6 +2127,8 @@ async def memory_prompt(
         """
         Hydrate a user query with memory context and return a prompt ready to send to an LLM.

+        NOTE: `long_term_search` uses the same filter options as `search_long_term_memories`.
+
         Args:
             query: The input text to find relevant context for
             session_id: Optional session ID to include session messages
@@ -2163,9 +2188,17 @@ async def memory_prompt(

         # Add long-term search parameters if provided
         if long_term_search is not None:
+            if "namespace" not in long_term_search:
+                if namespace is not None:
+                    long_term_search["namespace"] = {"eq": namespace}
+                elif self.config.default_namespace is not None:
+                    long_term_search["namespace"] = {
+                        "eq": self.config.default_namespace
+                    }
             payload["long_term_search"] = long_term_search

         try:
+            print("Payload: ", payload)
             response = await self._client.post(
                 "/v1/memory/prompt",
                 json=payload,
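
A minimal usage sketch of the new client-side delete and the namespace defaulting in memory_prompt. It assumes a server at http://localhost:8000, that create_memory_client is awaitable and accepts a base_url argument, and that memory_prompt takes namespace and long_term_search keywords as the diff suggests; the memory IDs and namespace values are illustrative.

import asyncio

from agent_memory_client import create_memory_client


async def main() -> None:
    # Assumed construction; adjust base_url/config to your deployment.
    client = await create_memory_client(base_url="http://localhost:8000")

    # New in this commit: delete long-term memories by ID (accepts any Sequence[str]).
    ack = await client.delete_long_term_memories(memory_ids=["mem-123", "mem-456"])
    print(ack.status)

    # memory_prompt now fills in a namespace filter from the `namespace` argument
    # (or config.default_namespace) when long_term_search does not set one.
    prompt = await client.memory_prompt(
        query="What did we decide about the search API?",
        namespace="demo",               # assumed keyword name, per the diff above
        long_term_search={"limit": 5},  # same filter options as search_long_term_memories
    )
    print(prompt)


asyncio.run(main())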

agent_memory_server/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
 """Redis Agent Memory Server - A memory system for conversational AI."""

-__version__ = "0.9.0b4"
+__version__ = "0.9.0b5"

agent_memory_server/api.py

Lines changed: 38 additions & 5 deletions
@@ -1,5 +1,5 @@
 import tiktoken
-from fastapi import APIRouter, Depends, HTTPException
+from fastapi import APIRouter, Depends, HTTPException, Query
 from mcp.server.fastmcp.prompts import base
 from mcp.types import TextContent
 from ulid import ULID
@@ -338,7 +338,7 @@ async def put_working_memory(
         updated_memory.namespace,
     )

-    # Index message-based memories (existing logic)
+    # Index message-based memories
     if updated_memory.messages:
         from agent_memory_server.models import MemoryRecord

@@ -348,6 +348,7 @@ async def put_working_memory(
                 session_id=session_id,
                 text=f"{msg.role}: {msg.content}",
                 namespace=updated_memory.namespace,
+                user_id=updated_memory.user_id,
                 memory_type=MemoryTypeEnum.MESSAGE,
             )
             for msg in updated_memory.messages
@@ -452,20 +453,41 @@ async def search_long_term_memory(
     # Extract filter objects from the payload
     filters = payload.get_filters()

+    print("Long-term search filters: ", filters)
+
     kwargs = {
         "distance_threshold": payload.distance_threshold,
         "limit": payload.limit,
         "offset": payload.offset,
         **filters,
     }

-    if payload.text:
-        kwargs["text"] = payload.text
+    print("Kwargs: ", kwargs)
+
+    kwargs["text"] = payload.text or ""

     # Pass text and filter objects to the search function (no redis needed for vectorstore adapter)
     return await long_term_memory.search_long_term_memories(**kwargs)


+@router.delete("/v1/long-term-memory", response_model=AckResponse)
+async def delete_long_term_memory(
+    memory_ids: list[str] = Query(default=[], alias="memory_ids"),
+    current_user: UserInfo = Depends(get_current_user),
+):
+    """
+    Delete long-term memories by ID
+
+    Args:
+        memory_ids: List of memory IDs to delete (passed as query parameters)
+    """
+    if not settings.long_term_memory:
+        raise HTTPException(status_code=400, detail="Long-term memory is disabled")
+
+    count = await long_term_memory.delete_long_term_memories(ids=memory_ids)
+    return AckResponse(status=f"ok, deleted {count} memories")
+
+
 @router.post("/v1/memory/search", response_model=MemoryRecordResultsResponse)
 async def search_memory(
     payload: SearchRequest,
@@ -546,6 +568,8 @@ async def memory_prompt(
     redis = await get_redis_conn()
     _messages = []

+    print("Received params: ", params)
+
     if params.session:
         # Use token limit for memory prompt, fallback to message count for backward compatibility
         if params.session.model_name or params.session.context_window_max:
@@ -616,8 +640,17 @@ async def memory_prompt(

     if params.long_term_search:
         # TODO: Exclude session messages if we already included them from session memory
+
+        # If no text is provided in long_term_search, use the user's query
+        if not params.long_term_search.text:
+            # Create a new SearchRequest with the query as text
+            search_payload = params.long_term_search.model_copy()
+            search_payload.text = params.query
+        else:
+            search_payload = params.long_term_search
+
         long_term_memories = await search_long_term_memory(
-            params.long_term_search,
+            search_payload,
         )

         if long_term_memories.total > 0:
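
For reference, a hedged sketch of calling the new DELETE endpoint directly with httpx. The base URL and memory IDs are illustrative, and any auth headers required by get_current_user are omitted.

import asyncio

import httpx


async def main() -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as http:
        # memory_ids are passed as repeated query parameters:
        # /v1/long-term-memory?memory_ids=mem-123&memory_ids=mem-456
        resp = await http.delete(
            "/v1/long-term-memory",
            params={"memory_ids": ["mem-123", "mem-456"]},
        )
        resp.raise_for_status()
        print(resp.json())  # e.g. {"status": "ok, deleted 2 memories"}


asyncio.run(main())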

agent_memory_server/config.py

Lines changed: 2 additions & 0 deletions
@@ -78,6 +78,8 @@ class Settings(BaseSettings):

     # Topic modeling
     topic_model_source: Literal["BERTopic", "LLM"] = "LLM"
+    # If using BERTopic, use a supported model, such as
+    # "MaartenGr/BERTopic_Wikipedia"
     topic_model: str = "gpt-4o-mini"
     enable_topic_extraction: bool = True
     top_k_topics: int = 3
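
A small sketch of how these fields combine when switching topic extraction to BERTopic, assuming the remaining Settings fields have usable defaults; in the running server these values would normally come from environment variables rather than direct construction.

from agent_memory_server.config import Settings

# Values mirror the new comment: with source "BERTopic", topic_model should be
# a supported BERTopic model such as "MaartenGr/BERTopic_Wikipedia",
# not the default LLM model name.
settings = Settings(
    topic_model_source="BERTopic",
    topic_model="MaartenGr/BERTopic_Wikipedia",
    enable_topic_extraction=True,
    top_k_topics=3,
)
print(settings.topic_model_source, settings.topic_model)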

agent_memory_server/docket_tasks.py

Lines changed: 2 additions & 0 deletions
@@ -10,6 +10,7 @@
 from agent_memory_server.extraction import extract_discrete_memories
 from agent_memory_server.long_term_memory import (
     compact_long_term_memories,
+    delete_long_term_memories,
     extract_memory_structure,
     index_long_term_memories,
     promote_working_memory_to_long_term,
@@ -28,6 +29,7 @@
     compact_long_term_memories,
     extract_discrete_memories,
     promote_working_memory_to_long_term,
+    delete_long_term_memories,
 ]

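
Besides running as a registered Docket task, the new coroutine can be awaited directly, as the API handler above does. A hedged sketch, assuming the server's Redis connection is configured and that the function takes ids= and returns the number of deleted memories (matching how the endpoint uses it); the memory ID is illustrative.

import asyncio

from agent_memory_server.long_term_memory import delete_long_term_memories


async def main() -> None:
    # Mirrors the API handler: pass memory IDs, get back a deleted count.
    count = await delete_long_term_memories(ids=["mem-123"])
    print(f"deleted {count} memories")


asyncio.run(main())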
