
Commit 58ee06b

abrookins and claude committed
refactor: resolve PR review comments on recency and MCP changes
- Move recency functions to utils.recency to eliminate circular imports
- Update MCP methods to return Pydantic objects instead of JSON strings
- Move imports to top of files now that circular dependencies are resolved

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent a1a5a4d commit 58ee06b

File tree

5 files changed: +113 -129 lines changed

agent_memory_server/long_term_memory.py

Lines changed: 5 additions & 93 deletions
@@ -1,11 +1,9 @@
-import hashlib
 import json
 import logging
 import numbers
 import time
 from collections.abc import Iterable
 from datetime import UTC, datetime, timedelta
-from math import exp, log
 from typing import Any

 from docket.dependencies import Perpetual
@@ -41,6 +39,11 @@
     MemoryTypeEnum,
 )
 from agent_memory_server.utils.keys import Keys
+from agent_memory_server.utils.recency import (
+    _days_between,
+    generate_memory_hash,
+    rerank_with_recency,
+)
 from agent_memory_server.utils.redis import (
     ensure_search_index_exists,
     get_redis_conn,
@@ -122,29 +125,6 @@ async def extract_memory_structure(memory: MemoryRecord):
     )  # type: ignore


-def generate_memory_hash(memory: MemoryRecord) -> str:
-    """
-    Generate a stable hash for a memory based on text, user_id, and session_id.
-
-    Args:
-        memory: MemoryRecord object containing memory data
-
-    Returns:
-        A stable hash string
-    """
-    # Create a deterministic string representation of the key content fields only
-    # This ensures merged memories with same content have the same hash
-    content_fields = {
-        "text": memory.text,
-        "user_id": memory.user_id,
-        "session_id": memory.session_id,
-        "namespace": memory.namespace,
-        "memory_type": memory.memory_type,
-    }
-    content_json = json.dumps(content_fields, sort_keys=True)
-    return hashlib.sha256(content_json.encode()).hexdigest()
-
-
 async def merge_memories_with_llm(
     memories: list[MemoryRecord], llm_client: Any = None
 ) -> MemoryRecord:
@@ -1363,74 +1343,6 @@ async def delete_long_term_memories(
     return await adapter.delete_memories(ids)


-# Seconds per day constant for time calculations
-SECONDS_PER_DAY = 86400.0
-
-
-def _days_between(now: datetime, then: datetime | None) -> float:
-    if then is None:
-        return float("inf")
-    delta = now - then
-    return max(delta.total_seconds() / SECONDS_PER_DAY, 0.0)
-
-
-def score_recency(
-    memory: MemoryRecordResult,
-    *,
-    now: datetime,
-    params: dict,
-) -> float:
-    """Compute a recency score in [0, 1] combining freshness and novelty.
-
-    - freshness decays with last_accessed using half-life `half_life_last_access_days`
-    - novelty decays with created_at using half-life `half_life_created_days`
-    - recency = freshness_weight * freshness + novelty_weight * novelty
-    """
-    half_life_last_access = max(
-        float(params.get("half_life_last_access_days", 7.0)), 0.001
-    )
-    half_life_created = max(float(params.get("half_life_created_days", 30.0)), 0.001)
-
-    freshness_weight = float(params.get("freshness_weight", 0.6))
-    novelty_weight = float(params.get("novelty_weight", 0.4))
-
-    # Convert to decay rates
-    access_decay_rate = log(2.0) / half_life_last_access
-    creation_decay_rate = log(2.0) / half_life_created
-
-    days_since_access = _days_between(now, memory.last_accessed)
-    days_since_created = _days_between(now, memory.created_at)
-
-    freshness = exp(-access_decay_rate * days_since_access)
-    novelty = exp(-creation_decay_rate * days_since_created)
-
-    recency_score = freshness_weight * freshness + novelty_weight * novelty
-    # Clamp to [0, 1]
-    return max(0.0, min(1.0, recency_score))
-
-
-def rerank_with_recency(
-    results: list[MemoryRecordResult],
-    *,
-    now: datetime,
-    params: dict,
-) -> list[MemoryRecordResult]:
-    """Re-rank results using combined semantic similarity and recency.
-
-    score = semantic_weight * (1 - dist) + recency_weight * recency_score
-    """
-    semantic_weight = float(params.get("semantic_weight", 0.8))
-    recency_weight = float(params.get("recency_weight", 0.2))
-
-    def combined_score(mem: MemoryRecordResult) -> float:
-        similarity = 1.0 - float(mem.dist)
-        recency = score_recency(mem, now=now, params=params)
-        return semantic_weight * similarity + recency_weight * recency
-
-    # Sort by descending score (stable sort preserves original order on ties)
-    return sorted(results, key=combined_score, reverse=True)
-
-
 def _is_numeric(value: Any) -> bool:
     """Check if a value is numeric (int, float, or other number type)."""
     return isinstance(value, numbers.Number)
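The hash logic moves verbatim to the new module. As a standalone sketch of the guarantee it provides (the `Record` dataclass below is a stand-in for the package's `MemoryRecord` model; the field set and sorted-key JSON serialization mirror the diff above), two records with identical content fields always produce the same SHA-256 digest, which is what lets merged duplicates collapse to one entry:

```python
import hashlib
import json
from dataclasses import dataclass


@dataclass
class Record:  # stand-in for MemoryRecord
    text: str
    user_id: str | None
    session_id: str | None
    namespace: str | None
    memory_type: str


def memory_hash(memory: Record) -> str:
    # Same field set and sorted-key serialization as generate_memory_hash
    content_fields = {
        "text": memory.text,
        "user_id": memory.user_id,
        "session_id": memory.session_id,
        "namespace": memory.namespace,
        "memory_type": memory.memory_type,
    }
    return hashlib.sha256(
        json.dumps(content_fields, sort_keys=True).encode()
    ).hexdigest()


a = Record("prefers green tea", "u1", "s1", "ns", "semantic")
b = Record("prefers green tea", "u1", "s1", "ns", "semantic")
assert memory_hash(a) == memory_hash(b)  # identical content -> identical hash
```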

agent_memory_server/mcp.py

Lines changed: 6 additions & 26 deletions
@@ -3,7 +3,6 @@

 import ulid
 from mcp.server.fastmcp import FastMCP as _FastMCPBase
-from mcp.types import TextContent

 from agent_memory_server.api import (
     create_long_term_memory as core_create_long_term_memory,
@@ -451,28 +450,14 @@ async def search_long_term_memory(
             offset=offset,
         )
         results = await core_search_long_term_memory(payload)
-        import json as _json
-
-        return TextContent(
-            type="text",
-            text=_json.dumps(
-                MemoryRecordResults(
-                    total=results.total,
-                    memories=results.memories,
-                    next_offset=results.next_offset,
-                ).model_dump(mode="json")
-            ),
+        return MemoryRecordResults(
+            total=results.total,
+            memories=results.memories,
+            next_offset=results.next_offset,
         )
     except Exception as e:
         logger.error(f"Error in search_long_term_memory tool: {e}")
-        import json as _json
-
-        return TextContent(
-            type="text",
-            text=_json.dumps(
-                MemoryRecordResults(total=0, memories=[], next_offset=None).model_dump()
-            ),
-        )
+        return MemoryRecordResults(total=0, memories=[], next_offset=None)


 # Notes that exist outside of the docstring to avoid polluting the LLM prompt:
@@ -621,12 +606,7 @@ async def memory_prompt(
     if search_payload is not None:
         _params["long_term_search"] = search_payload

-    import json as _json
-
-    result = await core_memory_prompt(
-        params=MemoryPromptRequest(query=query, **_params)
-    )
-    return TextContent(type="text", text=_json.dumps(result.model_dump()))
+    return await core_memory_prompt(params=MemoryPromptRequest(query=query, **_params))


 @mcp_app.tool()
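For context, the shape this change adopts looks roughly like the sketch below: the tool returns a Pydantic model and lets FastMCP handle serialization, rather than hand-building `TextContent` with `json.dumps`. The server name, `search` tool, and `Results` model are illustrative stand-ins, not the project's actual classes, and exactly how FastMCP renders a model return (structured content vs. serialized text) depends on the MCP SDK version:

```python
from mcp.server.fastmcp import FastMCP
from pydantic import BaseModel

app = FastMCP("demo-memory-server")


class Results(BaseModel):  # stand-in for MemoryRecordResults
    total: int
    memories: list[str]
    next_offset: int | None = None


@app.tool()
async def search(query: str) -> Results:
    # Return the model directly; no manual TextContent/json.dumps step
    return Results(total=1, memories=[f"hit for {query!r}"], next_offset=None)
```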

agent_memory_server/utils/recency.py

Lines changed: 98 additions & 0 deletions
@@ -0,0 +1,98 @@
+"""Recency-related utilities for memory scoring and hashing."""
+
+import hashlib
+import json
+from datetime import datetime
+from math import exp, log
+
+from agent_memory_server.models import MemoryRecord, MemoryRecordResult
+
+
+# Seconds per day constant for time calculations
+SECONDS_PER_DAY = 86400.0
+
+
+def generate_memory_hash(memory: MemoryRecord) -> str:
+    """
+    Generate a stable hash for a memory based on text, user_id, and session_id.
+
+    Args:
+        memory: MemoryRecord object containing memory data
+
+    Returns:
+        A stable hash string
+    """
+    # Create a deterministic string representation of the key content fields only
+    # This ensures merged memories with same content have the same hash
+    content_fields = {
+        "text": memory.text,
+        "user_id": memory.user_id,
+        "session_id": memory.session_id,
+        "namespace": memory.namespace,
+        "memory_type": memory.memory_type,
+    }
+    content_json = json.dumps(content_fields, sort_keys=True)
+    return hashlib.sha256(content_json.encode()).hexdigest()
+
+
+def _days_between(now: datetime, then: datetime | None) -> float:
+    if then is None:
+        return float("inf")
+    delta = now - then
+    return max(delta.total_seconds() / SECONDS_PER_DAY, 0.0)
+
+
+def score_recency(
+    memory: MemoryRecordResult,
+    *,
+    now: datetime,
+    params: dict,
+) -> float:
+    """Compute a recency score in [0, 1] combining freshness and novelty.
+
+    - freshness decays with last_accessed using half-life `half_life_last_access_days`
+    - novelty decays with created_at using half-life `half_life_created_days`
+    - recency = freshness_weight * freshness + novelty_weight * novelty
+    """
+    half_life_last_access = max(
+        float(params.get("half_life_last_access_days", 7.0)), 0.001
+    )
+    half_life_created = max(float(params.get("half_life_created_days", 30.0)), 0.001)
+
+    freshness_weight = float(params.get("freshness_weight", 0.6))
+    novelty_weight = float(params.get("novelty_weight", 0.4))
+
+    # Convert to decay rates
+    access_decay_rate = log(2.0) / half_life_last_access
+    creation_decay_rate = log(2.0) / half_life_created
+
+    days_since_access = _days_between(now, memory.last_accessed)
+    days_since_created = _days_between(now, memory.created_at)
+
+    freshness = exp(-access_decay_rate * days_since_access)
+    novelty = exp(-creation_decay_rate * days_since_created)
+
+    recency_score = freshness_weight * freshness + novelty_weight * novelty
+    return min(max(recency_score, 0.0), 1.0)
+
+
+def rerank_with_recency(
+    results: list[MemoryRecordResult],
+    *,
+    now: datetime,
+    params: dict,
+) -> list[MemoryRecordResult]:
+    """Re-rank results using combined semantic similarity and recency.
+
+    score = semantic_weight * (1 - dist) + recency_weight * recency_score
+    """
+    semantic_weight = float(params.get("semantic_weight", 0.8))
+    recency_weight = float(params.get("recency_weight", 0.2))
+
+    def combined_score(mem: MemoryRecordResult) -> float:
+        similarity = 1.0 - float(mem.dist)
+        recency = score_recency(mem, now=now, params=params)
+        return semantic_weight * similarity + recency_weight * recency
+
+    # Sort by descending score (stable sort preserves original order on ties)
+    return sorted(results, key=combined_score, reverse=True)
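To make the scoring concrete, here is a standalone worked example under the module's default parameters (7-day access half-life, 30-day created half-life, 0.6/0.4 freshness/novelty weights, 0.8/0.2 semantic/recency blend). The `Memory` dataclass is a stub for `MemoryRecordResult`; the math mirrors `score_recency` and `rerank_with_recency` above:

```python
from dataclasses import dataclass
from datetime import UTC, datetime, timedelta
from math import exp, log


@dataclass
class Memory:  # stand-in for MemoryRecordResult
    text: str
    dist: float  # vector distance; lower means more semantically similar
    created_at: datetime
    last_accessed: datetime


def recency_score(mem: Memory, now: datetime) -> float:
    # Exponential half-life decay using the module defaults:
    # 7-day half-life on last access, 30-day half-life on creation
    days_access = (now - mem.last_accessed).total_seconds() / 86400.0
    days_created = (now - mem.created_at).total_seconds() / 86400.0
    freshness = exp(-(log(2.0) / 7.0) * days_access)
    novelty = exp(-(log(2.0) / 30.0) * days_created)
    return min(max(0.6 * freshness + 0.4 * novelty, 0.0), 1.0)


now = datetime.now(UTC)
candidates = [
    Memory("old but very similar", dist=0.10,
           created_at=now - timedelta(days=60),
           last_accessed=now - timedelta(days=30)),
    Memory("fresh but less similar", dist=0.25,
           created_at=now - timedelta(days=1),
           last_accessed=now - timedelta(hours=2)),
]

# Combined score, as in rerank_with_recency:
#   score = 0.8 * (1 - dist) + 0.2 * recency_score
ranked = sorted(
    candidates,
    key=lambda m: 0.8 * (1.0 - m.dist) + 0.2 * recency_score(m, now),
    reverse=True,
)
print([m.text for m in ranked])  # fresh memory first
```

With these numbers the fresher memory scores about 0.80 combined and overtakes the older but semantically closer one at about 0.75, which is exactly the trade-off the recency weight buys.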

agent_memory_server/utils/redis_query.py

Lines changed: 2 additions & 2 deletions
@@ -4,8 +4,8 @@

 from redisvl.query import AggregationQuery, RangeQuery, VectorQuery

-# Import constants from long_term_memory module
-from agent_memory_server.long_term_memory import SECONDS_PER_DAY
+# Import constants from utils.recency module
+from agent_memory_server.utils.recency import SECONDS_PER_DAY


 class RecencyAggregationQuery(AggregationQuery):

agent_memory_server/vectorstore_adapter.py

Lines changed: 2 additions & 8 deletions
@@ -35,6 +35,8 @@
     MemoryRecordResult,
     MemoryRecordResults,
 )
+from agent_memory_server.utils.recency import generate_memory_hash, rerank_with_recency
+from agent_memory_server.utils.redis_query import RecencyAggregationQuery


 logger = logging.getLogger(__name__)
@@ -417,9 +419,6 @@ def generate_memory_hash(self, memory: MemoryRecord) -> str:
             A stable hash string
         """
         # Use the same hash logic as long_term_memory.py for consistency
-        # Lazy import to avoid circular dependency
-        from agent_memory_server.long_term_memory import generate_memory_hash
-
         return generate_memory_hash(memory)

     def _apply_client_side_recency_reranking(
@@ -438,9 +437,6 @@ def _apply_client_side_recency_reranking(
             return memory_results

         try:
-            # Lazy import to avoid circular dependency
-            from agent_memory_server.long_term_memory import rerank_with_recency
-
             now = datetime.now(UTC)
             params = {
                 "semantic_weight": float(recency_params.get("semantic_weight", 0.8))
@@ -917,8 +913,6 @@ async def _search_with_redis_aggregation(
         )

         # Aggregate with APPLY/SORTBY boosted score via helper
-        # Lazy import to avoid circular dependency
-        from agent_memory_server.utils.redis_query import RecencyAggregationQuery

         now_ts = int(datetime.now(UTC).timestamp())
         agg = (
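For reference, the workaround these hunks retire looks roughly like the hypothetical sketch below (the function and the `hashlib` import are illustrative; the project's real cycle was between `long_term_memory` and `vectorstore_adapter`). A function-level import defers resolution to call time, which sidesteps an import-time cycle but hides the dependency and repeats the import-machinery lookup on every call:

```python
def hash_memory(memory) -> str:
    # Lazy import: resolved when the function runs, not when the module loads
    from hashlib import sha256

    return sha256(repr(memory).encode()).hexdigest()


print(hash_memory({"text": "hello"}))
```

Moving the shared helpers into a leaf module (`utils/recency.py`) that imports nothing from the higher-level modules lets both `long_term_memory.py` and `vectorstore_adapter.py` import them at the top of the file with no cycle.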
