
Commit 453c7b5

abrookins and claude committed
feat: expand short recency parameter names to descriptive ones
- Update core algorithms to use descriptive names (freshness_weight, novelty_weight, etc.)
- Add backward compatibility for old short names (wf, wa, w_sem, w_recency)
- Update API models with new descriptive field names while preserving old ones
- Add helper function to build recency params with fallback to old names
- Update tests to demonstrate new preferred parameter naming
- Internal functions now use clear variable names (semantic_weight vs w_sem)

Old names still work for backward compatibility:

- wf → freshness_weight
- wa → novelty_weight
- w_sem → semantic_weight
- w_recency → recency_weight

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent a8fb65c commit 453c7b5

File tree (6 files changed, +122 −61 lines):

- agent_memory_server/api.py
- agent_memory_server/long_term_memory.py
- agent_memory_server/models.py
- agent_memory_server/utils/redis_query.py
- tests/test_forgetting.py
- tests/test_recency_aggregation.py

agent_memory_server/api.py

Lines changed: 51 additions & 18 deletions
```diff
@@ -1,3 +1,5 @@
+from typing import Any
+
 import tiktoken
 from fastapi import APIRouter, Depends, HTTPException, Query
 from mcp.server.fastmcp.prompts import base
@@ -128,6 +130,54 @@ def _calculate_context_usage_percentages(
     return min(total_percentage, 100.0), min(until_summarization_percentage, 100.0)
 
 
+def _build_recency_params(payload: SearchRequest) -> dict[str, Any]:
+    """Build recency parameters dict with backward compatibility.
+
+    Prefers new descriptive parameter names over old short names.
+    """
+    # Use new parameter names if available, fall back to old ones, then defaults
+    semantic_weight = (
+        payload.recency_semantic_weight
+        if payload.recency_semantic_weight is not None
+        else (payload.recency_w_sem if payload.recency_w_sem is not None else 0.8)
+    )
+    recency_weight = (
+        payload.recency_recency_weight
+        if payload.recency_recency_weight is not None
+        else (
+            payload.recency_w_recency if payload.recency_w_recency is not None else 0.2
+        )
+    )
+    freshness_weight = (
+        payload.recency_freshness_weight
+        if payload.recency_freshness_weight is not None
+        else (payload.recency_wf if payload.recency_wf is not None else 0.6)
+    )
+    novelty_weight = (
+        payload.recency_novelty_weight
+        if payload.recency_novelty_weight is not None
+        else (payload.recency_wa if payload.recency_wa is not None else 0.4)
+    )
+
+    return {
+        # Use new descriptive names internally
+        "semantic_weight": semantic_weight,
+        "recency_weight": recency_weight,
+        "freshness_weight": freshness_weight,
+        "novelty_weight": novelty_weight,
+        "half_life_last_access_days": (
+            payload.recency_half_life_last_access_days
+            if payload.recency_half_life_last_access_days is not None
+            else 7.0
+        ),
+        "half_life_created_days": (
+            payload.recency_half_life_created_days
+            if payload.recency_half_life_created_days is not None
+            else 30.0
+        ),
+    }
+
+
 async def _summarize_working_memory(
     memory: WorkingMemory,
     model_name: ModelNameLiteral | None = None,
@@ -558,25 +608,8 @@ async def search_long_term_memory(
         else False
     )
     if server_side_recency:
-        recency_params = {
-            "w_sem": payload.recency_w_sem
-            if payload.recency_w_sem is not None
-            else 0.8,
-            "w_recency": payload.recency_w_recency
-            if payload.recency_w_recency is not None
-            else 0.2,
-            "wf": payload.recency_wf if payload.recency_wf is not None else 0.6,
-            "wa": payload.recency_wa if payload.recency_wa is not None else 0.4,
-            # map half-life to smoothing constants server-side if needed
-            "half_life_last_access_days": payload.recency_half_life_last_access_days
-            if payload.recency_half_life_last_access_days is not None
-            else 7.0,
-            "half_life_created_days": payload.recency_half_life_created_days
-            if payload.recency_half_life_created_days is not None
-            else 30.0,
-        }
         kwargs["server_side_recency"] = True
-        kwargs["recency_params"] = recency_params
+        kwargs["recency_params"] = _build_recency_params(payload)
         return await long_term_memory.search_long_term_memories(**kwargs)
 
     raw_results = await long_term_memory.search_long_term_memories(**kwargs)
```
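For context, a minimal sketch of the precedence rule that `_build_recency_params` applies: new descriptive field, then old short field, then the hard-coded default. The `resolve` helper and the sample values below are illustrative only, not part of this commit.

```python
def resolve(new_value, old_value, default):
    """Prefer the new descriptive name, then the old short name, then the default."""
    if new_value is not None:
        return new_value
    if old_value is not None:
        return old_value
    return default


# Hypothetical payload values for demonstration:
semantic_weight = resolve(new_value=0.9, old_value=None, default=0.8)   # 0.9 (new name wins)
freshness_weight = resolve(new_value=None, old_value=0.5, default=0.6)  # 0.5 (old name still honored)
novelty_weight = resolve(new_value=None, old_value=None, default=0.4)   # 0.4 (default)

print(semantic_weight, freshness_weight, novelty_weight)  # 0.9 0.5 0.4
```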

agent_memory_server/long_term_memory.py

Lines changed: 28 additions & 23 deletions
```diff
@@ -1383,28 +1383,32 @@ def score_recency(
 ) -> float:
     """Compute a recency score in [0, 1] combining freshness and novelty.
 
-    - freshness f decays with last_accessed using half-life `half_life_last_access_days`
-    - novelty a decays with created_at using half-life `half_life_created_days`
-    - r = wf * f + wa * a
+    - freshness decays with last_accessed using half-life `half_life_last_access_days`
+    - novelty decays with created_at using half-life `half_life_created_days`
+    - recency = freshness_weight * freshness + novelty_weight * novelty
     """
-    half_life_la = max(float(params.get("half_life_last_access_days", 7.0)), 0.001)
-    half_life_cr = max(float(params.get("half_life_created_days", 30.0)), 0.001)
-    wf = float(params.get("wf", 0.6))
-    wa = float(params.get("wa", 0.4))
+    half_life_last_access = max(
+        float(params.get("half_life_last_access_days", 7.0)), 0.001
+    )
+    half_life_created = max(float(params.get("half_life_created_days", 30.0)), 0.001)
+
+    # Support both old and new parameter names for backward compatibility
+    freshness_weight = float(params.get("freshness_weight", params.get("wf", 0.6)))
+    novelty_weight = float(params.get("novelty_weight", params.get("wa", 0.4)))
 
     # Convert to decay rates
-    mu = log(2.0) / half_life_la
-    lam = log(2.0) / half_life_cr
+    access_decay_rate = log(2.0) / half_life_last_access
+    creation_decay_rate = log(2.0) / half_life_created
 
     days_since_access = _days_between(now, memory.last_accessed)
     days_since_created = _days_between(now, memory.created_at)
 
-    f = exp(-mu * days_since_access)
-    a = exp(-lam * days_since_created)
+    freshness = exp(-access_decay_rate * days_since_access)
+    novelty = exp(-creation_decay_rate * days_since_created)
 
-    r = wf * f + wa * a
+    recency_score = freshness_weight * freshness + novelty_weight * novelty
     # Clamp to [0, 1]
-    return max(0.0, min(1.0, r))
+    return max(0.0, min(1.0, recency_score))
 
 
 def rerank_with_recency(
@@ -1415,15 +1419,16 @@ def rerank_with_recency(
 ) -> list[MemoryRecordResult]:
     """Re-rank results using combined semantic similarity and recency.
 
-    score = w_sem * (1 - dist) + w_recency * recency_score
+    score = semantic_weight * (1 - dist) + recency_weight * recency_score
     """
-    w_sem = float(params.get("w_sem", 0.8))
-    w_rec = float(params.get("w_recency", 0.2))
+    # Support both old and new parameter names for backward compatibility
+    semantic_weight = float(params.get("semantic_weight", params.get("w_sem", 0.8)))
+    recency_weight = float(params.get("recency_weight", params.get("w_recency", 0.2)))
 
     def combined_score(mem: MemoryRecordResult) -> float:
-        sim = 1.0 - float(mem.dist)
-        rec = score_recency(mem, now=now, params=params)
-        return w_sem * sim + w_rec * rec
+        similarity = 1.0 - float(mem.dist)
+        recency = score_recency(mem, now=now, params=params)
+        return semantic_weight * similarity + recency_weight * recency
 
     # Sort by descending score (stable sort preserves original order on ties)
     return sorted(results, key=combined_score, reverse=True)
@@ -1507,10 +1512,10 @@ def select_ids_for_forgetting(
     # Budget-based pruning (keep top N by recency among eligible)
     if isinstance(budget, int) and budget >= 0 and budget < len(eligible_for_budget):
         params = {
-            "w_sem": 0.0,  # budget considers only recency
-            "w_recency": 1.0,
-            "wf": 0.6,
-            "wa": 0.4,
+            "semantic_weight": 0.0,  # budget considers only recency
+            "recency_weight": 1.0,
+            "freshness_weight": 0.6,
+            "novelty_weight": 0.4,
             "half_life_last_access_days": 7.0,
             "half_life_created_days": 30.0,
         }
```
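As a worked example of the scoring math in `score_recency`, here is a standalone sketch using the default weights and half-lives above; the day counts are invented for illustration.

```python
from math import exp, log

# Defaults from score_recency above.
half_life_last_access = 7.0   # days
half_life_created = 30.0      # days
freshness_weight, novelty_weight = 0.6, 0.4

# Invented example: last accessed 3 days ago, created 45 days ago.
days_since_access, days_since_created = 3.0, 45.0

# Exponential half-life decay: after one half-life a component drops to 0.5.
freshness = exp(-(log(2.0) / half_life_last_access) * days_since_access)  # ~0.74
novelty = exp(-(log(2.0) / half_life_created) * days_since_created)       # ~0.35

recency_score = freshness_weight * freshness + novelty_weight * novelty   # ~0.59
print(max(0.0, min(1.0, recency_score)))
```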

agent_memory_server/models.py

Lines changed: 19 additions & 0 deletions
```diff
@@ -390,6 +390,25 @@ class SearchRequest(BaseModel):
     recency_half_life_created_days: float | None = Field(
         default=None, description="Half-life (days) for created_at decay"
     )
+
+    # New descriptive parameter names (preferred over short names above)
+    recency_semantic_weight: float | None = Field(
+        default=None,
+        description="Weight for semantic similarity (preferred over recency_w_sem)",
+    )
+    recency_recency_weight: float | None = Field(
+        default=None,
+        description="Weight for recency score (preferred over recency_w_recency)",
+    )
+    recency_freshness_weight: float | None = Field(
+        default=None,
+        description="Weight for freshness component (preferred over recency_wf)",
+    )
+    recency_novelty_weight: float | None = Field(
+        default=None,
+        description="Weight for novelty (age) component (preferred over recency_wa)",
+    )
+
     # Server-side recency rerank (Redis-only path) toggle
     server_side_recency: bool | None = Field(
         default=None,
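```

To show the two accepted naming styles side by side, the payloads below set the same weights with the new fields and with the legacy ones; the query text and any other `SearchRequest` fields are assumptions for the sake of the example.

```python
# Preferred: new descriptive field names.
preferred = {
    "text": "travel preferences",  # hypothetical query field/value
    "recency_semantic_weight": 0.7,
    "recency_recency_weight": 0.3,
    "recency_freshness_weight": 0.5,
    "recency_novelty_weight": 0.5,
}

# Still accepted: the older short names map to the same weights.
legacy = {
    "text": "travel preferences",
    "recency_w_sem": 0.7,
    "recency_w_recency": 0.3,
    "recency_wf": 0.5,
    "recency_wa": 0.5,
}
```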

agent_memory_server/utils/redis_query.py

Lines changed: 14 additions & 10 deletions
```diff
@@ -56,20 +56,24 @@ def apply_recency(
         self, *, now_ts: int, params: dict[str, Any] | None = None
     ) -> RecencyAggregationQuery:
         params = params or {}
-        w_sem = float(params.get("w_sem", 0.8))
-        w_rec = float(params.get("w_recency", 0.2))
-        wf = float(params.get("wf", 0.6))
-        wa = float(params.get("wa", 0.4))
-        hl_la = float(params.get("half_life_last_access_days", 7.0))
-        hl_cr = float(params.get("half_life_created_days", 30.0))
+
+        # Support both old and new parameter names for backward compatibility
+        semantic_weight = float(params.get("semantic_weight", params.get("w_sem", 0.8)))
+        recency_weight = float(
+            params.get("recency_weight", params.get("w_recency", 0.2))
+        )
+        freshness_weight = float(params.get("freshness_weight", params.get("wf", 0.6)))
+        novelty_weight = float(params.get("novelty_weight", params.get("wa", 0.4)))
+        half_life_access = float(params.get("half_life_last_access_days", 7.0))
+        half_life_created = float(params.get("half_life_created_days", 30.0))
 
         self.apply(days_since_access=f"max(0, ({now_ts} - @last_accessed)/86400.0)")
         self.apply(days_since_created=f"max(0, ({now_ts} - @created_at)/86400.0)")
-        self.apply(freshness=f"pow(2, -@days_since_access/{hl_la})")
-        self.apply(novelty=f"pow(2, -@days_since_created/{hl_cr})")
-        self.apply(recency=f"{wf}*@freshness+{wa}*@novelty")
+        self.apply(freshness=f"pow(2, -@days_since_access/{half_life_access})")
+        self.apply(novelty=f"pow(2, -@days_since_created/{half_life_created})")
+        self.apply(recency=f"{freshness_weight}*@freshness+{novelty_weight}*@novelty")
         self.apply(sim="1-(@__vector_score/2)")
-        self.apply(boosted_score=f"{w_sem}*@sim+{w_rec}*@recency")
+        self.apply(boosted_score=f"{semantic_weight}*@sim+{recency_weight}*@recency")
 
         return self
```
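For a sense of what `apply_recency` builds for the Redis aggregation, this sketch substitutes example weights into the same f-string expressions shown above; the weight values are the defaults used elsewhere in the diff, not taken from a real query.

```python
now_ts = 1_700_000_000
semantic_weight, recency_weight = 0.8, 0.2
freshness_weight, novelty_weight = 0.6, 0.4
half_life_access, half_life_created = 7.0, 30.0

# The APPLY expressions built by apply_recency, with the weights substituted.
exprs = {
    "days_since_access": f"max(0, ({now_ts} - @last_accessed)/86400.0)",
    "days_since_created": f"max(0, ({now_ts} - @created_at)/86400.0)",
    "freshness": f"pow(2, -@days_since_access/{half_life_access})",
    "novelty": f"pow(2, -@days_since_created/{half_life_created})",
    "recency": f"{freshness_weight}*@freshness+{novelty_weight}*@novelty",
    "sim": "1-(@__vector_score/2)",
    "boosted_score": f"{semantic_weight}*@sim+{recency_weight}*@recency",
}
for name, expr in exprs.items():
    print(f"{name} = {expr}")
```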

tests/test_forgetting.py

Lines changed: 6 additions & 6 deletions
```diff
@@ -40,10 +40,10 @@ def make_result(
 
 def default_params():
     return {
-        "w_sem": 0.8,
-        "w_recency": 0.2,
-        "wf": 0.6,
-        "wa": 0.4,
+        "semantic_weight": 0.8,
+        "recency_weight": 0.2,
+        "freshness_weight": 0.6,
+        "novelty_weight": 0.4,
         "half_life_last_access_days": 7.0,
         "half_life_created_days": 30.0,
     }
@@ -87,8 +87,8 @@ def test_rerank_with_recency_prefers_recent_when_similarity_close():
 def test_rerank_with_recency_respects_semantic_weight_when_gap_large():
     # If semantic similarity difference is large, it should dominate
     params = default_params()
-    params["w_sem"] = 0.9
-    params["w_recency"] = 0.1
+    params["semantic_weight"] = 0.9
+    params["recency_weight"] = 0.1
     now = datetime.now(UTC)
 
     much_more_similar_old = make_result(
```

tests/test_recency_aggregation.py

Lines changed: 4 additions & 4 deletions
```diff
@@ -21,10 +21,10 @@ async def test_recency_aggregation_query_builds_and_paginates():
         .apply_recency(
             now_ts=1_700_000_000,
             params={
-                "w_sem": 0.7,
-                "w_recency": 0.3,
-                "wf": 0.5,
-                "wa": 0.5,
+                "semantic_weight": 0.7,
+                "recency_weight": 0.3,
+                "freshness_weight": 0.5,
+                "novelty_weight": 0.5,
                 "half_life_last_access_days": 5.0,
                 "half_life_created_days": 20.0,
             },
```
