@@ -7,12 +7,14 @@
 from abc import ABC, abstractmethod
 from collections.abc import Callable
 from datetime import UTC, datetime
+from functools import reduce
 from typing import Any, TypeVar

 from langchain_core.documents import Document
 from langchain_core.embeddings import Embeddings
 from langchain_core.vectorstores import VectorStore
 from langchain_redis.vectorstores import RedisVectorStore
+from redisvl.query import RangeQuery, VectorQuery

 from agent_memory_server.filters import (
     CreatedAt,
@@ -415,6 +417,7 @@ def generate_memory_hash(self, memory: MemoryRecord) -> str:
             A stable hash string
         """
         # Use the same hash logic as long_term_memory.py for consistency
+        # Lazy import to avoid circular dependency
         from agent_memory_server.long_term_memory import generate_memory_hash

         return generate_memory_hash(memory)
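The "Lazy import to avoid circular dependency" comments added here and further down mark a deliberate pattern: this module and `agent_memory_server.long_term_memory` reference each other, so the import is deferred into the function body instead of sitting at module level. A minimal, self-contained sketch of why that works; the `a_mod`/`b_mod` names and temp-dir setup are illustrative only, not the project's layout:

```python
# Two tiny modules with a cycle: a_mod imports b_mod at module level, while
# b_mod defers its import of a_mod into a function body (the lazy pattern).
import sys
import tempfile
from pathlib import Path

pkg = Path(tempfile.mkdtemp())

(pkg / "a_mod.py").write_text(
    "import b_mod\n"
    "\n"
    "def generate_memory_hash(memory):\n"
    "    return str(hash(memory))\n"
)
# A top-level `from a_mod import generate_memory_hash` in b_mod would raise
# ImportError while a_mod is still initializing; deferring it means the name
# is looked up only at call time, after both modules have finished loading.
(pkg / "b_mod.py").write_text(
    "def memory_hash(memory):\n"
    "    from a_mod import generate_memory_hash\n"
    "    return generate_memory_hash(memory)\n"
)

sys.path.insert(0, str(pkg))
import a_mod  # noqa: E402  # triggers the a_mod -> b_mod import chain

print(a_mod.b_mod.memory_hash("example"))  # lazy b_mod -> a_mod import succeeds
```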
@@ -435,11 +438,10 @@ def _apply_client_side_recency_reranking(
             return memory_results

         try:
-            from datetime import UTC as _UTC, datetime as _dt
-
+            # Lazy import to avoid circular dependency
            from agent_memory_server.long_term_memory import rerank_with_recency

-            now = _dt.now(_UTC)
+            now = datetime.now(UTC)
             params = {
                 "semantic_weight": float(recency_params.get("semantic_weight", 0.8))
                 if recency_params
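For this client-side fallback path, `rerank_with_recency` blends the vector similarity score with how recently a memory was touched (the hunk shows a `semantic_weight` defaulting to 0.8). Its actual signature and decay curve live in `agent_memory_server.long_term_memory` and may differ; the sketch below only illustrates the general shape of such a reranker, with an assumed exponential half-life decay:

```python
# Hedged sketch of recency-weighted reranking: blend semantic similarity with
# an age-based recency score and sort by the combined value.
import math
from dataclasses import dataclass
from datetime import UTC, datetime, timedelta


@dataclass
class ScoredMemory:
    text: str
    semantic_score: float          # higher is better, e.g. cosine similarity
    last_accessed: datetime


def recency_score(last_accessed: datetime, now: datetime, half_life_days: float = 30.0) -> float:
    """Exponential decay: 1.0 for 'just now', 0.5 after one half-life."""
    age_days = (now - last_accessed).total_seconds() / 86400
    return math.exp(-math.log(2) * age_days / half_life_days)


def rerank(memories: list[ScoredMemory], semantic_weight: float = 0.8) -> list[ScoredMemory]:
    now = datetime.now(UTC)
    recency_weight = 1.0 - semantic_weight
    return sorted(
        memories,
        key=lambda m: semantic_weight * m.semantic_score
        + recency_weight * recency_score(m.last_accessed, now),
        reverse=True,
    )


if __name__ == "__main__":
    now = datetime.now(UTC)
    results = rerank(
        [
            ScoredMemory("old but relevant", 0.92, now - timedelta(days=120)),
            ScoredMemory("fresh and relevant", 0.90, now - timedelta(hours=2)),
        ]
    )
    print([m.text for m in results])
```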
@@ -686,8 +688,6 @@ async def count_memories(
         """Count memories in the vector store using LangChain."""
         try:
             # Convert basic filters to our filter objects, then to backend format
-            from agent_memory_server.filters import Namespace, SessionId, UserId
-
             namespace_filter = Namespace(eq=namespace) if namespace else None
             user_id_filter = UserId(eq=user_id) if user_id else None
             session_id_filter = SessionId(eq=session_id) if session_id else None
@@ -891,12 +891,6 @@ async def _search_with_redis_aggregation(
         Raises:
             Exception: If Redis aggregation fails (caller should handle fallback)
         """
-        from datetime import UTC as _UTC, datetime as _dt
-
-        from langchain_core.documents import Document
-        from redisvl.query import RangeQuery, VectorQuery
-
-        from agent_memory_server.utils.redis_query import RecencyAggregationQuery

         index = self._get_vectorstore_index()
         if index is None:
@@ -923,7 +917,10 @@ async def _search_with_redis_aggregation(
             )

         # Aggregate with APPLY/SORTBY boosted score via helper
-        now_ts = int(_dt.now(_UTC).timestamp())
+        # Lazy import to avoid circular dependency
+        from agent_memory_server.utils.redis_query import RecencyAggregationQuery
+
+        now_ts = int(datetime.now(UTC).timestamp())
         agg = (
             RecencyAggregationQuery.from_vector_query(
                 knn, filter_expression=redis_filter
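The server-side path hands the recency boost to Redis itself: `RecencyAggregationQuery` (from `agent_memory_server.utils.redis_query`) wraps an FT.AGGREGATE pipeline that APPLYs a boosted score and SORTBYs on it, which is why only an epoch timestamp (`now_ts`) has to be computed in Python. A rough sketch of the same idea using redis-py's aggregation builder directly; the index name, field names, weights, and decay constant are assumptions for illustration, not the project's actual schema or helper:

```python
# Hedged sketch: build (not execute) an FT.AGGREGATE request that boosts the
# vector score by recency server-side. Field and index names are illustrative.
from datetime import UTC, datetime

from redis.commands.search.aggregation import AggregateRequest, Desc

now_ts = int(datetime.now(UTC).timestamp())

# KNN clause: cosine distance aliased as vector_distance (smaller = closer).
knn = "(*)=>[KNN 20 @vector $vec AS vector_distance]"

req = (
    AggregateRequest(knn)
    .load("@text", "@last_accessed", "@vector_distance")
    # Age in days, computed inside Redis from the epoch timestamp we pass in.
    .apply(age_days=f"({now_ts} - @last_accessed) / 86400")
    # Blend similarity with an exponential recency decay (~30-day scale).
    .apply(boosted="0.8 * (1 - @vector_distance) + 0.2 * exp(-@age_days / 30)")
    .sort_by(Desc("@boosted"))
    .limit(0, 10)
    .dialect(2)
)

# Execution against an assumed index would look roughly like:
#   redis.Redis().ft("memory_idx").aggregate(req, query_params={"vec": embedding_bytes})
```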
@@ -1035,8 +1032,6 @@ async def search_memories(
         if len(filters) == 1:
             redis_filter = filters[0]
         else:
-            from functools import reduce
-
             redis_filter = reduce(lambda x, y: x & y, filters)

         # If server-side recency is requested, attempt RedisVL query first (DB-level path)
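The removed local `functools.reduce` imports are what the new module-level `from functools import reduce` replaces; the fold itself is unchanged. The filters being combined support `&`, like redisvl's `FilterExpression`, so the step looks roughly like the sketch below, written against redisvl `Tag` filters directly; the field names are assumptions for illustration:

```python
# Minimal sketch of folding several redisvl filter expressions into one AND-ed
# expression, mirroring `reduce(lambda x, y: x & y, filters)` in the hunk above.
from functools import reduce

from redisvl.query.filter import Tag

filters = [
    Tag("namespace") == "demo",
    Tag("user_id") == "u-123",
    Tag("session_id") == "s-456",
]

if len(filters) == 1:
    redis_filter = filters[0]
else:
    # FilterExpression overloads "&", so the fold is equivalent to
    # filters[0] & filters[1] & filters[2].
    redis_filter = reduce(lambda x, y: x & y, filters)

print(str(redis_filter))  # the combined RediSearch query string
```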
@@ -1179,18 +1174,12 @@ async def count_memories(
         filters = []

         if namespace:
-            from agent_memory_server.filters import Namespace
-
             namespace_filter = Namespace(eq=namespace).to_filter()
             filters.append(namespace_filter)
         if user_id:
-            from agent_memory_server.filters import UserId
-
             user_filter = UserId(eq=user_id).to_filter()
             filters.append(user_filter)
         if session_id:
-            from agent_memory_server.filters import SessionId
-
             session_filter = SessionId(eq=session_id).to_filter()
             filters.append(session_filter)

@@ -1200,8 +1189,6 @@ async def count_memories(
         if len(filters) == 1:
             redis_filter = filters[0]
         else:
-            from functools import reduce
-
             redis_filter = reduce(lambda x, y: x & y, filters)

         # Use the same search method as search_memories but for counting