@@ -196,26 +196,32 @@ async def search_long_term_memories(
     results = []
 
     for doc in search_result.docs:
-        topics = safe_get(doc, "topics", [])
-        if isinstance(topics, str):
-            topics: list[str] = topics.split(",")  # type: ignore
-
-        entities = safe_get(doc, "entities", [])
-        if isinstance(entities, str):
-            entities: list[str] = entities.split(",")  # type: ignore
+        # NOTE: Because this may not be obvious: we index hashes, and we extract
+        # topics and entities separately from main long-term indexing. However,
+        # when we store the topics and entities, we store them as comma-separated
+        # strings in the hash. Our search index picks these up and indexes them
+        # in TAG fields, and we get them back as comma-separated strings.
+        doc_topics = safe_get(doc, "topics", [])
+        if isinstance(doc_topics, str):
+            doc_topics = doc_topics.split(",")  # type: ignore
+
+        doc_entities = safe_get(doc, "entities", [])
+        if isinstance(doc_entities, str):
+            doc_entities = doc_entities.split(",")  # type: ignore
 
         results.append(
             LongTermMemoryResult(
                 id_=safe_get(doc, "id_"),
                 text=safe_get(doc, "text", ""),
                 dist=float(safe_get(doc, "vector_distance", 0)),
                 created_at=int(safe_get(doc, "created_at", 0)),
+                updated_at=int(safe_get(doc, "updated_at", 0)),
                 last_accessed=int(safe_get(doc, "last_accessed", 0)),
                 user_id=safe_get(doc, "user_id"),
                 session_id=safe_get(doc, "session_id"),
                 namespace=safe_get(doc, "namespace"),
-                topics=topics,
-                entities=entities,
+                topics=doc_topics,
+                entities=doc_entities,
             )
         )
     total_results = search_result.total
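
The NOTE added in this hunk documents the storage pattern: list-valued fields such as topics and entities are flattened to comma-separated strings in the Redis hash, the search index declares them as TAG fields with a comma separator, and query results hand back the raw stored string, which is why the loop splits on ",". Below is a minimal sketch of that pattern using plain redis-py; the index name, key prefix, and sample values are hypothetical and not taken from this codebase, which uses its own indexing helpers.

```python
# Minimal sketch of the comma-separated TAG field pattern described in the
# NOTE above, using plain redis-py. Index name, key prefix, and sample data
# are illustrative only; the project's real schema lives elsewhere.
import redis
from redis.commands.search.field import TagField, TextField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query

r = redis.Redis(decode_responses=True)

# TAG fields split on the separator at index time, so a single hash field
# can hold many topics/entities as one comma-separated string.
r.ft("memory_idx").create_index(
    fields=[
        TextField("text"),
        TagField("topics", separator=","),
        TagField("entities", separator=","),
    ],
    definition=IndexDefinition(prefix=["memory:"], index_type=IndexType.HASH),
)

# Store the lists as comma-separated strings in the hash.
r.hset(
    "memory:1",
    mapping={
        "text": "Discussed Redis search indexes",
        "topics": "redis,search,indexing",
        "entities": "Redis",
    },
)

# Matching a single tag works, but the field comes back from the hash
# exactly as stored: a comma-separated string the caller must split,
# which is what the loop in the diff does.
doc = r.ft("memory_idx").search(Query("@topics:{redis}")).docs[0]
topics = doc.topics.split(",")  # ["redis", "search", "indexing"]
```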