-
Notifications
You must be signed in to change notification settings - Fork 154
Open
Labels
enhancement — New feature or request
Description
I created a simple program to test the memory manager. But when I use the model openai:gpt-5-chat, I get the following error:
from langmem import create_memory_manager
from langmem.knowledge.extraction import ExtractedMemory, Memory
import asyncio
manager = create_memory_manager(
"openai:gpt-5-chat",
enable_deletes=True
)
conversation = [
{"role": "user", "content": "Hi"},
{"role": "assistant", "content": "Hello! How can I assist you today?"},
]
async def test():
    # Extract memories from conversation
    memories = await manager.ainvoke({
        "messages": conversation,
        "existing": [
            ExtractedMemory(id='e8ef6ca4-0159-4844-8c22-32cdbf87f260', content=Memory(content='User likes to receive notifications in the morning.')),
            ExtractedMemory(id='2e469b7d-58bd-4575-9049-a395633cf20d', content=Memory(content='User has a preference for dark mode in all their apps.'))
        ],
        "max_steps": 1
    })
    for memory in memories:
        print(memory)

if __name__ == "__main__":
    asyncio.run(test())

Traceback (most recent call last):
File "/home/hoangnh/test_langmem/test_langmem.py", line 30, in <module>
asyncio.run(test())
File "/home/hoangnh/.local/share/uv/python/cpython-3.12.0-linux-x86_64-gnu/lib/python3.12/asyncio/runners.py", line 194, in run
return runner.run(main)
^^^^^^^^^^^^^^^^
File "/home/hoangnh/.local/share/uv/python/cpython-3.12.0-linux-x86_64-gnu/lib/python3.12/asyncio/runners.py", line 118, in run
return self._loop.run_until_complete(task)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/hoangnh/.local/share/uv/python/cpython-3.12.0-linux-x86_64-gnu/lib/python3.12/asyncio/base_events.py", line 664, in run_until_complete
return future.result()
^^^^^^^^^^^^^^^
File "/home/hoangnh/test_langmem/test_langmem.py", line 17, in test
memories = await manager.ainvoke({
^^^^^^^^^^^^^^^^^^^^^^^
File "/home/hoangnh/test_langmem/.venv/lib/python3.12/site-packages/langmem/knowledge/extraction.py", line 276, in ainvoke
response = await extractor.ainvoke(payload, config=config)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/hoangnh/test_langmem/.venv/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 3291, in ainvoke
input_ = await coro_with_context(part(), context, create_task=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/hoangnh/test_langmem/.venv/lib/python3.12/site-packages/langgraph/pregel/main.py", line 3171, in ainvoke
async for chunk in self.astream(
File "/home/hoangnh/test_langmem/.venv/lib/python3.12/site-packages/langgraph/pregel/main.py", line 3036, in astream
raise GraphRecursionError(msg)
langgraph.errors.GraphRecursionError: Recursion limit of 25 reached without hitting a stop condition. You can increase the limit by setting the `recursion_limit` config key.
For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT
Reactions are currently unavailable
Metadata
Metadata
Assignees
Labels
enhancement — New feature or request