-
I am new and still learning about LangGraph. I saw the example about the LangGraph ReAct agent and I am playing with it. I wanted to add memory to it like this:

```python
from datetime import datetime, timezone
from typing import Dict, List, Literal, cast
from langgraph.graph import StateGraph
from langgraph.prebuilt import ToolNode
from langchain_core.messages import AIMessage
from langchain_core.runnables import RunnableConfig
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.prompts import ChatPromptTemplate
from agents.chatbot.configuration import Configuration
from agents.chatbot.state import InputState, State
from agents.chatbot.tools import TOOLS
from agents.chatbot.utils import load_chat_model
async def call_model(
    state: State, config: RunnableConfig
) -> Dict[str, List[AIMessage]]:
    configuration = Configuration.from_runnable_config(config)
    configurable = config.get("configurable", {})
    user_info = configurable.get("user_info", {})
    current_time = datetime.now(tz=timezone.utc)
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", configuration.system_prompt),
            ("placeholder", "{messages}"),
        ]
    ).partial(
        date=current_time.isoformat(),
        time=current_time.strftime("%H:%M:%S"),
    )
    model = load_chat_model(configuration.model).bind_tools(TOOLS)
    message_value = await prompt.ainvoke(
        {
            "messages": state.messages,
            "user_info": user_info,
        },
        config,
    )
    response = cast(AIMessage, await model.ainvoke(message_value, config))
    if state.is_last_step and response.tool_calls:
        return {
            "messages": [
                AIMessage(
                    id=response.id,
                    content="Sorry, I could not find an answer to your question in the specified number of steps.",
                )
            ]
        }
    return {"messages": [response]}
workflow = StateGraph(State, input=InputState, config_schema=Configuration)
workflow.add_node(call_model)
workflow.add_node("tools", ToolNode(TOOLS))
workflow.add_edge("__start__", "call_model")
def route_model_output(state: State) -> Literal["__end__", "tools"]:
    last_message = state.messages[-1]
    if not isinstance(last_message, AIMessage):
        raise ValueError(
            f"Expected AIMessage in output edges, but got {type(last_message).__name__}"
        )
    if not last_message.tool_calls:
        return "__end__"
    return "tools"
workflow.add_conditional_edges(
    "call_model",
    route_model_output,
)
workflow.add_edge("tools", "call_model")
graph = workflow.compile(
    checkpointer=MemorySaver(),
    interrupt_before=[],
    interrupt_after=[],
)
graph.name = "Chatbot Agent"
```

It doesn't remember the previous conversation. I can even confirm it by checking the state:

```python
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Sequence
from langchain_core.messages import AnyMessage
from langgraph.graph import add_messages
from langgraph.managed import IsLastStep
from typing_extensions import Annotated
@dataclass
class InputState:
    messages: Annotated[Sequence[AnyMessage], add_messages] = field(
        default_factory=list
    )

@dataclass
class State(InputState):
    is_last_step: IsLastStep = field(default=False)
```
-
@cris-m how are you invoking it? I tested your code and it works as intended:

```python
thread_config = {"configurable": {"thread_id": "1"}}
await graph.ainvoke({"messages": [("user", "hi, my name is John")]}, thread_config)
await graph.ainvoke({"messages": [("user", "what's my name?")]}, thread_config)
# the model correctly remembers the name for the same thread
```

Perhaps you're looking for cross-thread persistence, where the model remembers information from one thread in another? In that case, check out this how-to guide: https://langchain-ai.github.io/langgraph/how-tos/cross-thread-persistence/
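
In case it's useful, here is a rough sketch of that cross-thread setup based on the guide. The `user_id` key and the `("memories", user_id)` namespace are illustrative choices, not from your code:

```python
from langgraph.checkpoint.memory import MemorySaver
from langgraph.store.base import BaseStore
from langgraph.store.memory import InMemoryStore

async def call_model(state: State, config: RunnableConfig, *, store: BaseStore):
    # the store is shared across all threads; namespace memories per user
    user_id = config["configurable"]["user_id"]
    namespace = ("memories", user_id)
    # read memories written from any previous thread for this user
    memories = await store.asearch(namespace)
    info = "\n".join(str(m.value) for m in memories)
    # ... build the prompt with `info` and call the model as before ...
    # write a memory that future threads for this user can read
    await store.aput(namespace, "profile", {"info": "example memory"})

# pass the store alongside the checkpointer when compiling
graph = workflow.compile(checkpointer=MemorySaver(), store=InMemoryStore())
```

The key difference from the checkpointer: the store is shared across all threads, while `MemorySaver` checkpoints are scoped to a single `thread_id`.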
-
I have identified the issue. The model is called from FastAPI, and a new thread_id was generated every time the route was called. That is why it behaved perfectly in LangGraph Studio but did not remember anything when calling the model from the FastAPI route.
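
For anyone who hits the same thing, a minimal sketch of the fix, assuming the compiled graph above is importable (the route, request model, and import path here are hypothetical): the client keeps a stable `thread_id` per conversation and sends it with every request, instead of the server generating a fresh one.

```python
from fastapi import FastAPI
from pydantic import BaseModel

from agents.chatbot.graph import graph  # assumed location of the compiled graph

app = FastAPI()

class ChatRequest(BaseModel):
    message: str
    thread_id: str  # persisted by the client, reused across requests

@app.post("/chat")
async def chat(req: ChatRequest):
    # reuse the same thread_id so MemorySaver can load the prior checkpoint
    config = {"configurable": {"thread_id": req.thread_id}}
    result = await graph.ainvoke({"messages": [("user", req.message)]}, config)
    return {"reply": result["messages"][-1].content}
```

Also worth noting: `MemorySaver` keeps checkpoints in process memory, so they are lost when the server restarts; a persistent checkpointer (e.g. SQLite or Postgres) would survive restarts.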