-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathchatbot_graph.py
More file actions
133 lines (101 loc) · 5.18 KB
/
chatbot_graph.py
File metadata and controls
133 lines (101 loc) · 5.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import os
import time
from dotenv import load_dotenv
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.runnables import RunnableConfig
from langchain_groq import ChatGroq
from langgraph.graph import END, START, StateGraph
from agent.configuration import ChatbotConfiguration
from agent.prompts import chatbot_instructions
from agent.state import ChatbotState
# Load environment variables from a local .env file (if present) so the
# Groq API key can be supplied without exporting it in the shell.
load_dotenv()
# Fail fast at import time: the graph cannot run without Groq credentials.
if os.getenv("GROQ_API_KEY") is None:
raise ValueError("GROQ_API_KEY is not set")
# Minimum thinking time in seconds to ensure users see the thinking indicator
# in the frontend before the response replaces it (see chat_response).
MINIMUM_THINKING_TIME = 3.0
def generate_thinking_content(user_message: str) -> str:
    """Build a human-readable "thinking" transcript tailored to the message.

    Scans the (lower-cased) message for topic keywords, picks the first
    matching pair of progress lines, and appends a common closing line.
    The text is primarily rendered by the frontend while the real LLM
    call is in flight.

    Args:
        user_message: The latest user message; may be empty or falsy.

    Returns:
        The selected thinking lines joined by blank lines.
    """
    lowered = user_message.lower() if user_message else ""

    # Keyword buckets examined in priority order; the first hit wins.
    topic_steps = (
        (('market', 'opportunity', 'growth'),
         ["Analyzing market dynamics and identifying key opportunities...",
          "Cross-referencing with industry trends and competitive landscape..."]),
        (('patent', 'ip', 'intellectual'),
         ["Reviewing patent landscape and expiration timelines...",
          "Assessing freedom-to-operate considerations..."]),
        (('clinical', 'trial', 'study'),
         ["Searching clinical trial databases for relevant studies...",
          "Evaluating trial phases and endpoint data..."]),
        (('drug', 'molecule', 'compound'),
         ["Analyzing molecular properties and therapeutic potential...",
          "Reviewing existing literature and research data..."]),
    )

    selected = None
    for keywords, lines in topic_steps:
        if any(word in lowered for word in keywords):
            selected = list(lines)
            break
    if selected is None:
        # Generic fallback when no topic keyword matched.
        selected = [
            "Processing your request and gathering relevant information...",
            "Analyzing context and formulating comprehensive response...",
        ]

    selected.append("Synthesizing insights and preparing response...")
    return "\n\n".join(selected)
def chat_response(state: ChatbotState, config: RunnableConfig) -> ChatbotState:
    """LangGraph node that generates a conversational response to the user's message.

    Enforces a minimum elapsed time before returning so the frontend's
    thinking indicator stays visible, then uses Groq to generate natural,
    helpful responses while maintaining conversation context through the
    message history.

    Args:
        state: Current graph state containing the conversation messages
        config: Configuration for the runnable, including LLM provider settings

    Returns:
        Dictionary with state update, including the AI's response message
        plus thinking_content / thinking_title consumed by the frontend
    """
    # Guard first: with no messages there is nothing to analyze, so skip
    # thinking-content generation, config parsing, and model construction.
    # (Same greeting payload as before; the guard previously ran only after
    # all of that work had already been done.)
    if not state["messages"]:
        return {"messages": [AIMessage(content="Hello! How can I help you today?")]}

    start_time = time.time()

    # Find the most recent human message to drive thinking-content generation.
    user_message = ""
    for msg in reversed(state["messages"]):
        if isinstance(msg, HumanMessage) or (hasattr(msg, 'type') and msg.type == 'human'):
            user_message = msg.content if hasattr(msg, 'content') else str(msg)
            break

    # Stored in state but primarily rendered by the frontend during loading.
    thinking_content = generate_thinking_content(user_message)

    configurable = ChatbotConfiguration.from_runnable_config(config)

    # Initialize the Groq chat model from the runnable configuration.
    llm = ChatGroq(
        model=configurable.chat_model,
        temperature=configurable.temperature,
        max_retries=2,
        api_key=os.getenv("GROQ_API_KEY"),
    )

    # Flatten recent history into a plain-text transcript for the prompt.
    conversation_context = "\n".join(
        f"{'Human' if isinstance(msg, HumanMessage) else 'Assistant'}: {msg.content}"
        for msg in state["messages"][-10:]  # Keep last 10 messages for context
    )

    # The guard above guarantees messages is non-empty, so [-1] is safe.
    formatted_prompt = chatbot_instructions.format(
        conversation_context=conversation_context,
        current_message=state["messages"][-1].content,
    )

    # Generate response
    result = llm.invoke(formatted_prompt)

    # Pad out to the minimum thinking time so the indicator is always visible.
    elapsed = time.time() - start_time
    if elapsed < MINIMUM_THINKING_TIME:
        time.sleep(MINIMUM_THINKING_TIME - elapsed)

    return {
        "messages": [AIMessage(content=result.content)],
        "thinking_content": thinking_content,
        "thinking_title": "Analyzed your request"
    }
# Create the Chatbot Graph: state schema is ChatbotState, and per-invocation
# settings are validated against ChatbotConfiguration.
builder = StateGraph(ChatbotState, config_schema=ChatbotConfiguration)
# Define single node that handles both the thinking delay and the response
# (see chat_response above).
builder.add_node("chat_response", chat_response)
# Set the entrypoint and flow: START -> chat_response -> END
builder.add_edge(START, "chat_response")
builder.add_edge("chat_response", END)
# Compile the graph; the name is surfaced to LangGraph tooling/deployments.
chatbot_graph = builder.compile(name="basic-chatbot")