-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathstreamlit_app.py
More file actions
145 lines (118 loc) Β· 7.19 KB
/
streamlit_app.py
File metadata and controls
145 lines (118 loc) Β· 7.19 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
import streamlit as st
import os
import json
from typing import List
import litellm
# --- 1. DEFINE AGENT TOOLS & RULES (No changes here) ---
# ... (all your tool functions and schemas are still here and perfect)
# Tool Functions
def list_files() -> List[str]:
    """Return the fixed set of demo file names the agent can see."""
    available = ["project_plan.md", "data_report.csv", "apu_the_cat.jpg"]
    return available
def read_file(file_name: str) -> str:
    """Return the contents of *file_name*, or an error string if it is unknown."""
    # Only one file has readable demo content; everything else is an error.
    known_contents = {
        "project_plan.md": "Project Plan: The main goal is to build a Streamlit chatbot.",
    }
    if file_name in known_contents:
        return known_contents[file_name]
    return f"Error: File '{file_name}' not found or cannot be read."
def multiply_numbers(num1: float, num2: float) -> float:
    """Return the product of the two operands."""
    product = num1 * num2
    return product
def terminate(message: str) -> None:
    """Deliberate no-op: the agent loop intercepts 'terminate' calls itself."""
    return None
# Registry mapping the tool names the model emits to the Python callables.
tool_functions = {
    "list_files": list_files,
    "read_file": read_file,
    "multiply_numbers": multiply_numbers,
    "terminate": terminate,
}

# OpenAI-style function schemas advertised to the model on every request.
tools = [
    {
        "type": "function",
        "function": {
            "name": "list_files",
            "description": "Returns a list of available files.",
            "parameters": {"type": "object", "properties": {}, "required": []},
        },
    },
    {
        "type": "function",
        "function": {
            "name": "read_file",
            "description": "Reads the content of a specified file.",
            "parameters": {"type": "object", "properties": {"file_name": {"type": "string"}}, "required": ["file_name"]},
        },
    },
    {
        "type": "function",
        "function": {
            "name": "multiply_numbers",
            "description": "Multiplies two numbers. Use this for any calculation.",
            "parameters": {"type": "object", "properties": {"num1": {"type": "number"}, "num2": {"type": "number"}}, "required": ["num1", "num2"]},
        },
    },
    {
        "type": "function",
        "function": {
            "name": "terminate",
            "description": "Terminates the conversation when the user's request is fully answered. Prints the final message for the user.",
            "parameters": {"type": "object", "properties": {"message": {"type": "string"}}, "required": ["message"]},
        },
    },
]

# System prompt seeding every conversation with the agent's ground rules.
agent_rules = {"role": "system", "content": "You are an AI agent that can perform tasks by using available tools. If a user asks a question that requires calculation, use the multiply_numbers tool. If a user asks about files, first list the files before reading them. When you are done, terminate the conversation by using the 'terminate' tool with a final, friendly message. Always provide the answer and then terminate in the same step if possible."}
# --- 2. SET UP STREAMLIT UI & SECRETS ---
# NOTE(review): the robot emoji below reconstructs mojibake ("π€") in the
# scraped source — confirm against the original repository.
st.title("🤖 Agent Chatbot with Tools")
st.write("This chatbot uses an agent with tools to answer questions. It can do math and read files. Try asking: 'What is 123 times 45?'")

# Refuse to start without the OpenRouter key; litellm reads it from the env.
if "OPENROUTER_API_KEY" not in st.secrets:
    st.error("API key not found. Please add it to your secrets.")
    st.info("Create a file at .streamlit/secrets.toml with the content: OPENROUTER_API_KEY = 'your_key_here'")
    st.stop()
os.environ["OPENROUTER_API_KEY"] = st.secrets["OPENROUTER_API_KEY"]
litellm.set_verbose = False  # keep litellm quiet in the app logs
# --- 3. INITIALIZE & DISPLAY CHAT HISTORY ---
if "messages" not in st.session_state:
    st.session_state.messages = [agent_rules]

# Re-render prior turns with an avatar keyed to the kind of message.
# NOTE(review): all emoji below reconstruct mojibake in the scraped source
# ("π€", "βοΈ", "π", "β…") — confirm against the original repository.
for message in st.session_state.messages:
    # BUGFIX: history also holds litellm Message objects (assistant tool-call
    # turns, appended in the agent loop) whose content can be None; read both
    # fields defensively so rendering never crashes on `in None`.
    is_dict = isinstance(message, dict)
    role = message["role"] if is_dict else getattr(message, "role", None)
    content = message["content"] if is_dict else getattr(message, "content", None)
    # Skip the system prompt, raw tool-protocol entries (their results are
    # already mirrored as "📄 **Result:**" messages), and empty content.
    if role in ("system", "tool") or not content:
        continue
    avatar = "👤" if role == "user" else "🤖"
    if role == "assistant":
        if "⚙️ **Tool:**" in content:
            avatar = "⚙️"
        elif "📄 **Result:**" in content:
            avatar = "📄"
        elif "✅ **Final Answer:**" in content:
            avatar = "✅"
    with st.chat_message(role, avatar=avatar):
        st.markdown(content, unsafe_allow_html=True)
# --- 4. THE AGENT LOGIC (WITH UI ENHANCEMENTS) ---
# NOTE(review): emoji below reconstruct mojibake in the scraped source; the
# "✅" strings were garbled into literal line breaks there — confirm choices
# against the original repository.
if prompt := st.chat_input("What would you like me to do?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user", avatar="👤"):
        st.markdown(prompt)

    max_iterations = 10  # hard cap so a confused model cannot loop forever
    done = False
    for _ in range(max_iterations):
        with st.spinner("Thinking..."):
            response = litellm.completion(
                model="openrouter/deepseek/deepseek-chat-v3-0324:free",
                messages=st.session_state.messages,
                tools=tools,
            )
        response_message = response.choices[0].message

        if not response_message.tool_calls:
            # Plain text answer: show it and stop looping.
            final_response = response_message.content
            with st.chat_message("assistant", avatar="🤖"):
                st.markdown(final_response)
            st.session_state.messages.append({"role": "assistant", "content": final_response})
            break

        # The model requested tool calls; record its turn, then run each tool.
        st.session_state.messages.append(response_message)
        for tool_call in response_message.tool_calls:
            tool_name = tool_call.function.name
            # Guard: some providers send "" instead of "{}" for no-arg calls.
            tool_args = json.loads(tool_call.function.arguments or "{}")

            if tool_name == "terminate":
                final_message_content = f"✅ **Final Answer:** {tool_args.get('message', 'All done!')}"
                with st.chat_message("assistant", avatar="✅"):
                    st.markdown(final_message_content, unsafe_allow_html=True)
                st.session_state.messages.append({"role": "assistant", "content": final_message_content})
                # BUGFIX: still answer the tool call; leaving a tool_call with
                # no matching "tool" message makes the next completion request
                # fail with an unresolved-tool-call error.
                st.session_state.messages.append({
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "name": tool_name,
                    "content": json.dumps({"result": "terminated"}),
                })
                done = True
                break

            # Show which tool is about to run and with what arguments.
            thinking_message_content = f"⚙️ **Tool:** `{tool_name}` <br> 📝 **Arguments:** `{tool_args}`"
            with st.chat_message("assistant", avatar="⚙️"):
                st.markdown(thinking_message_content, unsafe_allow_html=True)
            st.session_state.messages.append({"role": "assistant", "content": thinking_message_content})

            # Execute the tool; surface errors to the model instead of crashing.
            try:
                result = tool_functions[tool_name](**tool_args)
                tool_response_content = json.dumps({"result": result})
            except Exception as e:
                tool_response_content = json.dumps({"error": str(e)})

            # Display the raw tool result for transparency.
            tool_result_display_content = f"📄 **Result:** `{tool_response_content}`"
            with st.chat_message("assistant", avatar="📄"):
                st.markdown(tool_result_display_content, unsafe_allow_html=True)
            st.session_state.messages.append({"role": "assistant", "content": tool_result_display_content})

            # Feed the actual tool output back to the model for the next turn.
            st.session_state.messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "name": tool_name,
                "content": tool_response_content,
            })

        if done:
            break
    else:
        # for-else: reached only when the loop exhausted max_iterations.
        with st.chat_message("assistant", avatar="🤖"):
            st.warning("The agent reached its iteration limit.")