import streamlit as st
import ollama

def get_ai_response(messages):
    """Send the full chat history to the local Ollama server and return the reply text."""
    try:
        response = ollama.chat(
            model="llama3.1",
            messages=messages
        )
        return response['message']['content']
    except Exception as e:
        st.error(f"Error: {str(e)}")
        return None
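
# A streaming variant (a hedged sketch, not wired into main() below): ollama.chat
# accepts stream=True, in which case it yields chunks whose ['message']['content']
# field holds the next piece of text. The generator could be handed to Streamlit's
# st.write_stream to render tokens as they arrive instead of waiting for the full reply.
def get_ai_response_stream(messages):
    try:
        for chunk in ollama.chat(model="llama3.1", messages=messages, stream=True):
            yield chunk['message']['content']
    except Exception as e:
        st.error(f"Error: {str(e)}")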

# Prerequisites: the Ollama server must be running locally, and the model weights
# must be pulled once in a terminal: "ollama pull llama3.1" (or "ollama run llama3.1")
# Launch the app with: "streamlit run llama31-chat.py"
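
# Optional startup check (a sketch, assuming recent ollama-python versions where
# list() returns a subscriptable response with a "models" sequence whose entries
# expose a "model" name field; older releases returned plain dicts keyed by "name"):
def model_is_available(name="llama3.1"):
    try:
        return any(name in m["model"] for m in ollama.list()["models"])
    except Exception:
        return False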

def main():
    st.title("Llama 3.1 - Streamlit Chat")

    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # React to user input
    if prompt := st.chat_input("What is your message?"):
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(prompt)

        # Get AI response
        ai_response = get_ai_response(st.session_state.messages)

        # Display and store the assistant response only when the call succeeded,
        # so a failed request does not append None to the chat history
        if ai_response is not None:
            with st.chat_message("assistant"):
                st.markdown(ai_response)
            # Add assistant response to chat history
            st.session_state.messages.append({"role": "assistant", "content": ai_response})

if __name__ == "__main__":
    main()