1 | 1 | import streamlit as st |
2 | 2 | from openai import OpenAI |
| 3 | +import time  # used below to time the streamed tokens |
3 | 3 |
| 4 | +st.title("torchchat") |
| 5 | + |
| 6 | +start_state = [ |
| 7 | + { |
| 8 | + "role": "system", |
| 9 | + "content": "You're an assistant. Answer questions directly, be brief, and have fun.", |
| 10 | + }, |
| 11 | + {"role": "assistant", "content": "How can I help you?"}, |
| 12 | +] |
| 13 | + |
4 | 14 | with st.sidebar: |
5 | | - openai_api_key = st.text_input( |
6 | | - "OpenAI API Key", key="chatbot_api_key", type="password" |
| 15 | + response_max_tokens = st.slider( |
| 16 | + "Max Response Tokens", min_value=10, max_value=1000, value=250, step=10 |
7 | 17 | ) |
8 | | - "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)" |
9 | | - "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)" |
10 | | - "[](https://codespaces.new/streamlit/llm-examples?quickstart=1)" |
11 | | - |
12 | | -st.title("💬 Chatbot") |
| 18 | + if st.button("Reset Chat", type="primary"): |
| 19 | + st.session_state["messages"] = start_state |
13 | 20 |
14 | 21 | if "messages" not in st.session_state: |
15 | | - st.session_state["messages"] = [ |
16 | | - { |
17 | | - "role": "system", |
18 | | - "content": "You're an assistant. Be brief, no yapping. Use as few words as possible to respond to the users' questions.", |
19 | | - }, |
20 | | - {"role": "assistant", "content": "How can I help you?"}, |
21 | | - ] |
| 22 | + st.session_state["messages"] = start_state |
| 23 | + |
22 | 24 |
23 | 25 | for msg in st.session_state.messages: |
24 | 26 | st.chat_message(msg["role"]).write(msg["content"]) |
25 | 27 |
26 | 28 | if prompt := st.chat_input(): |
27 | 29 | client = OpenAI( |
28 | | - # This is the default and can be omitted |
29 | 30 | base_url="http://127.0.0.1:5000/v1", |
30 | | - api_key="YOURMOTHER", |
| 31 | +    api_key="813",  # the OpenAI client requires a key, but the local server never checks it, so any non-empty string works |
31 | 32 | ) |
32 | 33 |
33 | 34 | st.session_state.messages.append({"role": "user", "content": prompt}) |
34 | 35 | st.chat_message("user").write(prompt) |
35 | | - response = client.chat.completions.create( |
36 | | - model="stories15m", messages=st.session_state.messages, max_tokens=64 |
37 | | - ) |
38 | | - msg = response.choices[0].message.content |
39 | | - st.session_state.messages.append({"role": "assistant", "content": msg}) |
40 | | - st.chat_message("assistant").write(msg) |
| 36 | + |
| 37 | + with st.chat_message("assistant"), st.status( |
| 38 | + "Generating... ", expanded=True |
| 39 | + ) as status: |
| 40 | + |
| 41 | + def get_streamed_completion(completion_generator): |
| 42 | + start = time.time() |
| 43 | + tokcount = 0 |
| 44 | +        for chunk in completion_generator: |
| 45 | +            content = chunk.choices[0].delta.content |
| 46 | +            if content is not None:  # the final chunk carries no content, only a finish_reason |
| 47 | +                tokcount += 1 |
| 48 | +                yield content |
| 47 | + |
| 48 | + status.update( |
| 49 | + label="Done, averaged {:.2f} tokens/second".format( |
| 50 | + tokcount / (time.time() - start) |
| 51 | + ), |
| 52 | + state="complete", |
| 53 | + ) |
| 54 | + |
| 55 | + response = st.write_stream( |
| 56 | + get_streamed_completion( |
| 57 | + client.chat.completions.create( |
| 58 | + model="llama3", |
| 59 | + messages=st.session_state.messages, |
| 60 | + max_tokens=response_max_tokens, |
| 61 | + stream=True, |
| 62 | + ) |
| 63 | + ) |
| 64 | +    )  # st.write_stream returns the accumulated text when only strings are yielded |
| 65 | + |
| 66 | + st.session_state.messages.append({"role": "assistant", "content": response}) |
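
For reference, here is a minimal sketch of hitting the same endpoint outside Streamlit — assuming the torchchat OpenAI-compatible server is already listening on 127.0.0.1:5000 and serves a model registered as `llama3` (both values taken from the app above):

```python
from openai import OpenAI

# Same setup as the app: a local OpenAI-compatible server that never checks
# the API key, so any non-empty string satisfies the client constructor.
client = OpenAI(base_url="http://127.0.0.1:5000/v1", api_key="813")

response = client.chat.completions.create(
    model="llama3",
    messages=[{"role": "user", "content": "Say hello in five words."}],
    max_tokens=32,
)
print(response.choices[0].message.content)
```

This is a handy way to confirm the server is up before debugging anything on the Streamlit side.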
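One behavior worth knowing when wiring up `st.write_stream`: if the generator yields only strings, the call returns the concatenated text as a single `str`, which is what lets the app append its return value directly to the message history. A self-contained sketch (no model server required) that demonstrates this:

```python
import time

import streamlit as st


def fake_stream():
    # Yield only strings so st.write_stream returns one concatenated str
    # rather than a list of mixed objects.
    for word in ["streamed", " one", " word", " at", " a", " time"]:
        time.sleep(0.05)
        yield word


full_text = st.write_stream(fake_stream())
st.write(f"write_stream returned a {type(full_text).__name__}: {full_text!r}")
```

Save this as, say, `demo.py` and run `streamlit run demo.py`; the last line shows the return type is `str`.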