
Commit f69a924

+ chat UI/UX
1 parent 363d177 commit f69a924

File tree

1 file changed (+30 -2 lines)

app.py

Lines changed: 30 additions & 2 deletions
@@ -359,8 +359,36 @@ def generate_response():
                 if chunk['message']['content']:
                     yield chunk['message']['content']
 
-        # Stream the response
-        assistant_response = st.write_stream(generate_response())
+        # Create a status placeholder
+        status_placeholder = st.empty()
+
+        # Show spinner while waiting for first chunk
+        with status_placeholder.container():
+            with st.spinner("Thinking"):
+                # Get the response generator
+                response_generator = generate_response()
+                # Try to get the first chunk to exit spinner context
+                try:
+                    first_chunk = next(response_generator)
+                    has_first_chunk = True
+                except StopIteration:
+                    has_first_chunk = False
+                    first_chunk = ""
+
+        # Now stream outside the spinner context
+        if has_first_chunk:
+            # Clear the status placeholder and start streaming
+            status_placeholder.empty()
+
+            def complete_stream():
+                yield first_chunk
+                for chunk in response_generator:
+                    yield chunk
+
+            assistant_response = st.write_stream(complete_stream())
+        else:
+            status_placeholder.error("No response received from the model.")
+            assistant_response = ""
 
         # Add assistant response to chat history
         add_message_to_current_chat("assistant", assistant_response)
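The pattern in this commit is worth noting: st.spinner only needs to stay active until the first token arrives, so the change pulls one chunk with next() inside the spinner context, then re-yields it ahead of the remaining generator so st.write_stream renders the full response with the spinner already gone. Below is a minimal self-contained sketch of the same pattern, with a hypothetical fake_model_stream generator standing in for the app's real model call (the surrounding app.py code is not shown in this diff):

import time
import streamlit as st

def fake_model_stream():
    # Hypothetical stand-in for the app's real streaming model call
    time.sleep(1.5)  # simulate time-to-first-token latency
    for word in "Hello from the model".split():
        yield word + " "
        time.sleep(0.1)

status_placeholder = st.empty()

with status_placeholder.container():
    with st.spinner("Thinking"):
        gen = fake_model_stream()
        try:
            # Blocks here until the first token arrives
            first_chunk = next(gen)
            has_first_chunk = True
        except StopIteration:
            has_first_chunk = False
            first_chunk = ""

if has_first_chunk:
    # Drop the spinner before rendering the stream
    status_placeholder.empty()

    def complete_stream():
        yield first_chunk  # re-emit the chunk consumed by next()
        yield from gen     # then pass through the rest

    st.write_stream(complete_stream())
else:
    status_placeholder.error("No response received from the model.")

The key design choice is consuming exactly one chunk inside the spinner: the spinner disappears as soon as the model starts responding, instead of lingering after streaming has already begun or vanishing before any output is ready.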

0 commit comments