|
@@ -34,35 +34,23 @@
         step=0.1,
         help="Controls the randomness of the LLM output. Only applicable for chat/completions queries.",
     )
-
-    if "messages" not in st.session_state:
-        st.session_state.messages = []
-
-    for message in st.session_state.messages:
-        with st.chat_message(message.role.value):
-            st.write(message.content)
-
-    if prompt := st.chat_input("Ask something..."):
-        st.chat_message("user").markdown(prompt)
-        st.session_state.messages.append(ChatMessage(
-            role=ChatMessageRole.USER,
-            content=prompt,
-        ))
-
-        with st.chat_message("assistant"):
-            response = w.serving_endpoints.query(
-                name=selected_model,
-                messages=st.session_state.messages,
-                temperature=temperature,
-            )
-            assistant_message = response.choices[0].message.content
-            st.markdown(assistant_message)
-
-            st.session_state.messages.append(ChatMessage(
-                role=ChatMessageRole.ASSISTANT,
-                content=assistant_message,
-            ))
+    prompt = st.text_area("Enter your prompt:", placeholder="Ask something...")
+    if st.button("Invoke LLM"):
+        response = w.serving_endpoints.query(
+            name=selected_model,
+            messages=[
+                ChatMessage(
+                    role=ChatMessageRole.SYSTEM,
+                    content="You are a helpful assistant.",
+                ),
+                ChatMessage(
+                    role=ChatMessageRole.USER,
+                    content=prompt,
+                ),
+            ],
+            temperature=temperature,
+        )
+        st.json(response.as_dict())
 
 elif model_type == "Traditional ML":
     st.info(
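
Taken on its own, the `+` side of this hunk isn't runnable: it depends on `w`, `selected_model`, `temperature`, and the SDK imports defined earlier in the file. A minimal self-contained sketch of the same single-shot flow, assuming `w` is a `databricks.sdk.WorkspaceClient` and approximating the app's earlier endpoint selector and temperature slider (those two widgets are stand-ins, not the file's actual code):

```python
import streamlit as st
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.serving import ChatMessage, ChatMessageRole

# Picks up credentials from the environment / Databricks config profile.
w = WorkspaceClient()

# Stand-ins for the widgets defined earlier in the app.
selected_model = st.selectbox(
    "Serving endpoint",
    [ep.name for ep in w.serving_endpoints.list()],
)
temperature = st.slider("Temperature", 0.0, 2.0, 1.0, step=0.1)

prompt = st.text_area("Enter your prompt:", placeholder="Ask something...")
if st.button("Invoke LLM"):
    response = w.serving_endpoints.query(
        name=selected_model,
        messages=[
            ChatMessage(role=ChatMessageRole.SYSTEM,
                        content="You are a helpful assistant."),
            ChatMessage(role=ChatMessageRole.USER, content=prompt),
        ],
        temperature=temperature,
    )
    # The raw payload is handy for debugging; for end users you would
    # typically render only the assistant text, as the removed code did.
    st.json(response.as_dict())
    st.markdown(response.choices[0].message.content)
```

Note the trade-off this diff records: the removed `st.session_state.messages` loop kept multi-turn chat history across reruns, while the new `text_area` + button flow sends a stateless, single-turn request and dumps the full response with `st.json(response.as_dict())`. The `response.choices[0].message.content` accessor from the `-` side still works if a cleaner display is wanted.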
|