@@ -1165,66 +1165,65 @@ def _generate_chatbot_content(self, chatbot) -> List[str]:
 
         if chatbot.model:
             # --- Ollama-style streaming chatbot ---
-            chatbot_content.append(
-                textwrap.dedent(
-                    f"""
-                    {init_messages_block}
-
-                    # Function to send prompt to Ollama API
-                    def generate_query(messages):
-                        response = requests.post(
-                            "{chatbot.api_call.api_url}",
-                            json={{"model": "{chatbot.model}", "messages": messages, "stream": True}},
-                        )
-                        response.raise_for_status()
-                        return response
-
-                    # Parse streaming response from Ollama
-                    def parse_api_response(response):
-                        try:
-                            output = ""
-                            for line in response.iter_lines():
-                                body = json.loads(line)
-                                if "error" in body:
-                                    raise Exception(f"API error: {{body['error']}}")
-                                if body.get("done", False):
-                                    return {{"role": "assistant", "content": output}}
-                                output += body.get("message", {{}}).get("content", "")
-                        except Exception as e:
-                            return {{"role": "assistant", "content":
-                                f"Error while processing API response: {{str(e)}}"}}
-
-                    # Simulated typing effect for responses
-                    def response_generator(msg_content):
-                        for word in msg_content.split():
-                            yield word + " "
-                            time.sleep(0.1)
-                        yield "\\n"
-
-                    {render_messages_block}
+            code_block = textwrap.dedent(
+                f"""
+                {init_messages_block}
 
-                    {handle_prompt_block}
+                # Function to send prompt to Ollama API
+                def generate_query(messages):
+                    response = requests.post(
+                        "{chatbot.api_call.api_url}",
+                        json={{"model": "{chatbot.model}", "messages": messages, "stream": True}},
+                    )
+                    response.raise_for_status()
+                    return response
 
-                    # Retrieve question and generate answer
-                    combined = "\\n".join(msg["content"] for msg in st.session_state.messages
-                                          if msg["role"] == "user")
-                    messages = [{{"role": "user", "content": combined}}]
-                    with st.spinner('Generating answer...'):
-                        response = generate_query(messages)
-                        parsed_response = parse_api_response(response)
-
-                    # Add the assistant's response to the session state and display it
-                    st.session_state.messages.append(parsed_response)
-                    with st.chat_message("assistant"):
-                        st.write_stream(response_generator(parsed_response["content"]))
-                    """
-                )
+                # Parse streaming response from Ollama
+                def parse_api_response(response):
+                    try:
+                        output = ""
+                        for line in response.iter_lines():
+                            body = json.loads(line)
+                            if "error" in body:
+                                raise Exception(f"API error: {{body['error']}}")
+                            if body.get("done", False):
+                                return {{"role": "assistant", "content": output}}
+                            output += body.get("message", {{}}).get("content", "")
+                    except Exception as e:
+                        return {{"role": "assistant", "content":
+                            f"Error while processing API response: {{str(e)}}"}}
+
+                # Simulated typing effect for responses
+                def response_generator(msg_content):
+                    for word in msg_content.split():
+                        yield word + " "
+                        time.sleep(0.1)
+                    yield "\\n"
+
+                {render_messages_block}
+
+                {handle_prompt_block}
+
+                # Retrieve question and generate answer
+                combined = "\\n".join(msg["content"] for msg in st.session_state.messages
+                                      if msg["role"] == "user")
+                messages = [{{"role": "user", "content": combined}}]
+                with st.spinner('Generating answer...'):
+                    response = generate_query(messages)
+                    parsed_response = parse_api_response(response)
+
+                # Add the assistant's response to the session state and display it
+                st.session_state.messages.append(parsed_response)
+                with st.chat_message("assistant"):
+                    st.write_stream(response_generator(parsed_response["content"]))
+                """
             )
+            chatbot_content.append(code_block)
+
         else:
             # --- Standard (non-streaming) API chatbot ---
-            chatbot_content.append(
-                textwrap.dedent(
-                    f"""
+            code_block = textwrap.dedent(
+                f"""
                     {init_messages_block}
 
                     # Function to send prompt to standard API
@@ -1271,8 +1270,8 @@ def generate_query(prompt):
                         else:
                             st.error("Failed to get response from API")
                     """
-                )
             )
+            chatbot_content.append(code_block)
 
         if chatbot.caption:
             chatbot_content.append(
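
For readers skimming the diff, the escaped braces (`{{ }}`) make the emitted script harder to follow. Stripped of the f-string templating, the Ollama branch boils down to roughly the sketch below. The endpoint URL and model name are illustrative placeholders only (the generator substitutes `chatbot.api_call.api_url` and `chatbot.model`), so treat this as an approximation of the generated code, not its exact output.

```python
import json
import requests

def generate_query(messages):
    # Send the chat history to an Ollama-style /api/chat endpoint.
    # URL and model name below are assumed values for illustration.
    response = requests.post(
        "http://localhost:11434/api/chat",
        json={"model": "llama3", "messages": messages, "stream": True},
    )
    response.raise_for_status()
    return response

def parse_api_response(response):
    # Ollama streams newline-delimited JSON objects; concatenate the
    # per-chunk message content until a chunk reports "done".
    try:
        output = ""
        for line in response.iter_lines():
            body = json.loads(line)
            if "error" in body:
                raise Exception(f"API error: {body['error']}")
            if body.get("done", False):
                return {"role": "assistant", "content": output}
            output += body.get("message", {}).get("content", "")
    except Exception as e:
        return {"role": "assistant",
                "content": f"Error while processing API response: {str(e)}"}

if __name__ == "__main__":
    reply = parse_api_response(generate_query([{"role": "user", "content": "Hello"}]))
    print(reply["content"])
```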