@@ -140,43 +140,37 @@ def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool,
     elif "8-bit" in agent_type:
         quantization = "8bit"
         model_type = "Local (Mistral)"
-    elif "Ollama" in agent_type:
-        # Extract model name from agent_type (e.g., "Ollama - deepseek-r1" -> "deepseek-r1")
-        model_name = agent_type.replace("Ollama - ", "").strip()
-        model_type = "Ollama"
+    elif agent_type == "openai":
+        model_type = "OpenAI"
     else:
-        model_type = agent_type
+        # All other models are treated as Ollama models
+        model_type = "Ollama"
+        model_name = agent_type
 
     # Select appropriate agent and reinitialize with correct settings
-    if "Local" in model_type:
+    if model_type == "OpenAI":
+        if not openai_key:
+            response_text = "OpenAI key not found. Please check your config."
+            print(f"Error: {response_text}")
+            return history + [[message, response_text]]
+        agent = RAGAgent(vector_store, openai_api_key=openai_key, use_cot=use_cot,
+                         collection=collection, skip_analysis=skip_analysis)
+    elif model_type == "Local (Mistral)":
         # For HF models, we need the token
         if not hf_token:
             response_text = "Local agent not available. Please check your HuggingFace token configuration."
             print(f"Error: {response_text}")
             return history + [[message, response_text]]
         agent = LocalRAGAgent(vector_store, use_cot=use_cot, collection=collection,
                               skip_analysis=skip_analysis, quantization=quantization)
-    elif "Ollama" in model_type:
-        # For Ollama models, use the extracted model_name directly
+    else:  # Ollama models
         try:
             agent = LocalRAGAgent(vector_store, model_name=model_name, use_cot=use_cot,
                                   collection=collection, skip_analysis=skip_analysis)
         except Exception as e:
-            response_text = f"Error initializing Ollama model: {str(e)}. Falling back to Local Mistral."
-            print(f"Error: {response_text}")
-            # Fall back to Mistral if Ollama fails
-            if hf_token:
-                agent = LocalRAGAgent(vector_store, use_cot=use_cot, collection=collection,
-                                      skip_analysis=skip_analysis)
-            else:
-                return history + [[message, "Local Mistral agent not available for fallback. Please check your HuggingFace token configuration."]]
-    else:
-        if not openai_key:
-            response_text = "OpenAI key not found. Please check your config."
+            response_text = f"Error initializing Ollama model: {str(e)}"
             print(f"Error: {response_text}")
             return history + [[message, response_text]]
-        agent = RAGAgent(vector_store, openai_api_key=openai_key, use_cot=use_cot,
-                         collection=collection, skip_analysis=skip_analysis)
 
     # Process query and get response
     print("Processing query...")
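
A quick reviewer's sketch of the new dispatch rules, isolated so the behavior change is easy to test. `resolve_agent` is a hypothetical helper invented here for illustration (the real logic is inlined in `chat`), and the 4-bit branch above the hunk is assumed to mirror the visible 8-bit one:

```python
# Hypothetical, self-contained rendering of the commit's selection logic.
def resolve_agent(agent_type: str):
    """Map the UI's agent_type string to (model_type, model_name, quantization)."""
    model_name = None
    quantization = None
    if "8-bit" in agent_type:  # the 4-bit branch above the hunk is assumed analogous
        quantization = "8bit"
        model_type = "Local (Mistral)"
    elif agent_type == "openai":
        model_type = "OpenAI"
    else:
        # Behavior change: any other value is treated as an Ollama model name,
        # passed through verbatim -- no more "Ollama - " prefix parsing.
        model_type = "Ollama"
        model_name = agent_type
    return model_type, model_name, quantization

# Expected mappings under the new rules ("Mistral (8-bit)" is an assumed UI label):
assert resolve_agent("openai") == ("OpenAI", None, None)
assert resolve_agent("deepseek-r1") == ("Ollama", "deepseek-r1", None)
assert resolve_agent("Mistral (8-bit)") == ("Local (Mistral)", None, "8bit")
```

Note that the Ollama fallback-to-Mistral path was removed: initialization errors now surface directly in the chat history instead of silently retrying with the local model.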