@@ -140,43 +140,37 @@ def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool,
     elif "8-bit" in agent_type:
         quantization = "8bit"
         model_type = "Local (Mistral)"
-    elif "Ollama" in agent_type:
-        # Extract model name from agent_type (e.g., "Ollama - deepseek-r1" -> "deepseek-r1")
-        model_name = agent_type.replace("Ollama - ", "").strip()
-        model_type = "Ollama"
+    elif agent_type == "openai":
+        model_type = "OpenAI"
     else:
-        model_type = agent_type
+        # All other models are treated as Ollama models
+        model_type = "Ollama"
+        model_name = agent_type
 
     # Select appropriate agent and reinitialize with correct settings
-    if "Local" in model_type:
+    if model_type == "OpenAI":
+        if not openai_key:
+            response_text = "OpenAI key not found. Please check your config."
+            print(f"Error: {response_text}")
+            return history + [[message, response_text]]
+        agent = RAGAgent(vector_store, openai_api_key=openai_key, use_cot=use_cot,
+                         collection=collection, skip_analysis=skip_analysis)
+    elif model_type == "Local (Mistral)":
         # For HF models, we need the token
         if not hf_token:
             response_text = "Local agent not available. Please check your HuggingFace token configuration."
             print(f"Error: {response_text}")
             return history + [[message, response_text]]
         agent = LocalRAGAgent(vector_store, use_cot=use_cot, collection=collection,
                               skip_analysis=skip_analysis, quantization=quantization)
-    elif "Ollama" in model_type:
-        # For Ollama models, use the extracted model_name directly
+    else:  # Ollama models
         try:
             agent = LocalRAGAgent(vector_store, model_name=model_name, use_cot=use_cot,
                                   collection=collection, skip_analysis=skip_analysis)
         except Exception as e:
-            response_text = f"Error initializing Ollama model: {str(e)}. Falling back to Local Mistral."
-            print(f"Error: {response_text}")
-            # Fall back to Mistral if Ollama fails
-            if hf_token:
-                agent = LocalRAGAgent(vector_store, use_cot=use_cot, collection=collection,
-                                      skip_analysis=skip_analysis)
-            else:
-                return history + [[message, "Local Mistral agent not available for fallback. Please check your HuggingFace token configuration."]]
-    else:
-        if not openai_key:
-            response_text = "OpenAI key not found. Please check your config."
+            response_text = f"Error initializing Ollama model: {str(e)}"
             print(f"Error: {response_text}")
             return history + [[message, response_text]]
-        agent = RAGAgent(vector_store, openai_api_key=openai_key, use_cot=use_cot,
-                         collection=collection, skip_analysis=skip_analysis)
 
     # Process query and get response
     print("Processing query...")