@@ -197,71 +197,26 @@ def get_config() -> AppConfig:
 def configure_lm() -> AppConfig:
     """Configure the language model per dspy.ai and return config.
 
-    Priority:
-    1) Use dspy.LM with the `model` string (preferred per https://dspy.ai/)
-    2) If dspy.LM is unavailable or fails and provider is OpenAI, fall back to dsp.GPT3
-    3) If provider is Ollama and dspy.LM fails, fall back to a minimal Ollama adapter
+    Uses dspy.LM(model=...) and sets it via dspy.configure. If configuration fails,
+    we log and continue so non-LLM commands still work.
     """
     config = get_config()
 
-    model = config.lm.model
-    temperature = config.lm.temperature
-    max_tokens = config.lm.max_tokens
-
-    # 1) Preferred: use modern dspy.LM API (as in dspy.ai)
     try:
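+        # Per https://dspy.ai/, dspy.LM takes a LiteLLM-style "provider/model"
+        # string such as "openai/gpt-4o-mini" or "ollama_chat/llama3".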
         lm = dspy.LM(
-            model=model,
-            temperature=temperature,
-            max_tokens=max_tokens,
+            model=config.lm.model,
+            temperature=config.lm.temperature,
+            max_tokens=config.lm.max_tokens,
         )
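+        # dspy.configure registers this LM as the process-wide default for DSPy modules.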
         dspy.configure(lm=lm)
-        logger.info(f"Configured LM via dspy.LM: {model}")
+        logger.info(f"Configured LM via dspy.LM: {config.lm.model}")
         return config
     except Exception as e:
-        logger.warning(f"dspy.LM unavailable or failed: {e}")
-
-    # 2) OpenAI fallback via dsp.GPT3 (legacy installs)
-    try:
-        if model.startswith("openai/"):
-            import importlib
-
-            dsp = importlib.import_module("dsp")
-            mname = model.replace("openai/", "")
-            lm_impl = dsp.GPT3(
-                model=mname,
-                api_key=config.lm.api_key or os.getenv("OPENAI_API_KEY"),
-                api_base=os.getenv("OPENAI_BASE_URL"),
-                temperature=temperature,
-                max_tokens=max_tokens,
-            )
-            dspy.configure(lm=lm_impl)
-            logger.info(f"Configured LM via dsp.GPT3: {model}")
-            return config
-    except Exception as e:
-        logger.warning(f"dsp.GPT3 fallback unavailable: {e}")
-
-    # 3) Ollama fallback via minimal adapter
-    try:
-        if model.startswith("ollama_chat/") or model.startswith("ollama/"):
-            from .lm_providers import OllamaLM
-
-            mname = model.replace("ollama_chat/", "").replace("ollama/", "")
-            lm_impl = OllamaLM(
-                model=mname,
-                base_url=config.lm.api_base or os.getenv("OLLAMA_API_BASE", "http://localhost:11434"),
-                temperature=temperature,
-                max_tokens=max_tokens,
-            )
-            dspy.configure(lm=lm_impl)
-            logger.info(f"Configured LM via Ollama adapter: {model}")
-            return config
-    except Exception as e:
-        logger.warning(f"Ollama adapter fallback unavailable: {e}")
-
-    # Final: no LM configured, allow non-LLM commands to work
-    logger.error("No usable LM configuration found. Proceeding without LM.")
-    return config
+        logger.error(f"Failed to configure LM via dspy.LM: {e}")
+        logger.info("Continuing without LM configuration for non-LLM commands")
+        return config
 
 
 def reload_config():
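
For reference, a minimal sketch of the simplified flow from a caller's point of view. The import path `app.config` is hypothetical (substitute the module that actually defines `configure_lm`), and it assumes `dspy.settings.lm` exposes whatever `dspy.configure` registered, as it does in current DSPy releases:

    import dspy

    # Hypothetical import path; use the module that defines configure_lm.
    from app.config import configure_lm

    config = configure_lm()  # returns AppConfig whether or not the LM came up

    if dspy.settings.lm is None:
        print("No LM configured; only non-LLM commands are available.")
    else:
        print(f"Active model: {config.lm.model}")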