@@ -25,6 +25,30 @@ def configure_lm():
     temperature = float(os.getenv("TEMPERATURE", "0.2"))
     max_tokens = int(os.getenv("MAX_TOKENS", "1024"))
 
+    # If the mock provider is explicitly requested, short-circuit to a local mock LM.
+    if provider == "mock":
+        class _MockLM:
+            model = "mock/local"
+            def __call__(self, *, prompt: str, **kwargs):
+                import re, json as _json
+                qmatch = re.search(r"Question:\s*(.*)", prompt, re.S)
+                question = qmatch.group(1).strip() if qmatch else prompt
+                ql = question.lower()
+                # Heuristic routing: calculator for math, "now" for time/date, otherwise finalize.
+                if re.search(r"[0-9].*[+\-*/]", question) or any(w in ql for w in [
+                    "add","sum","multiply","divide","compute","calculate","total","power","factorial","!","**","^"
+                ]):
+                    # Crude expression extraction: keep the longest math-looking substring.
+                    cands = re.findall(r"[0-9\+\-\*/%\(\)\.!\^\s]+", question)
+                    cands = [c.strip() for c in cands if c.strip()]
+                    expr = max(cands, key=len) if cands else "2+2"
+                    return _json.dumps({"tool": {"name": "calculator", "args": {"expression": expr}}})
+                if any(w in ql for w in ["time","date","utc","current time","now"]):
+                    return _json.dumps({"tool": {"name": "now", "args": {"timezone": "utc"}}})
+                return _json.dumps({"final": {"answer": "ok"}})
+        dspy.settings.configure(lm=_MockLM(), track_usage=True)
+        return
+
     tried = []
 
     # Helper to try multiple backends safely
@@ -56,32 +80,10 @@ def _try(name, fn):
     if _try("dspy.LM(openai/<model>)", lambda: dspy.LM(f"openai/{openai_model}")):
         return
 
-    # Option 3: Mock LM (tests/CI)
-    class _MockLM:
+    # If we got here, all backends failed: fall back to mock
+    class _FallbackMockLM:
         model = "mock/local"
         def __call__(self, *, prompt: str, **kwargs):
-            # Very small heuristic: if math-like, suggest calculator; if time-like, suggest now; else finalize.
-            import re, json as _json
-            qmatch = re.search(r"Question:\s*(.*)", prompt, re.S)
-            question = qmatch.group(1).strip() if qmatch else prompt
-            ql = question.lower()
-            if re.search(r"[0-9].*[+\-*/]", question) or any(w in ql for w in ["add","sum","multiply","divide","compute","calculate","total","power","factorial","!","**","^"]):
-                expr = None
-                # Try to capture an expression inside the question
-                cands = re.findall(r"[0-9\+\-\*/%\(\)\.!\^\s]+", question)
-                cands = [c.strip() for c in cands if c.strip()]
-                expr = max(cands, key=len) if cands else ""
-                if expr:
-                    return _json.dumps({"tool": {"name": "calculator", "args": {"expression": expr}}})
-            if any(w in ql for w in ["time","date","utc","current time","now"]):
-                return _json.dumps({"tool": {"name": "now", "args": {"timezone": "utc"}}})
-            return _json.dumps({"final": {"answer": "ok"}})
-
-    # Allow explicit mock via env
-    if provider == "mock":
-        dspy.settings.configure(lm=_MockLM(), track_usage=True)
-        return
-
-    # If we got here, all backends failed: use mock and include details in a warning
-    dspy.settings.configure(lm=_MockLM(), track_usage=True)
+            return "{\"final\":{\"answer\":\"ok\"}}"
+    dspy.settings.configure(lm=_FallbackMockLM(), track_usage=True)
     return
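
A minimal sketch of exercising the mock path after this change, assuming the process environment selects the mock provider before configure_lm() runs; the app.config import path is illustrative only, and the call relies on the mock LM's keyword-only prompt signature shown above (a real backend's call convention differs):

import json

import dspy

from app.config import configure_lm  # hypothetical module path; adjust to this repo's layout

configure_lm()             # assumes provider resolves to "mock" from the environment
lm = dspy.settings.lm      # the LM instance configured above

# A math-flavored question should route to the calculator tool directive.
directive = json.loads(lm(prompt="Question: What is 17 * 3?"))
if "tool" in directive:
    print("tool call:", directive["tool"]["name"], directive["tool"]["args"])
elif "final" in directive:
    print("final answer:", directive["final"]["answer"])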