@@ -98,6 +98,53 @@ def get_convo(self, index) -> str:
9898
9999
100100class DummyLM (LM ):
101+ """
102+ Dummy language model for unit testing purposes.
103+
104+ Three modes of operation:
105+
106+ ## 1. List of dictionaries
107+
108+ If a list of dictionaries is provided, the dummy model will return the next dictionary
109+ in the list for each request, formatted according to the `format_fields` function
110+ from the chat adapter.
111+
112+ ```python
113+ lm = DummyLM([{"answer": "red"}, {"answer": "blue"}])
114+ dspy.settings.configure(lm=lm)
115+ predictor("What color is the sky?")
116+ # Output: "[[## answer ##]]\n red"
117+ predictor("What color is the sky?")
118+ # Output: "[[## answer ##]]\n blue"
119+ ```
120+
121+ ## 2. Dictionary of dictionaries
122+
123+ If a dictionary of dictionaries is provided, the dummy model will return the value
124+ corresponding to the key that is contained within the final message of the prompt,
125+ formatted according to the `format_fields` function from the chat adapter.
126+
127+ ```python
128+ lm = DummyLM({"What color is the sky?": {"answer": "blue"}})
129+ dspy.settings.configure(lm=lm)
130+ predictor("What color is the sky?")
131+ # Output: "[[## answer ##]]\n blue"
132+ ```
133+
134+ ## 3. Follow examples
135+
136+ If `follow_examples` is set to True, and one of the demos has an input exactly equal to the prompt,
137+ the dummy model will return the output from that demo.
138+
139+ ```python
140+ lm = DummyLM([{"answer": "red"}], follow_examples=True)
141+ dspy.settings.configure(lm=lm)
142+ predictor("What color is the sky?", demos=dspy.Example(input="What color is the sky?", output="blue"))
143+ # Output: "[[## answer ##]]\n blue"
144+ ```
145+
146+ """
147+
101148 def __init__ (self , answers : Union [list [dict [str , str ]], dict [str , dict [str , str ]]], follow_examples : bool = False ):
102149 super ().__init__ ("dummy" , "chat" , 0.0 , 1000 , True )
103150 self .answers = answers
0 commit comments