@@ -66,7 +66,7 @@ def _invoke_llm(
         self,
         prompt=None,
         instructions=None,
-        msg_history=None,
+        messages=None,
         base_model=None,
         *args,
         **kwargs,
@@ -128,28 +128,26 @@ def _invoke_llm(
         }
 
         try:
-            if msg_history:
-                key = (msg_history[0]["content"], msg_history[1]["content"])
-                print("=========trying key", key)
+            if messages:
+                key = (messages[0]["content"], messages[1]["content"])
                 out_text = mock_llm_responses[key]
-                print("========found out text", out_text)
-            if prompt and instructions and not msg_history:
+            if prompt and instructions and not messages:
                 out_text = mock_llm_responses[(prompt, instructions)]
-            elif msg_history and not prompt and not instructions:
-                if msg_history == entity_extraction.COMPILED_MSG_HISTORY:
+            elif messages and not prompt and not instructions:
+                if messages == entity_extraction.COMPILED_MSG_HISTORY:
                     out_text = entity_extraction.LLM_OUTPUT
                 elif (
-                    msg_history == string.MOVIE_MSG_HISTORY
+                    messages == string.MOVIE_MSG_HISTORY
                     and base_model == pydantic.WITH_MSG_HISTORY
                 ):
                     out_text = pydantic.MSG_HISTORY_LLM_OUTPUT_INCORRECT
-                elif msg_history == string.MOVIE_MSG_HISTORY:
+                elif messages == string.MOVIE_MSG_HISTORY:
                     out_text = string.MSG_LLM_OUTPUT_INCORRECT
                 else:
-                    raise ValueError("msg_history not found")
+                    raise ValueError("messages not found")
             else:
                 raise ValueError(
-                    "specify either prompt and instructions " "or msg_history"
+                    "specify either prompt and instructions " "or messages"
                 )
             return LLMResponse(
                 output=out_text,
@@ -160,7 +158,7 @@ def _invoke_llm(
             print("Unrecognized prompt!")
             print("\nprompt: \n", prompt)
             print("\ninstructions: \n", instructions)
-            print("\nmsg_history: \n", msg_history)
+            print("\nmessages: \n", messages)
             raise ValueError("Compiled prompt not found")
 
 
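Note for reviewers: the renamed `messages` path resolves canned outputs by keying on the first two message contents, i.e. `key = (messages[0]["content"], messages[1]["content"])`. Below is a minimal, self-contained sketch of that keying convention; the message contents, the response string, and the standalone dict are illustrative only, not taken from the test fixtures — only the key scheme comes from the diff above.

    # Illustrative sketch of the lookup convention used by the mock:
    # canned outputs are keyed on the first two message contents.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},  # hypothetical content
        {"role": "user", "content": "Recommend a movie."},              # hypothetical content
    ]
    mock_llm_responses = {
        ("You are a helpful assistant.", "Recommend a movie."): '{"movie": "Blade Runner"}',
    }

    key = (messages[0]["content"], messages[1]["content"])
    out_text = mock_llm_responses[key]
    assert out_text == '{"movie": "Blade Runner"}'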