@@ -92,21 +92,25 @@ def generate(
             else:
                 missing_prompts.append(prompt)
                 missing_prompt_idxs.append(i)
-        self.callback_manager.on_llm_start(
-            {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose
-        )
-        try:
-            new_results = self._generate(missing_prompts, stop=stop)
-        except (KeyboardInterrupt, Exception) as e:
-            self.callback_manager.on_llm_error(e, verbose=self.verbose)
-            raise e
-        self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
-        for i, result in enumerate(new_results.generations):
-            existing_prompts[missing_prompt_idxs[i]] = result
-            prompt = prompts[missing_prompt_idxs[i]]
-            langchain.llm_cache.update(prompt, llm_string, result)
+        if len(missing_prompts) > 0:
+            self.callback_manager.on_llm_start(
+                {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose
+            )
+            try:
+                new_results = self._generate(missing_prompts, stop=stop)
+            except (KeyboardInterrupt, Exception) as e:
+                self.callback_manager.on_llm_error(e, verbose=self.verbose)
+                raise e
+            self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
+            for i, result in enumerate(new_results.generations):
+                existing_prompts[missing_prompt_idxs[i]] = result
+                prompt = prompts[missing_prompt_idxs[i]]
+                langchain.llm_cache.update(prompt, llm_string, result)
+            llm_output = new_results.llm_output
+        else:
+            llm_output = {}
         generations = [existing_prompts[i] for i in range(len(prompts))]
-        return LLMResult(generations=generations, llm_output=new_results.llm_output)
+        return LLMResult(generations=generations, llm_output=llm_output)
 
     def get_num_tokens(self, text: str) -> int:
         """Get the number of tokens present in the text."""
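The guard added above matters because a call in which every prompt is already cached previously fell through to the final return with `new_results` never assigned, raising an `UnboundLocalError`; with the guard, a fully cached call skips `_generate` and the callbacks entirely and returns an empty `llm_output`. A minimal sketch of the fixed behavior, not part of the diff, assuming the `InMemoryCache` and `OpenAI` wrappers from the same era of the library (a valid OpenAI API key is required; the prompt text is illustrative):

import langchain
from langchain.cache import InMemoryCache
from langchain.llms import OpenAI

# Enable caching via the module-level llm_cache used by generate().
langchain.llm_cache = InMemoryCache()
llm = OpenAI()

# First call misses the cache: _generate runs, callbacks fire, cache is filled.
first = llm.generate(["Tell me a joke"])

# Second call is served entirely from the cache: missing_prompts is empty, so
# _generate is never called and llm_output is the empty dict set by the guard.
second = llm.generate(["Tell me a joke"])
assert second.generations == first.generations
assert second.llm_output == {}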