
Commit 5f73d06

hwchase17 and thepok authored
Harrison/fix caching bug (#788)
Co-authored-by: thepok <[email protected]>
1 parent 248c297 commit 5f73d06

File tree

1 file changed: +18 -14 lines changed


langchain/llms/base.py

Lines changed: 18 additions & 14 deletions
```diff
@@ -92,21 +92,25 @@ def generate(
             else:
                 missing_prompts.append(prompt)
                 missing_prompt_idxs.append(i)
-        self.callback_manager.on_llm_start(
-            {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose
-        )
-        try:
-            new_results = self._generate(missing_prompts, stop=stop)
-        except (KeyboardInterrupt, Exception) as e:
-            self.callback_manager.on_llm_error(e, verbose=self.verbose)
-            raise e
-        self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
-        for i, result in enumerate(new_results.generations):
-            existing_prompts[missing_prompt_idxs[i]] = result
-            prompt = prompts[missing_prompt_idxs[i]]
-            langchain.llm_cache.update(prompt, llm_string, result)
+        if len(missing_prompts) > 0:
+            self.callback_manager.on_llm_start(
+                {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose
+            )
+            try:
+                new_results = self._generate(missing_prompts, stop=stop)
+            except (KeyboardInterrupt, Exception) as e:
+                self.callback_manager.on_llm_error(e, verbose=self.verbose)
+                raise e
+            self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
+            for i, result in enumerate(new_results.generations):
+                existing_prompts[missing_prompt_idxs[i]] = result
+                prompt = prompts[missing_prompt_idxs[i]]
+                langchain.llm_cache.update(prompt, llm_string, result)
+            llm_output = new_results.llm_output
+        else:
+            llm_output = {}
         generations = [existing_prompts[i] for i in range(len(prompts))]
-        return LLMResult(generations=generations, llm_output=new_results.llm_output)
+        return LLMResult(generations=generations, llm_output=llm_output)
 
     def get_num_tokens(self, text: str) -> int:
         """Get the number of tokens present in the text."""
```
