Skip to content

Commit 8b943a7

Browse files
authored
fix: concurrent ResponseRelevancy (#2328)
## Issue Link / Problem Description - Fixes #2324 ## Changes Made - Fixes an IndexError in generate_multiple that occurred when the LLM returned fewer generations than requested
1 parent 80e77b6 commit 8b943a7

File tree

1 file changed

+22
-1
lines changed

1 file changed

+22
-1
lines changed

src/ragas/prompt/pydantic_prompt.py

Lines changed: 22 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -249,7 +249,28 @@ async def generate_multiple(
249249

250250
output_models = []
251251
parser = RagasOutputParser(pydantic_object=self.output_model)
252-
for i in range(n):
252+
253+
# Handle cases where LLM returns fewer generations than requested
254+
if is_langchain_llm(llm):
255+
available_generations = len(resp.generations)
256+
else:
257+
available_generations = len(resp.generations[0]) if resp.generations else 0
258+
259+
actual_n = min(n, available_generations)
260+
261+
if actual_n == 0:
262+
logger.error(
263+
f"LLM returned no generations when {n} were requested. Cannot proceed."
264+
)
265+
raise ValueError(f"LLM returned no generations when {n} were requested")
266+
267+
if actual_n < n:
268+
logger.warning(
269+
f"LLM returned {actual_n} generations instead of requested {n}. "
270+
f"Proceeding with {actual_n} generations."
271+
)
272+
273+
for i in range(actual_n):
253274
if is_langchain_llm(llm):
254275
# For LangChain LLMs, each generation is in a separate batch result
255276
output_string = resp.generations[i][0].text

0 commit comments

Comments
 (0)