Commit 842b7a8

Strip markdown fences when generating evals datasets (#3141)
1 parent 6635272 · commit 842b7a8

File tree

1 file changed: +3 -1 lines

pydantic_evals/pydantic_evals/generation.py

Lines changed: 3 additions & 1 deletion
@@ -14,6 +14,7 @@
 from typing_extensions import TypeVar
 
 from pydantic_ai import Agent, models
+from pydantic_ai._utils import strip_markdown_fences
 from pydantic_evals import Dataset
 from pydantic_evals.evaluators.evaluator import Evaluator
 
@@ -73,8 +74,9 @@ async def generate_dataset(
     )
 
     result = await agent.run(extra_instructions or 'Please generate the object.')
+    output = strip_markdown_fences(result.output)
     try:
-        result = dataset_type.from_text(result.output, fmt='json', custom_evaluator_types=custom_evaluator_types)
+        result = dataset_type.from_text(output, fmt='json', custom_evaluator_types=custom_evaluator_types)
     except ValidationError as e: # pragma: no cover
         print(f'Raw response from model:\n{result.output}')
         raise e
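
For context, the imported helper pydantic_ai._utils.strip_markdown_fences removes a Markdown code fence that models sometimes wrap around their JSON output, so that dataset_type.from_text receives bare JSON. The snippet below is only a minimal sketch of that idea, not the actual pydantic_ai implementation; the name strip_markdown_fences_sketch and its exact behavior are assumptions for illustration.

import re

def strip_markdown_fences_sketch(text: str) -> str:
    # Hypothetical re-implementation: if the whole payload is wrapped in a
    # single ```...``` fence (optionally tagged, e.g. ```json), return the
    # fenced content; otherwise return the text unchanged.
    match = re.match(r'^\s*```[^\n]*\n(.*)\n```\s*$', text, flags=re.DOTALL)
    return match.group(1) if match else text

# Example: a model wraps the generated dataset in a ```json fence.
raw = '```json\n{"cases": []}\n```'
assert strip_markdown_fences_sketch(raw) == '{"cases": []}'
assert strip_markdown_fences_sketch('{"cases": []}') == '{"cases": []}'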
