From f2283172ec06c5f0e3e7bc83d77176403326bd49 Mon Sep 17 00:00:00 2001
From: Reda MOUNTASSIR
Date: Fri, 9 Jan 2026 16:33:51 +0100
Subject: [PATCH 1/2] fix: map MIPROv2 trial history to AcceptedIteration schema to prevent validation errors

---
 deepeval/optimizer/algorithms/miprov2/miprov2.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/deepeval/optimizer/algorithms/miprov2/miprov2.py b/deepeval/optimizer/algorithms/miprov2/miprov2.py
index 3e13544d4..a9d8c13bb 100644
--- a/deepeval/optimizer/algorithms/miprov2/miprov2.py
+++ b/deepeval/optimizer/algorithms/miprov2/miprov2.py
@@ -52,6 +52,7 @@
 from deepeval.errors import DeepEvalError
 from deepeval.optimizer.utils import Aggregator, mean_of_all
 from deepeval.optimizer.types import (
+    AcceptedIteration,
     PromptConfiguration,
     PromptConfigurationId,
     ModuleId,
@@ -725,11 +726,21 @@ def _build_result(
         prompt_config_snapshots = build_prompt_config_snapshots(
             self.prompt_configurations_by_id
         )
+
+        accepted_iterations = [
+            AcceptedIteration(
+                parent=self._instruction_candidates[0].id,
+                child=best.id,
+                module=self.SINGLE_MODULE_ID,
+                before=0.0,
+                after=trial.get("score", 0.0),
+            ) for trial in self._trial_history
+        ]

         report = OptimizationReport(
             optimization_id=self.optimization_id,
             best_id=best.id,
-            accepted_iterations=self._trial_history,
+            accepted_iterations=accepted_iterations,
             pareto_scores=self.pareto_score_table,
             parents=self.parents_by_id,
             prompt_configurations=prompt_config_snapshots,

From d1f285745272177dfe4ca5674459c50c31c7de8c Mon Sep 17 00:00:00 2001
From: Reda MOUNTASSIR
Date: Fri, 9 Jan 2026 17:10:31 +0100
Subject: [PATCH 2/2] Added Formatting

---
 deepeval/optimizer/algorithms/miprov2/miprov2.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/deepeval/optimizer/algorithms/miprov2/miprov2.py b/deepeval/optimizer/algorithms/miprov2/miprov2.py
index a9d8c13bb..98a42b633 100644
--- a/deepeval/optimizer/algorithms/miprov2/miprov2.py
+++ b/deepeval/optimizer/algorithms/miprov2/miprov2.py
@@ -726,7 +726,7 @@ def _build_result(
         prompt_config_snapshots = build_prompt_config_snapshots(
             self.prompt_configurations_by_id
         )
-
+
         accepted_iterations = [
             AcceptedIteration(
                 parent=self._instruction_candidates[0].id,
                 child=best.id,
                 module=self.SINGLE_MODULE_ID,
                 before=0.0,
                 after=trial.get("score", 0.0),
-            ) for trial in self._trial_history
+            )
+            for trial in self._trial_history
         ]

         report = OptimizationReport(
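
Note (outside the patch): a minimal, self-contained sketch of the mapping this fix introduces, i.e. turning raw trial-history dicts into AcceptedIteration objects before they reach OptimizationReport. The stand-in AcceptedIteration model, the pydantic base, the literal ids, and the trial_history dicts below are assumptions for illustration only; the keyword arguments and the trial.get("score", 0.0) access mirror the diff above, but the real class lives in deepeval.optimizer.types and may differ.

# Illustrative sketch only -- a stand-in model, not deepeval's actual AcceptedIteration.
from pydantic import BaseModel


class AcceptedIteration(BaseModel):
    # Assumed fields, mirroring the keyword arguments used in the patch.
    parent: str
    child: str
    module: str
    before: float
    after: float


# Hypothetical trial history: plain dicts carrying a "score" key, as implied
# by the trial.get("score", 0.0) call in the diff.
trial_history = [
    {"score": 0.41},
    {"score": 0.57},
]

# The fix in spirit: build schema objects from the raw dicts instead of passing
# the dicts straight into the report, which would fail schema validation.
accepted_iterations = [
    AcceptedIteration(
        parent="candidate-0",  # hypothetical ids; the patch uses real candidate ids
        child="candidate-best",
        module="single_module",
        before=0.0,
        after=trial.get("score", 0.0),
    )
    for trial in trial_history
]

print(accepted_iterations[-1].after)  # 0.57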