@@ -56,7 +56,10 @@ class LLMConfig(LLMModelConfig):
5656 retry_delay : int = 5
5757
5858 # n-model configuration for evolution LLM ensemble
59- models : List [LLMModelConfig ] = field (default_factory = lambda : [LLMModelConfig ()])
59+ models : List [LLMModelConfig ] = field (default_factory = lambda : [
60+ LLMModelConfig (name = "gpt-4o-mini" , weight = 0.8 ),
61+ LLMModelConfig (name = "gpt-4o" , weight = 0.2 )
62+ ])
6063
6164 # n-model configuration for evaluator LLM ensemble
6265 evaluator_models : List [LLMModelConfig ] = field (default_factory = lambda : [])
@@ -195,7 +198,7 @@ class EvaluatorConfig:
195198 cascade_thresholds : List [float ] = field (default_factory = lambda : [0.5 , 0.75 , 0.9 ])
196199
197200 # Parallel evaluation
198- parallel_evaluations : int = 4
201+ parallel_evaluations : int = 1
199202 distributed : bool = False
200203
201204 # LLM-based feedback
@@ -217,6 +220,7 @@ class Config:
217220 log_level : str = "INFO"
218221 log_dir : Optional [str ] = None
219222 random_seed : Optional [int ] = 42
223+ language : Optional [str ] = None
220224
221225 # Component configurations
222226 llm : LLMConfig = field (default_factory = LLMConfig )
@@ -361,4 +365,4 @@ def load_config(config_path: Optional[Union[str, Path]] = None) -> Config:
361365 # Make the system message available to the individual models, in case it is not provided from the prompt sampler
362366 config .llm .update_model_params ({"system_message" : config .prompt .system_message })
363367
364- return config
368+ return config