2 files changed: +9 −8 lines (examples/function_minimization)

@@ -6,14 +6,15 @@ checkpoint_interval: 5
 llm:
   primary_model: "gemini-2.5-flash-lite"
   # primary_model: "llama3.1-8b"
-  primary_model_weight: 0.9
+  primary_model_weight: 0.8
   secondary_model: "gemini-2.5-flash"
   # secondary_model: "llama-4-scout-17b-16e-instruct"
-  secondary_model_weight: 0.1
+  secondary_model_weight: 0.2
   api_base: "https://generativelanguage.googleapis.com/v1beta/openai/"
   # api_base: "https://api.cerebras.ai/v1"
-  temperature: 0.4
-  max_tokens: 4000
+  temperature: 0.6
+  max_tokens: 10000
+  timeout: 120

 # Prompt configuration
 prompt:
@@ -30,9 +31,9 @@ database:
 # Evaluator configuration
 evaluator:
   timeout: 60
-  cascade_thresholds: [0.5, 0.75]
+  cascade_thresholds: [1.45]
   parallel_evaluations: 3

 # Evolution settings
-diff_based_evolution: false
+diff_based_evolution: true
 max_code_length: 20000
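
The weight change (0.9/0.1 to 0.8/0.2) shifts more of the traffic to the secondary model. Below is a minimal sketch of how such weights are commonly consumed, assuming they act as sampling probabilities over the two configured models; the pick_model helper and MODELS table are illustrative only, not code from this repository.

import random

MODELS = [
    ("gemini-2.5-flash-lite", 0.8),  # primary_model / primary_model_weight
    ("gemini-2.5-flash", 0.2),       # secondary_model / secondary_model_weight
]

def pick_model(models=MODELS, rng=random):
    """Sample one model per request, proportional to its configured weight."""
    names, weights = zip(*models)
    return rng.choices(names, weights=weights, k=1)[0]

# Roughly 80% of calls would go to the primary model, 20% to the secondary.
print(pick_model())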

@@ -80,10 +80,10 @@ def __post_init__(self):

         if self.secondary_model:
             # Create secondary model (only if weight > 0)
-            if not self.secondary_model_weight or self.secondary_model_weight > 0:
+            if self.secondary_model_weight is None or self.secondary_model_weight > 0:
                 secondary_model = LLMModelConfig(
                     name=self.secondary_model,
-                    weight=self.secondary_model_weight or 0.2
+                    weight=self.secondary_model_weight if self.secondary_model_weight is not None else 0.2
                 )
                 self.models.append(secondary_model)

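
The Python change fixes a truthiness bug: "not self.secondary_model_weight" is True both when the weight is missing (None) and when it is explicitly set to 0, so a weight of 0 still created the secondary model and then silently bumped its weight to 0.2 via "or 0.2". A standalone sketch of the old and new checks, assuming the intent stated in the code comment ("only if weight > 0"); the helper names are hypothetical.

def should_create_secondary_old(weight):
    # Old check: `not weight` treats 0.0 like None, so an explicit
    # weight of 0 still passes and the secondary model is created.
    return not weight or weight > 0

def should_create_secondary_new(weight):
    # New check: only a missing weight (None) falls through;
    # an explicit 0.0 now skips the secondary model as intended.
    return weight is None or weight > 0

assert should_create_secondary_old(0.0)      # bug: created despite weight 0
assert not should_create_secondary_new(0.0)  # fixed: weight 0 disables it
assert should_create_secondary_new(None)     # missing weight still uses the 0.2 default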