99
1010from openevolve import run_evolution , evolve_function , evolve_code
1111from openevolve .config import Config , LLMModelConfig
12- from openevolve .controller import OpenEvolve
13- import sys
14- import os
15- sys .path .insert (0 , os .path .join (os .path .dirname (__file__ ), '..' ))
16- from test_utils import get_evolution_test_program , get_evolution_test_evaluator
17-
18-
def get_mock_config() -> Config:
    """Build a Config tuned for fast CI smoke runs (one iteration, no LLM calls)."""
    cfg = Config()

    # Evolution loop: a single iteration with an infrequent checkpoint interval.
    cfg.max_iterations = 1
    cfg.checkpoint_interval = 50

    # Keep the program database in memory so nothing touches disk.
    cfg.database.in_memory = True

    # Evaluator: serial, non-cascading, with a very short timeout.
    cfg.evaluator.cascade_evaluation = False
    cfg.evaluator.parallel_evaluations = 1
    cfg.evaluator.timeout = 5  # Very short timeout

    # Use empty models list - will trigger validation but won't try to make LLM calls
    cfg.llm.timeout = 5
    cfg.llm.retries = 0
    cfg.llm.models = []

    return cfg
3512
3613
3714class TestSmoke :
3815 """Fast smoke tests for CI"""
3916
40- def test_controller_initialization (self , test_program_file , test_evaluator_file ):
41- """Test that OpenEvolve controller can be initialized"""
42- config = get_mock_config ()
43-
44- controller = OpenEvolve (
45- initial_program_path = str (test_program_file ),
46- evaluation_file = str (test_evaluator_file ),
47- config = config ,
48- output_dir = tempfile .mkdtemp ()
49- )
50-
51- # Test basic initialization
52- assert controller is not None
53- assert controller .database is not None
54- assert controller .evaluator is not None
55- assert len (controller .database .programs ) == 1 # Initial program loaded
56-
57- def test_database_operations (self , test_program_file , test_evaluator_file ):
58- """Test database operations work correctly"""
59- config = get_mock_config ()
60-
61- controller = OpenEvolve (
62- initial_program_path = str (test_program_file ),
63- evaluation_file = str (test_evaluator_file ),
64- config = config ,
65- output_dir = tempfile .mkdtemp ()
66- )
67-
68- # Test database functionality
69- initial_count = len (controller .database .programs )
70- assert initial_count == 1
71-
72- # Test program retrieval
73- program_ids = list (controller .database .programs .keys ())
74- assert len (program_ids ) == 1
75-
76- first_program = controller .database .get (program_ids [0 ])
77- assert first_program is not None
78- assert hasattr (first_program , 'code' )
79- assert hasattr (first_program , 'metrics' )
80-
81- def test_evaluator_works (self , test_program_file , test_evaluator_file ):
82- """Test that evaluator can evaluate the initial program"""
83- config = get_mock_config ()
84-
85- controller = OpenEvolve (
86- initial_program_path = str (test_program_file ),
87- evaluation_file = str (test_evaluator_file ),
88- config = config ,
89- output_dir = tempfile .mkdtemp ()
90- )
91-
92- # The initial program should have been evaluated during initialization
93- programs = list (controller .database .programs .values ())
94- initial_program = programs [0 ]
95-
96- assert initial_program .metrics is not None
97- assert 'score' in initial_program .metrics
98- assert 'combined_score' in initial_program .metrics
99-
10017 def test_library_api_validation (self ):
10118 """Test library API gives proper error messages when not configured"""
10219 with tempfile .NamedTemporaryFile (mode = 'w' , suffix = '.py' , delete = False ) as f :
103- f .write (get_evolution_test_program ())
20+ f .write ("""
21+ # EVOLVE-BLOCK-START
22+ def solve(x):
23+ return x * 2
24+ # EVOLVE-BLOCK-END
25+ """ )
10426 program_file = f .name
10527
10628 def simple_evaluator (path ):
@@ -132,20 +54,42 @@ def test_config_validation(self):
13254 assert config .database .in_memory is True
13355 assert config .llm .retries >= 0
13456
57+ def test_llm_config_creation (self ):
58+ """Test that LLM configuration can be created properly"""
59+ config = Config ()
60+
61+ # Test adding a model configuration
62+ config .llm .models = [
63+ LLMModelConfig (
64+ name = "test-model" ,
65+ api_key = "test-key" ,
66+ api_base = "http://localhost:8000/v1" ,
67+ weight = 1.0 ,
68+ timeout = 60 ,
69+ retries = 0
70+ )
71+ ]
72+
73+ assert len (config .llm .models ) == 1
74+ assert config .llm .models [0 ].name == "test-model"
75+ assert config .llm .models [0 ].retries == 0
13576
@pytest.fixture
def test_program_file():
    """Yield the path of a temporary file holding the evolution test program."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as tmp:
        tmp.write(get_evolution_test_program())
    path = Path(tmp.name)
    yield path
    # Teardown: remove the temporary file once the test is done with it.
    path.unlink()
143-
144-
@pytest.fixture
def test_evaluator_file():
    """Yield the path of a temporary file holding the evolution test evaluator."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as tmp:
        tmp.write(get_evolution_test_evaluator())
    path = Path(tmp.name)
    yield path
    # Teardown: remove the temporary file once the test is done with it.
    path.unlink()
77+ def test_evolution_result_structure (self ):
78+ """Test that EvolutionResult has the expected structure"""
79+ from openevolve .api import EvolutionResult
80+ from openevolve .database import Program
81+
82+ # Test creating an EvolutionResult
83+ result = EvolutionResult (
84+ best_program = None ,
85+ best_score = 0.85 ,
86+ best_code = "def test(): pass" ,
87+ metrics = {"accuracy" : 0.85 , "speed" : 100 },
88+ output_dir = "/tmp/test"
89+ )
90+
91+ assert result .best_score == 0.85
92+ assert result .best_code == "def test(): pass"
93+ assert result .metrics ["accuracy" ] == 0.85
94+ assert result .output_dir == "/tmp/test"
95+ assert "0.8500" in str (result ) # Test __repr__
0 commit comments