17 | 17 |
18 | 18 | class MockEvaluator: |
19 | 19 | """Mock evaluator for testing""" |
20 | | - |
| 20 | + |
21 | 21 | def __init__(self): |
22 | 22 | self.call_count = 0 |
23 | | - |
| 23 | + |
24 | 24 | async def evaluate_program(self, code, program_id): |
25 | 25 | """Mock evaluation that returns predictable metrics""" |
26 | 26 | self.call_count += 1 |
27 | 27 | # Return slightly different metrics each time to simulate real evaluation |
28 | 28 | return { |
29 | 29 | "score": 0.5 + (self.call_count * 0.1) % 0.5, |
30 | | - "combined_score": 0.6 + (self.call_count * 0.05) % 0.4 |
| 30 | + "combined_score": 0.6 + (self.call_count * 0.05) % 0.4, |
31 | 31 | } |
32 | 32 |
33 | 33 |
34 | 34 | class TestCheckpointResume(unittest.TestCase): |
35 | 35 | """Tests for checkpoint resume functionality""" |
36 | | - |
| 36 | + |
37 | 37 | def setUp(self): |
38 | 38 | """Set up test environment""" |
39 | 39 | self.test_dir = tempfile.mkdtemp() |
40 | | - |
| 40 | + |
41 | 41 | # Create a simple test program |
42 | | - self.test_program_content = '''# EVOLVE-BLOCK-START |
| 42 | + self.test_program_content = """# EVOLVE-BLOCK-START |
43 | 43 | def test_function(): |
44 | 44 | return "test" |
45 | 45 | # EVOLVE-BLOCK-END |
46 | 46 |
47 | 47 | def main(): |
48 | 48 | return test_function() |
49 | | -''' |
50 | | - |
| 49 | +""" |
| 50 | + |
51 | 51 | self.test_program_path = os.path.join(self.test_dir, "test_program.py") |
52 | 52 | with open(self.test_program_path, "w") as f: |
53 | 53 | f.write(self.test_program_content) |
54 | | - |
| 54 | + |
55 | 55 | # Create a simple evaluator file |
56 | | - self.evaluator_content = ''' |
| 56 | + self.evaluator_content = """ |
57 | 57 | def evaluate(program_path): |
58 | 58 | return {"score": 0.5, "combined_score": 0.6} |
59 | | -''' |
60 | | - |
| 59 | +""" |
| 60 | + |
61 | 61 | self.evaluator_path = os.path.join(self.test_dir, "evaluator.py") |
62 | 62 | with open(self.evaluator_path, "w") as f: |
63 | 63 | f.write(self.evaluator_content) |
64 | | - |
| 64 | + |
65 | 65 | # Create test config |
66 | 66 | self.config = Config() |
67 | 67 | self.config.max_iterations = 2 # Keep tests fast |
68 | 68 | self.config.checkpoint_interval = 1 |
69 | 69 | self.config.database.in_memory = True |
70 | | - |
| 70 | + |
71 | 71 | def tearDown(self): |
72 | 72 | """Clean up test environment""" |
73 | 73 | import shutil |
| 74 | + |
74 | 75 | shutil.rmtree(self.test_dir, ignore_errors=True) |
75 | 76 |
76 | 77 | def test_fresh_start_adds_initial_program(self): |
77 | 78 | """Test that initial program is added when starting fresh""" |
78 | | - |
| 79 | + |
79 | 80 | async def run_test(): |
80 | | - with patch('openevolve.controller.Evaluator') as mock_evaluator_class: |
| 81 | + with patch("openevolve.controller.Evaluator") as mock_evaluator_class: |
81 | 82 | mock_evaluator = MockEvaluator() |
82 | 83 | mock_evaluator_class.return_value = mock_evaluator |
83 | | - |
| 84 | + |
84 | 85 | controller = OpenEvolve( |
85 | 86 | initial_program_path=self.test_program_path, |
86 | 87 | evaluation_file=self.evaluator_path, |
87 | 88 | config=self.config, |
88 | | - output_dir=self.test_dir |
| 89 | + output_dir=self.test_dir, |
89 | 90 | ) |
90 | | - |
| 91 | + |
91 | 92 | # Verify database is empty initially |
92 | 93 | self.assertEqual(len(controller.database.programs), 0) |
93 | 94 | self.assertEqual(controller.database.last_iteration, 0) |
94 | | - |
| 95 | + |
95 | 96 | # Mock the LLM to avoid actual API calls |
96 | | - with patch.object(controller.llm_ensemble, 'generate_with_context') as mock_llm: |
| 97 | + with patch.object(controller.llm_ensemble, "generate_with_context") as mock_llm: |
97 | 98 | mock_llm.return_value = "No changes needed" |
98 | | - |
| 99 | + |
99 | 100 | # Run for 0 iterations (just initialization) |
100 | 101 | result = await controller.run(iterations=0) |
101 | | - |
| 102 | + |
102 | 103 | # Verify initial program was added |
103 | 104 | self.assertEqual(len(controller.database.programs), 1) |
104 | | - |
| 105 | + |
105 | 106 | # Verify the initial program has the correct content |
106 | 107 | programs = list(controller.database.programs.values()) |
107 | 108 | initial_program = programs[0] |
108 | 109 | self.assertEqual(initial_program.code, self.test_program_content) |
109 | 110 | self.assertEqual(initial_program.iteration_found, 0) |
110 | | - |
| 111 | + |
111 | 112 | # Verify evaluator was called exactly once for initial program |
112 | 113 | self.assertEqual(mock_evaluator.call_count, 1) |
113 | | - |
| 114 | + |
114 | 115 | # Run the async test |
115 | 116 | asyncio.run(run_test()) |
116 | 117 |
117 | 118 | def test_duplicate_content_prevention(self): |
118 | 119 | """Test that programs with identical content are not added multiple times""" |
119 | | - |
| 120 | + |
120 | 121 | async def run_test(): |
121 | | - with patch('openevolve.controller.Evaluator') as mock_evaluator_class: |
| 122 | + with patch("openevolve.controller.Evaluator") as mock_evaluator_class: |
122 | 123 | mock_evaluator = MockEvaluator() |
123 | 124 | mock_evaluator_class.return_value = mock_evaluator |
124 | | - |
| 125 | + |
125 | 126 | controller = OpenEvolve( |
126 | 127 | initial_program_path=self.test_program_path, |
127 | 128 | evaluation_file=self.evaluator_path, |
128 | 129 | config=self.config, |
129 | | - output_dir=self.test_dir |
| 130 | + output_dir=self.test_dir, |
130 | 131 | ) |
131 | | - |
| 132 | + |
132 | 133 | # Add a program with different ID but same content as initial program |
133 | 134 | existing_program = Program( |
134 | 135 | id="different_id", |
135 | 136 | code=self.test_program_content, # Same content as initial program |
136 | 137 | language="python", |
137 | 138 | metrics={"score": 0.7, "combined_score": 0.8}, |
138 | | - iteration_found=0 |
| 139 | + iteration_found=0, |
139 | 140 | ) |
140 | | - |
| 141 | + |
141 | 142 | controller.database.add(existing_program) |
142 | | - |
| 143 | + |
143 | 144 | # Mock the LLM to avoid actual API calls |
144 | | - with patch.object(controller.llm_ensemble, 'generate_with_context') as mock_llm: |
| 145 | + with patch.object(controller.llm_ensemble, "generate_with_context") as mock_llm: |
145 | 146 | mock_llm.return_value = "No changes needed" |
146 | | - |
| 147 | + |
147 | 148 | # Run for 0 iterations (just initialization) |
148 | 149 | result = await controller.run(iterations=0) |
149 | | - |
| 150 | + |
150 | 151 | # Verify no additional program was added (still only 1 program) |
151 | 152 | self.assertEqual(len(controller.database.programs), 1) |
152 | | - |
| 153 | + |
153 | 154 | # Verify the existing program is still there |
154 | 155 | self.assertIn("different_id", controller.database.programs) |
155 | | - |
| 156 | + |
156 | 157 | # Verify evaluator was not called for initial program |
157 | 158 | self.assertEqual(mock_evaluator.call_count, 0) |
158 | 159 |
159 | 160 | def test_checkpoint_resume_skips_initial_program(self): |
160 | 161 | """Test that initial program is not re-added when resuming from checkpoint""" |
161 | | - |
| 162 | + |
162 | 163 | async def run_test(): |
163 | | - with patch('openevolve.controller.Evaluator') as mock_evaluator_class: |
| 164 | + with patch("openevolve.controller.Evaluator") as mock_evaluator_class: |
164 | 165 | mock_evaluator = MockEvaluator() |
165 | 166 | mock_evaluator_class.return_value = mock_evaluator |
166 | | - |
| 167 | + |
167 | 168 | controller = OpenEvolve( |
168 | 169 | initial_program_path=self.test_program_path, |
169 | 170 | evaluation_file=self.evaluator_path, |
170 | 171 | config=self.config, |
171 | | - output_dir=self.test_dir |
| 172 | + output_dir=self.test_dir, |
172 | 173 | ) |
173 | | - |
| 174 | + |
174 | 175 | # Simulate existing database state (as if loaded from checkpoint) |
175 | 176 | existing_program = Program( |
176 | 177 | id="existing_program_id", |
177 | 178 | code=self.test_program_content, # Same content as initial program |
178 | 179 | language="python", |
179 | 180 | metrics={"score": 0.7, "combined_score": 0.8}, |
180 | | - iteration_found=5 |
| 181 | + iteration_found=5, |
181 | 182 | ) |
182 | | - |
| 183 | + |
183 | 184 | controller.database.add(existing_program) |
184 | 185 | controller.database.last_iteration = 10 # Simulate resuming from iteration 10 |
185 | | - |
| 186 | + |
186 | 187 | # Verify database has the existing program |
187 | 188 | self.assertEqual(len(controller.database.programs), 1) |
188 | 189 | self.assertEqual(controller.database.last_iteration, 10) |
189 | | - |
| 190 | + |
190 | 191 | # Mock the LLM to avoid actual API calls |
191 | | - with patch.object(controller.llm_ensemble, 'generate_with_context') as mock_llm: |
| 192 | + with patch.object(controller.llm_ensemble, "generate_with_context") as mock_llm: |
192 | 193 | mock_llm.return_value = "No changes needed" |
193 | | - |
| 194 | + |
194 | 195 | # Run for 0 iterations (just initialization) |
195 | 196 | result = await controller.run(iterations=0) |
196 | | - |
| 197 | + |
197 | 198 | # Verify no additional program was added (still only 1 program) |
198 | 199 | self.assertEqual(len(controller.database.programs), 1) |
199 | | - |
| 200 | + |
200 | 201 | # Verify the existing program is still there with original ID |
201 | 202 | self.assertIn("existing_program_id", controller.database.programs) |
202 | | - |
| 203 | + |
203 | 204 | # Verify evaluator was not called for initial program (count should be 0) |
204 | 205 | self.assertEqual(mock_evaluator.call_count, 0) |
205 | | - |
| 206 | + |
206 | 207 | # Run the async test |
207 | 208 | asyncio.run(run_test()) |
208 | 209 |
209 | 210 | def test_non_empty_database_at_iteration_zero(self): |
210 | 211 | """Test that initial program is not added when database already has programs at iteration 0""" |
211 | | - |
| 212 | + |
212 | 213 | async def run_test(): |
213 | | - with patch('openevolve.controller.Evaluator') as mock_evaluator_class: |
| 214 | + with patch("openevolve.controller.Evaluator") as mock_evaluator_class: |
214 | 215 | mock_evaluator = MockEvaluator() |
215 | 216 | mock_evaluator_class.return_value = mock_evaluator |
216 | | - |
| 217 | + |
217 | 218 | controller = OpenEvolve( |
218 | 219 | initial_program_path=self.test_program_path, |
219 | 220 | evaluation_file=self.evaluator_path, |
220 | 221 | config=self.config, |
221 | | - output_dir=self.test_dir |
| 222 | + output_dir=self.test_dir, |
222 | 223 | ) |
223 | | - |
| 224 | + |
224 | 225 | # Add a program with different content to simulate pre-populated database |
225 | 226 | different_program = Program( |
226 | 227 | id="different_id", |
227 | 228 | code="def different_function(): pass", # Different content |
228 | 229 | language="python", |
229 | 230 | metrics={"score": 0.6, "combined_score": 0.7}, |
230 | | - iteration_found=0 |
| 231 | + iteration_found=0, |
231 | 232 | ) |
232 | | - |
| 233 | + |
233 | 234 | controller.database.add(different_program) |
234 | 235 | # Keep last_iteration at 0 to simulate fresh start with pre-populated DB |
235 | | - |
| 236 | + |
236 | 237 | # Verify database has the different program |
237 | 238 | self.assertEqual(len(controller.database.programs), 1) |
238 | 239 | self.assertEqual(controller.database.last_iteration, 0) |
239 | | - |
| 240 | + |
240 | 241 | # Mock the LLM to avoid actual API calls |
241 | | - with patch.object(controller.llm_ensemble, 'generate_with_context') as mock_llm: |
| 242 | + with patch.object(controller.llm_ensemble, "generate_with_context") as mock_llm: |
242 | 243 | mock_llm.return_value = "No changes needed" |
243 | | - |
| 244 | + |
244 | 245 | # Run for 0 iterations (just initialization) |
245 | 246 | result = await controller.run(iterations=0) |
246 | | - |
| 247 | + |
247 | 248 | # Verify no additional program was added (still only 1 program) |
248 | 249 | self.assertEqual(len(controller.database.programs), 1) |
249 | | - |
| 250 | + |
250 | 251 | # Verify the existing program is still there |
251 | 252 | self.assertIn("different_id", controller.database.programs) |
252 | | - |
| 253 | + |
253 | 254 | # Verify evaluator was not called for initial program |
254 | 255 | self.assertEqual(mock_evaluator.call_count, 0) |
255 | | - |
| 256 | + |
256 | 257 | # Run the async test |
257 | 258 | asyncio.run(run_test()) |
258 | 259 |
259 | 260 | def test_multiple_run_calls_no_pollution(self): |
260 | 261 | """Test that calling run() multiple times doesn't pollute the database""" |
261 | | - |
| 262 | + |
262 | 263 | async def run_test(): |
263 | | - with patch('openevolve.controller.Evaluator') as mock_evaluator_class: |
| 264 | + with patch("openevolve.controller.Evaluator") as mock_evaluator_class: |
264 | 265 | mock_evaluator = MockEvaluator() |
265 | 266 | mock_evaluator_class.return_value = mock_evaluator |
266 | | - |
| 267 | + |
267 | 268 | controller = OpenEvolve( |
268 | 269 | initial_program_path=self.test_program_path, |
269 | 270 | evaluation_file=self.evaluator_path, |
270 | 271 | config=self.config, |
271 | | - output_dir=self.test_dir |
| 272 | + output_dir=self.test_dir, |
272 | 273 | ) |
273 | | - |
| 274 | + |
274 | 275 | # Mock the LLM to avoid actual API calls |
275 | | - with patch.object(controller.llm_ensemble, 'generate_with_context') as mock_llm: |
| 276 | + with patch.object(controller.llm_ensemble, "generate_with_context") as mock_llm: |
276 | 277 | mock_llm.return_value = "No changes needed" |
277 | | - |
| 278 | + |
278 | 279 | # Run first time |
279 | 280 | result1 = await controller.run(iterations=0) |
280 | 281 | initial_count = len(controller.database.programs) |
281 | 282 | evaluator_calls_after_first = mock_evaluator.call_count |
282 | | - |
| 283 | + |
283 | 284 | # Run second time (simulating resume or restart) |
284 | 285 | result2 = await controller.run(iterations=0) |
285 | | - |
| 286 | + |
286 | 287 | # Run third time |
287 | 288 | result3 = await controller.run(iterations=0) |
288 | | - |
| 289 | + |
289 | 290 | # Verify database size didn't grow |
290 | 291 | self.assertEqual(len(controller.database.programs), initial_count) |
291 | | - |
| 292 | + |
292 | 293 | # Verify evaluator was only called once (for the initial program in first run) |
293 | 294 | self.assertEqual(mock_evaluator.call_count, evaluator_calls_after_first) |
294 | | - |
| 295 | + |
295 | 296 | # Run the async test |
296 | 297 | asyncio.run(run_test()) |
297 | 298 |