
Commit 7bead73: fix linter

Parent: b3918e0

File tree: 2 files changed, +90 / -85 lines

openevolve/controller.py

Lines changed: 8 additions & 4 deletions
@@ -142,9 +142,11 @@ async def run(
         # Only add initial program if starting fresh (not resuming from checkpoint)
         # Check if we're resuming AND no program matches initial code to avoid pollution
         should_add_initial = (
-            start_iteration == 0 and
-            len(self.database.programs) == 0 and
-            not any(p.code == self.initial_program_code for p in self.database.programs.values())
+            start_iteration == 0
+            and len(self.database.programs) == 0
+            and not any(
+                p.code == self.initial_program_code for p in self.database.programs.values()
+            )
         )

         if should_add_initial:
@@ -166,7 +168,9 @@ async def run(

             self.database.add(initial_program)
         else:
-            logger.info(f"Skipping initial program addition (resuming from iteration {start_iteration} with {len(self.database.programs)} existing programs)")
+            logger.info(
+                f"Skipping initial program addition (resuming from iteration {start_iteration} with {len(self.database.programs)} existing programs)"
+            )

         # Main evolution loop
         total_iterations = start_iteration + max_iterations
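
For readers skimming the hunk above: the reformatted condition is pure Boolean logic and can be read in isolation. Below is a minimal sketch of that guard as a standalone function; the helper name and the SimpleNamespace stand-in for Program are hypothetical, and only the three-clause condition comes from the diff:

# Minimal sketch of the should_add_initial guard from the hunk above.
# should_add_initial_program and the SimpleNamespace stand-in are
# hypothetical; only the three-clause condition mirrors the diff.
from types import SimpleNamespace


def should_add_initial_program(start_iteration, programs, initial_program_code):
    """Return True only when starting fresh with an empty database."""
    return (
        start_iteration == 0
        and len(programs) == 0
        and not any(p.code == initial_program_code for p in programs.values())
    )


# Fresh start: empty database at iteration 0 -> the initial program is added.
assert should_add_initial_program(0, {}, "def f(): ...")

# Resumed run or pre-populated database: the guard refuses.
existing = {"id1": SimpleNamespace(code="def f(): ...")}
assert not should_add_initial_program(0, existing, "def f(): ...")
assert not should_add_initial_program(10, {}, "def f(): ...")

As written, the `len(programs) == 0` clause already makes the `any()` scan vacuous; the extra check appears to be defensive, matching the comment about avoiding duplicate initial programs.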

tests/test_checkpoint_resume.py

Lines changed: 82 additions & 81 deletions
@@ -17,281 +17,282 @@

 class MockEvaluator:
     """Mock evaluator for testing"""
-
+
     def __init__(self):
         self.call_count = 0
-
+
     async def evaluate_program(self, code, program_id):
         """Mock evaluation that returns predictable metrics"""
         self.call_count += 1
         # Return slightly different metrics each time to simulate real evaluation
         return {
             "score": 0.5 + (self.call_count * 0.1) % 0.5,
-            "combined_score": 0.6 + (self.call_count * 0.05) % 0.4
+            "combined_score": 0.6 + (self.call_count * 0.05) % 0.4,
         }


 class TestCheckpointResume(unittest.TestCase):
     """Tests for checkpoint resume functionality"""
-
+
     def setUp(self):
         """Set up test environment"""
         self.test_dir = tempfile.mkdtemp()
-
+
         # Create a simple test program
-        self.test_program_content = '''# EVOLVE-BLOCK-START
+        self.test_program_content = """# EVOLVE-BLOCK-START
 def test_function():
     return "test"
 # EVOLVE-BLOCK-END

 def main():
     return test_function()
-'''
-
+"""
+
         self.test_program_path = os.path.join(self.test_dir, "test_program.py")
         with open(self.test_program_path, "w") as f:
             f.write(self.test_program_content)
-
+
         # Create a simple evaluator file
-        self.evaluator_content = '''
+        self.evaluator_content = """
 def evaluate(program_path):
     return {"score": 0.5, "combined_score": 0.6}
-'''
-
+"""
+
         self.evaluator_path = os.path.join(self.test_dir, "evaluator.py")
         with open(self.evaluator_path, "w") as f:
             f.write(self.evaluator_content)
-
+
         # Create test config
         self.config = Config()
         self.config.max_iterations = 2  # Keep tests fast
         self.config.checkpoint_interval = 1
         self.config.database.in_memory = True
-
+
     def tearDown(self):
         """Clean up test environment"""
         import shutil
+
         shutil.rmtree(self.test_dir, ignore_errors=True)

     def test_fresh_start_adds_initial_program(self):
         """Test that initial program is added when starting fresh"""
-
+
         async def run_test():
-            with patch('openevolve.controller.Evaluator') as mock_evaluator_class:
+            with patch("openevolve.controller.Evaluator") as mock_evaluator_class:
                 mock_evaluator = MockEvaluator()
                 mock_evaluator_class.return_value = mock_evaluator
-
+
                 controller = OpenEvolve(
                     initial_program_path=self.test_program_path,
                     evaluation_file=self.evaluator_path,
                     config=self.config,
-                    output_dir=self.test_dir
+                    output_dir=self.test_dir,
                 )
-
+
                 # Verify database is empty initially
                 self.assertEqual(len(controller.database.programs), 0)
                 self.assertEqual(controller.database.last_iteration, 0)
-
+
                 # Mock the LLM to avoid actual API calls
-                with patch.object(controller.llm_ensemble, 'generate_with_context') as mock_llm:
+                with patch.object(controller.llm_ensemble, "generate_with_context") as mock_llm:
                     mock_llm.return_value = "No changes needed"
-
+
                     # Run for 0 iterations (just initialization)
                     result = await controller.run(iterations=0)
-
+
                     # Verify initial program was added
                     self.assertEqual(len(controller.database.programs), 1)
-
+
                     # Verify the initial program has the correct content
                     programs = list(controller.database.programs.values())
                     initial_program = programs[0]
                     self.assertEqual(initial_program.code, self.test_program_content)
                     self.assertEqual(initial_program.iteration_found, 0)
-
+
                     # Verify evaluator was called exactly once for initial program
                     self.assertEqual(mock_evaluator.call_count, 1)
-
+
         # Run the async test
         asyncio.run(run_test())

     def test_duplicate_content_prevention(self):
         """Test that programs with identical content are not added multiple times"""
-
+
         async def run_test():
-            with patch('openevolve.controller.Evaluator') as mock_evaluator_class:
+            with patch("openevolve.controller.Evaluator") as mock_evaluator_class:
                 mock_evaluator = MockEvaluator()
                 mock_evaluator_class.return_value = mock_evaluator
-
+
                 controller = OpenEvolve(
                     initial_program_path=self.test_program_path,
                     evaluation_file=self.evaluator_path,
                     config=self.config,
-                    output_dir=self.test_dir
+                    output_dir=self.test_dir,
                 )
-
+
                 # Add a program with different ID but same content as initial program
                 existing_program = Program(
                     id="different_id",
                     code=self.test_program_content,  # Same content as initial program
                     language="python",
                     metrics={"score": 0.7, "combined_score": 0.8},
-                    iteration_found=0
+                    iteration_found=0,
                 )
-
+
                 controller.database.add(existing_program)
-
+
                 # Mock the LLM to avoid actual API calls
-                with patch.object(controller.llm_ensemble, 'generate_with_context') as mock_llm:
+                with patch.object(controller.llm_ensemble, "generate_with_context") as mock_llm:
                     mock_llm.return_value = "No changes needed"
-
+
                     # Run for 0 iterations (just initialization)
                     result = await controller.run(iterations=0)
-
+
                     # Verify no additional program was added (still only 1 program)
                     self.assertEqual(len(controller.database.programs), 1)
-
+
                     # Verify the existing program is still there
                     self.assertIn("different_id", controller.database.programs)
-
+
                     # Verify evaluator was not called for initial program
                     self.assertEqual(mock_evaluator.call_count, 0)

     def test_checkpoint_resume_skips_initial_program(self):
         """Test that initial program is not re-added when resuming from checkpoint"""
-
+
         async def run_test():
-            with patch('openevolve.controller.Evaluator') as mock_evaluator_class:
+            with patch("openevolve.controller.Evaluator") as mock_evaluator_class:
                 mock_evaluator = MockEvaluator()
                 mock_evaluator_class.return_value = mock_evaluator
-
+
                 controller = OpenEvolve(
                     initial_program_path=self.test_program_path,
                     evaluation_file=self.evaluator_path,
                     config=self.config,
-                    output_dir=self.test_dir
+                    output_dir=self.test_dir,
                 )
-
+
                 # Simulate existing database state (as if loaded from checkpoint)
                 existing_program = Program(
                     id="existing_program_id",
                     code=self.test_program_content,  # Same content as initial program
                     language="python",
                     metrics={"score": 0.7, "combined_score": 0.8},
-                    iteration_found=5
+                    iteration_found=5,
                 )
-
+
                 controller.database.add(existing_program)
                 controller.database.last_iteration = 10  # Simulate resuming from iteration 10
-
+
                 # Verify database has the existing program
                 self.assertEqual(len(controller.database.programs), 1)
                 self.assertEqual(controller.database.last_iteration, 10)
-
+
                 # Mock the LLM to avoid actual API calls
-                with patch.object(controller.llm_ensemble, 'generate_with_context') as mock_llm:
+                with patch.object(controller.llm_ensemble, "generate_with_context") as mock_llm:
                     mock_llm.return_value = "No changes needed"
-
+
                     # Run for 0 iterations (just initialization)
                     result = await controller.run(iterations=0)
-
+
                     # Verify no additional program was added (still only 1 program)
                     self.assertEqual(len(controller.database.programs), 1)
-
+
                     # Verify the existing program is still there with original ID
                     self.assertIn("existing_program_id", controller.database.programs)
-
+
                     # Verify evaluator was not called for initial program (count should be 0)
                     self.assertEqual(mock_evaluator.call_count, 0)
-
+
         # Run the async test
         asyncio.run(run_test())

     def test_non_empty_database_at_iteration_zero(self):
         """Test that initial program is not added when database already has programs at iteration 0"""
-
+
         async def run_test():
-            with patch('openevolve.controller.Evaluator') as mock_evaluator_class:
+            with patch("openevolve.controller.Evaluator") as mock_evaluator_class:
                 mock_evaluator = MockEvaluator()
                 mock_evaluator_class.return_value = mock_evaluator
-
+
                 controller = OpenEvolve(
                     initial_program_path=self.test_program_path,
                     evaluation_file=self.evaluator_path,
                     config=self.config,
-                    output_dir=self.test_dir
+                    output_dir=self.test_dir,
                 )
-
+
                 # Add a program with different content to simulate pre-populated database
                 different_program = Program(
                     id="different_id",
                     code="def different_function(): pass",  # Different content
                     language="python",
                     metrics={"score": 0.6, "combined_score": 0.7},
-                    iteration_found=0
+                    iteration_found=0,
                 )
-
+
                 controller.database.add(different_program)
                 # Keep last_iteration at 0 to simulate fresh start with pre-populated DB
-
+
                 # Verify database has the different program
                 self.assertEqual(len(controller.database.programs), 1)
                 self.assertEqual(controller.database.last_iteration, 0)
-
+
                 # Mock the LLM to avoid actual API calls
-                with patch.object(controller.llm_ensemble, 'generate_with_context') as mock_llm:
+                with patch.object(controller.llm_ensemble, "generate_with_context") as mock_llm:
                     mock_llm.return_value = "No changes needed"
-
+
                     # Run for 0 iterations (just initialization)
                     result = await controller.run(iterations=0)
-
+
                     # Verify no additional program was added (still only 1 program)
                     self.assertEqual(len(controller.database.programs), 1)
-
+
                     # Verify the existing program is still there
                     self.assertIn("different_id", controller.database.programs)
-
+
                     # Verify evaluator was not called for initial program
                     self.assertEqual(mock_evaluator.call_count, 0)
-
+
         # Run the async test
         asyncio.run(run_test())

     def test_multiple_run_calls_no_pollution(self):
         """Test that calling run() multiple times doesn't pollute the database"""
-
+
         async def run_test():
-            with patch('openevolve.controller.Evaluator') as mock_evaluator_class:
+            with patch("openevolve.controller.Evaluator") as mock_evaluator_class:
                 mock_evaluator = MockEvaluator()
                 mock_evaluator_class.return_value = mock_evaluator
-
+
                 controller = OpenEvolve(
                     initial_program_path=self.test_program_path,
                     evaluation_file=self.evaluator_path,
                     config=self.config,
-                    output_dir=self.test_dir
+                    output_dir=self.test_dir,
                 )
-
+
                 # Mock the LLM to avoid actual API calls
-                with patch.object(controller.llm_ensemble, 'generate_with_context') as mock_llm:
+                with patch.object(controller.llm_ensemble, "generate_with_context") as mock_llm:
                     mock_llm.return_value = "No changes needed"
-
+
                     # Run first time
                     result1 = await controller.run(iterations=0)
                     initial_count = len(controller.database.programs)
                     evaluator_calls_after_first = mock_evaluator.call_count
-
+
                     # Run second time (simulating resume or restart)
                     result2 = await controller.run(iterations=0)
-
+
                     # Run third time
                     result3 = await controller.run(iterations=0)
-
+
                     # Verify database size didn't grow
                     self.assertEqual(len(controller.database.programs), initial_count)
-
+
                     # Verify evaluator was only called once (for the initial program in first run)
                     self.assertEqual(mock_evaluator.call_count, evaluator_calls_after_first)
-
+
         # Run the async test
         asyncio.run(run_test())

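
The test-file changes are purely cosmetic: single quotes become double quotes, trailing commas are added to multi-line argument lists, long lines are split, and whitespace-only lines are cleaned up. This is consistent with the output of the Black formatter, though the commit message only says "fix linter" and does not name the tool. Since behavior is unchanged, the suite should pass as before; below is a minimal sketch of running just this file with the standard-library unittest runner, assuming it is executed from the repository root so that the tests/ directory and the openevolve package are importable:

# Minimal sketch: run only the checkpoint-resume tests with the stdlib
# unittest runner. The repository layout is inferred from the file
# paths above; run from the repository root.
import unittest

suite = unittest.defaultTestLoader.discover(
    start_dir="tests", pattern="test_checkpoint_resume.py", top_level_dir="."
)
unittest.TextTestRunner(verbosity=2).run(suite)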
