Skip to content

Commit f46233d

Browse files
committed
init
1 parent 4b099e3 commit f46233d

File tree

8 files changed

+285
-198
lines changed

8 files changed

+285
-198
lines changed

openevolve/config.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,7 @@ class DatabaseConfig:
140140

141141
# Evolutionary parameters
142142
population_size: int = 1000
143+
allowed_population_overflow: int = 50
143144
archive_size: int = 100
144145
num_islands: int = 5
145146

@@ -196,6 +197,7 @@ class Config:
196197
log_level: str = "INFO"
197198
log_dir: Optional[str] = None
198199
random_seed: Optional[int] = None
200+
language: str = None
199201

200202
# Component configurations
201203
llm: LLMConfig = field(default_factory=LLMConfig)

openevolve/controller.py

Lines changed: 82 additions & 125 deletions
Original file line numberDiff line numberDiff line change
@@ -10,19 +10,16 @@
1010
import uuid
1111
from pathlib import Path
1212
from typing import Any, Dict, List, Optional, Tuple, Union
13+
import concurrent.futures
1314

1415
from openevolve.config import Config, load_config
1516
from openevolve.database import Program, ProgramDatabase
1617
from openevolve.evaluator import Evaluator
1718
from openevolve.llm.ensemble import LLMEnsemble
1819
from openevolve.prompt.sampler import PromptSampler
20+
from openevolve.iteration import run_iteration_sync, Result
1921
from openevolve.utils.code_utils import (
20-
apply_diff,
2122
extract_code_language,
22-
extract_diffs,
23-
format_diff_summary,
24-
parse_evolve_blocks,
25-
parse_full_rewrite,
2623
)
2724
from openevolve.utils.format_utils import (
2825
format_metrics_safe,
@@ -83,7 +80,8 @@ def __init__(
8380
# Load initial program
8481
self.initial_program_path = initial_program_path
8582
self.initial_program_code = self._load_initial_program()
86-
self.language = extract_code_language(self.initial_program_code)
83+
if not self.config.language:
84+
self.config.language = extract_code_language(self.initial_program_code)
8785

8886
# Extract file extension from initial program
8987
self.file_extension = os.path.splitext(initial_program_path)[1]
@@ -115,8 +113,9 @@ def __init__(
115113
self.llm_evaluator_ensemble,
116114
self.evaluator_prompt_sampler,
117115
)
116+
self.evaluation_file = evaluation_file
118117

119-
logger.info(f"Initialized OpenEvolve with {initial_program_path} " f"and {evaluation_file}")
118+
logger.info(f"Initialized OpenEvolve with {initial_program_path}")
120119

121120
def _setup_logging(self) -> None:
122121
"""Set up logging"""
@@ -189,7 +188,7 @@ async def run(
189188
initial_program = Program(
190189
id=initial_program_id,
191190
code=self.initial_program_code,
192-
language=self.language,
191+
language=self.config.language,
193192
metrics=initial_metrics,
194193
iteration_found=start_iteration,
195194
)
@@ -216,127 +215,85 @@ async def run(
216215
logger.info(f"Using island-based evolution with {self.config.database.num_islands} islands")
217216
self.database.log_island_status()
218217

219-
for i in range(start_iteration, total_iterations):
220-
iteration_start = time.time()
221-
222-
# Manage island evolution - switch islands periodically
223-
if i > start_iteration and current_island_counter >= programs_per_island:
224-
self.database.next_island()
225-
current_island_counter = 0
226-
logger.debug(f"Switched to island {self.database.current_island}")
227-
228-
current_island_counter += 1
229-
230-
# Sample parent and inspirations from current island
231-
parent, inspirations = self.database.sample()
232-
233-
# Build prompt
234-
prompt = self.prompt_sampler.build_prompt(
235-
current_program=parent.code,
236-
parent_program=parent.code, # We don't have the parent's code, use the same
237-
program_metrics=parent.metrics,
238-
previous_programs=[p.to_dict() for p in self.database.get_top_programs(3)],
239-
top_programs=[p.to_dict() for p in inspirations],
240-
language=self.language,
241-
evolution_round=i,
242-
allow_full_rewrite=self.config.allow_full_rewrites,
243-
)
244-
245-
# Generate code modification
246-
try:
247-
llm_response = await self.llm_ensemble.generate_with_context(
248-
system_message=prompt["system"],
249-
messages=[{"role": "user", "content": prompt["user"]}],
250-
)
251-
252-
# Parse the response
253-
if self.config.diff_based_evolution:
254-
diff_blocks = extract_diffs(llm_response)
255-
256-
if not diff_blocks:
257-
logger.warning(f"Iteration {i+1}: No valid diffs found in response")
258-
continue
259-
260-
# Apply the diffs
261-
child_code = apply_diff(parent.code, llm_response)
262-
changes_summary = format_diff_summary(diff_blocks)
263-
else:
264-
# Parse full rewrite
265-
new_code = parse_full_rewrite(llm_response, self.language)
266-
267-
if not new_code:
268-
logger.warning(f"Iteration {i+1}: No valid code found in response")
269-
continue
270-
271-
child_code = new_code
272-
changes_summary = "Full rewrite"
273-
274-
# Check code length
275-
if len(child_code) > self.config.max_code_length:
276-
logger.warning(
277-
f"Iteration {i+1}: Generated code exceeds maximum length "
278-
f"({len(child_code)} > {self.config.max_code_length})"
218+
# create temp file to save database snapshots to for process workers to load from
219+
temp_db_path = "temp/" + str(uuid.uuid4())
220+
self.database.save(temp_db_path, start_iteration)
221+
222+
with concurrent.futures.ProcessPoolExecutor(
223+
max_workers=self.config.evaluator.parallel_evaluations
224+
) as executor:
225+
futures = []
226+
for i in range(start_iteration, total_iterations):
227+
futures.append(
228+
executor.submit(
229+
run_iteration_sync, i, self.config, self.evaluation_file, temp_db_path
279230
)
280-
continue
281-
282-
# Evaluate the child program
283-
child_id = str(uuid.uuid4())
284-
child_metrics = await self.evaluator.evaluate_program(child_code, child_id)
285-
286-
# Create a child program
287-
child_program = Program(
288-
id=child_id,
289-
code=child_code,
290-
language=self.language,
291-
parent_id=parent.id,
292-
generation=parent.generation + 1,
293-
metrics=child_metrics,
294-
metadata={
295-
"changes": changes_summary,
296-
"parent_metrics": parent.metrics,
297-
},
298231
)
299232

300-
# Add to database (will be added to current island)
301-
self.database.add(child_program, iteration=i + 1)
302-
303-
# Increment generation for current island
304-
self.database.increment_island_generation()
305-
306-
# Check if migration should occur
307-
if self.database.should_migrate():
308-
logger.info(f"Performing migration at iteration {i+1}")
309-
self.database.migrate_programs()
310-
self.database.log_island_status()
311-
312-
# Log progress
313-
iteration_time = time.time() - iteration_start
314-
self._log_iteration(i, parent, child_program, iteration_time)
315-
316-
# Specifically check if this is the new best program
317-
if self.database.best_program_id == child_program.id:
318-
logger.info(
319-
f"🌟 New best solution found at iteration {i+1}: {child_program.id}"
233+
iteration = start_iteration + 1
234+
for future in concurrent.futures.as_completed(futures):
235+
logger.info(f"Completed iteration {iteration}")
236+
try:
237+
result: Result = future.result()
238+
# Manage island evolution - switch islands periodically
239+
if (
240+
iteration - 1 > start_iteration
241+
and current_island_counter >= programs_per_island
242+
):
243+
self.database.next_island()
244+
current_island_counter = 0
245+
logger.debug(f"Switched to island {self.database.current_island}")
246+
247+
current_island_counter += 1
248+
249+
# Add to database (will be added to current island)
250+
self.database.add(result.child_program, iteration=iteration)
251+
252+
# Increment generation for current island
253+
self.database.increment_island_generation()
254+
255+
# Check if migration should occur
256+
if self.database.should_migrate():
257+
logger.info(f"Performing migration at iteration {iteration}")
258+
self.database.migrate_programs()
259+
self.database.log_island_status()
260+
261+
# Log progress
262+
self._log_iteration(
263+
iteration, result.parent, result.child_program, result.iteration_time
320264
)
321-
logger.info(f"Metrics: {format_metrics_safe(child_program.metrics)}")
322-
323-
# Save checkpoint
324-
if (i + 1) % self.config.checkpoint_interval == 0:
325-
self._save_checkpoint(i + 1)
326-
# Also log island status at checkpoints
327-
logger.info(f"Island status at checkpoint {i+1}:")
328-
self.database.log_island_status()
329-
330-
# Check if target score reached
331-
if target_score is not None:
332-
avg_score = sum(child_metrics.values()) / max(1, len(child_metrics))
333-
if avg_score >= target_score:
334-
logger.info(f"Target score {target_score} reached after {i+1} iterations")
335-
break
336-
337-
except Exception as e:
338-
logger.error(f"Error in iteration {i+1}: {str(e)}")
339-
continue
265+
266+
# Specifically check if this is the new best program
267+
if self.database.best_program_id == result.child_program.id:
268+
logger.info(
269+
f"🌟 New best solution found at iteration {iteration}: {result.child_program.id}"
270+
)
271+
logger.info(f"Metrics: {format_metrics_safe(result.child_program.metrics)}")
272+
273+
# Save checkpoint
274+
if (iteration) % self.config.checkpoint_interval == 0:
275+
self._save_checkpoint(iteration)
276+
# Also log island status at checkpoints
277+
logger.info(f"Island status at checkpoint {iteration}:")
278+
self.database.log_island_status()
279+
280+
# Check if target score reached
281+
if target_score is not None:
282+
avg_score = sum(result["child_metrics"].values()) / max(
283+
1, len(result.child_metrics)
284+
)
285+
if avg_score >= target_score:
286+
logger.info(
287+
f"Target score {target_score} reached after {iteration} iterations"
288+
)
289+
break
290+
291+
self.database.save(temp_db_path, iteration)
292+
iteration += 1
293+
except Exception:
294+
logger.exception(f"Error in iteration {iteration}:")
295+
continue
296+
os.rmdir(temp_db_path)
340297

341298
# Get the best program using our tracking mechanism
342299
best_program = None

0 commit comments

Comments (0)