
Commit 8850a2d

Merge branch 'main' into feat-lm-eval
2 parents: af8499a + 166f77f

File tree

10 files changed: +289 −100 lines changed


examples/function_minimization/evaluator.py

Lines changed: 42 additions & 37 deletions
@@ -5,8 +5,9 @@
 import importlib.util
 import numpy as np
 import time
-import multiprocessing
+import concurrent.futures
 import traceback
+import signal
 
 
 def run_with_timeout(func, args=(), kwargs={}, timeout_seconds=5):
@@ -22,31 +23,13 @@ def run_with_timeout(func, args=(), kwargs={}, timeout_seconds=5):
     Returns:
         Result of the function or raises TimeoutError
     """
-
-    def wrapper(queue, func, args, kwargs):
+    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
+        future = executor.submit(func, *args, **kwargs)
         try:
-            result = func(*args, **kwargs)
-            queue.put(("success", result))
-        except Exception as e:
-            queue.put(("error", e))
-
-    queue = multiprocessing.Queue()
-    process = multiprocessing.Process(target=wrapper, args=(queue, func, args, kwargs))
-    process.start()
-    process.join(timeout=timeout_seconds)
-
-    if process.is_alive():
-        process.terminate()
-        process.join()
-        raise TimeoutError(f"Function timed out after {timeout_seconds} seconds")
-
-    if queue.empty():
-        raise TimeoutError("Function ended without returning a result")
-
-    status, result = queue.get()
-    if status == "error":
-        raise result
-    return result
+            result = future.result(timeout=timeout_seconds)
+            return result
+        except concurrent.futures.TimeoutError:
+            raise TimeoutError(f"Function timed out after {timeout_seconds} seconds")
 
 
 def safe_float(value):
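
The rewritten helper trades process isolation for a thread pool: future.result(timeout=...) raises concurrent.futures.TimeoutError once the deadline passes. Below is a minimal sketch of that behavior, where slow_search is a hypothetical stand-in for program.run_search and not part of the repository. Note that Python threads cannot be forcibly killed, so the worker keeps running after the timeout, and leaving the with block calls shutdown(wait=True), which waits for the worker before the TimeoutError propagates.

import time
import concurrent.futures

def slow_search():
    # Hypothetical stand-in for program.run_search; deliberately exceeds the deadline
    time.sleep(6)
    return (1.0, 2.0, 0.5)

with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
    future = executor.submit(slow_search)
    try:
        x, y, value = future.result(timeout=5)
    except concurrent.futures.TimeoutError:
        # Raised after ~5 s; the worker thread still runs slow_search to completion
        print("Function timed out after 5 seconds")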
@@ -107,15 +90,27 @@ def evaluate(program_path):
             # Run with timeout
             result = run_with_timeout(program.run_search, timeout_seconds=5)
 
-            # Check if we got a tuple of 3 values
-            if not isinstance(result, tuple) or len(result) != 3:
+            # Handle different result formats
+            if isinstance(result, tuple):
+                if len(result) == 3:
+                    x, y, value = result
+                elif len(result) == 2:
+                    # Assume it's (x, y) and calculate value
+                    x, y = result
+                    # Calculate the function value since it wasn't returned
+                    value = np.sin(x) * np.cos(y) + np.sin(x * y) + (x**2 + y**2) / 20
+                    print(f"Trial {trial}: Got 2 values, calculated function value: {value}")
+                else:
+                    print(
+                        f"Trial {trial}: Invalid result format, expected tuple of 2 or 3 values but got {len(result)}"
+                    )
+                    continue
+            else:
                 print(
-                    f"Trial {trial}: Invalid result format, expected tuple of 3 values but got {type(result)}"
+                    f"Trial {trial}: Invalid result format, expected tuple but got {type(result)}"
                 )
                 continue
 
-            x, y, value = result
-
             end_time = time.time()
 
             # Ensure all values are float
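
The same 2-or-3-tuple handling is repeated in evaluate_stage1() below; here is a compact, self-contained sketch of the shared logic, where normalize_search_result is a hypothetical helper name rather than code from the repository:

import numpy as np

def normalize_search_result(result):
    # Hypothetical helper mirroring the handling above: accept (x, y, value)
    # or (x, y) and recompute the objective when the value is missing.
    if not isinstance(result, tuple) or len(result) not in (2, 3):
        raise ValueError(f"Expected tuple of 2 or 3 values, got {result!r}")
    if len(result) == 3:
        return result
    x, y = result
    value = np.sin(x) * np.cos(y) + np.sin(x * y) + (x**2 + y**2) / 20
    return x, y, value

print(normalize_search_result((1.0, 2.0)))  # (1.0, 2.0, ~0.8091)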
@@ -264,15 +259,25 @@ def evaluate_stage1(program_path):
         # Run a single trial with timeout
         result = run_with_timeout(program.run_search, timeout_seconds=5)
 
-        # Check if we got a tuple of 3 values
-        if not isinstance(result, tuple) or len(result) != 3:
-            print(
-                f"Stage 1: Invalid result format, expected tuple of 3 values but got {type(result)}"
-            )
+        # Handle different result formats
+        if isinstance(result, tuple):
+            if len(result) == 3:
+                x, y, value = result
+            elif len(result) == 2:
+                # Assume it's (x, y) and calculate value
+                x, y = result
+                # Calculate the function value since it wasn't returned
+                value = np.sin(x) * np.cos(y) + np.sin(x * y) + (x**2 + y**2) / 20
+                print(f"Stage 1: Got 2 values, calculated function value: {value}")
+            else:
+                print(
+                    f"Stage 1: Invalid result format, expected tuple of 2 or 3 values but got {len(result)}"
+                )
+                return {"runs_successfully": 0.0, "error": "Invalid result format"}
+        else:
+            print(f"Stage 1: Invalid result format, expected tuple but got {type(result)}")
             return {"runs_successfully": 0.0, "error": "Invalid result format"}
 
-        x, y, value = result
-
         # Ensure all values are float
         x = safe_float(x)
         y = safe_float(y)
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+scipy

openevolve/controller.py

Lines changed: 10 additions & 14 deletions
@@ -24,6 +24,10 @@
     parse_evolve_blocks,
     parse_full_rewrite,
 )
+from openevolve.utils.format_utils import (
+    format_metrics_safe,
+    format_improvement_safe,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -314,9 +318,7 @@ async def run(
                 logger.info(
                     f"🌟 New best solution found at iteration {i+1}: {child_program.id}"
                 )
-                logger.info(
-                    f"Metrics: {', '.join(f'{name}={value:.4f}' for name, value in child_program.metrics.items())}"
-                )
+                logger.info(f"Metrics: {format_metrics_safe(child_program.metrics)}")
 
             # Save checkpoint
             if (i + 1) % self.config.checkpoint_interval == 0:
@@ -371,7 +373,7 @@ async def run(
         if best_program:
             logger.info(
                 f"Evolution complete. Best program has metrics: "
-                f"{', '.join(f'{name}={value:.4f}' for name, value in best_program.metrics.items())}"
+                f"{format_metrics_safe(best_program.metrics)}"
             )
 
             # Save the best program (using our tracked best program)
@@ -399,19 +401,13 @@ def _log_iteration(
             child: Child program
             elapsed_time: Elapsed time in seconds
         """
-        # Calculate improvement
-        improvement = {}
-        for metric, value in child.metrics.items():
-            if metric in parent.metrics:
-                diff = value - parent.metrics[metric]
-                improvement[metric] = diff
-
-        improvement_str = ", ".join(f"{name}={diff:+.4f}" for name, diff in improvement.items())
+        # Calculate improvement using safe formatting
+        improvement_str = format_improvement_safe(parent.metrics, child.metrics)
 
         logger.info(
             f"Iteration {iteration+1}: Child {child.id} from parent {parent.id} "
             f"in {elapsed_time:.2f}s. Metrics: "
-            f"{', '.join(f'{name}={value:.4f}' for name, value in child.metrics.items())} "
+            f"{format_metrics_safe(child.metrics)} "
             f"(Δ: {improvement_str})"
         )
 
@@ -467,7 +463,7 @@ def _save_checkpoint(self, iteration: int) -> None:
 
             logger.info(
                 f"Saved best program at checkpoint {iteration} with metrics: "
-                f"{', '.join(f'{name}={value:.4f}' for name, value in best_program.metrics.items())}"
+                f"{format_metrics_safe(best_program.metrics)}"
             )
 
         logger.info(f"Saved checkpoint at iteration {iteration} to {checkpoint_path}")

openevolve/database.py

Lines changed: 13 additions & 18 deletions
@@ -15,6 +15,7 @@
 
 from openevolve.config import DatabaseConfig
 from openevolve.utils.code_utils import calculate_edit_distance
+from openevolve.utils.metrics_utils import safe_numeric_average
 
 logger = logging.getLogger(__name__)
 
@@ -227,10 +228,10 @@ def get_best_program(self, metric: Optional[str] = None) -> Optional[Program]:
             if sorted_programs:
                 logger.debug(f"Found best program by combined_score: {sorted_programs[0].id}")
         else:
-            # Sort by average of all metrics as fallback
+            # Sort by average of all numeric metrics as fallback
             sorted_programs = sorted(
                 self.programs.values(),
-                key=lambda p: sum(p.metrics.values()) / max(1, len(p.metrics)),
+                key=lambda p: safe_numeric_average(p.metrics),
                 reverse=True,
             )
         if sorted_programs:
@@ -281,10 +282,10 @@ def get_top_programs(self, n: int = 10, metric: Optional[str] = None) -> List[Program]:
                 reverse=True,
             )
         else:
-            # Sort by average of all metrics
+            # Sort by average of all numeric metrics
             sorted_programs = sorted(
                 self.programs.values(),
-                key=lambda p: sum(p.metrics.values()) / max(1, len(p.metrics)),
+                key=lambda p: safe_numeric_average(p.metrics),
                 reverse=True,
             )
 
@@ -436,7 +437,7 @@ def _calculate_feature_coords(self, program: Program) -> List[int]:
                 if not program.metrics:
                     bin_idx = 0
                 else:
-                    avg_score = sum(program.metrics.values()) / len(program.metrics)
+                    avg_score = safe_numeric_average(program.metrics)
                     bin_idx = min(int(avg_score * self.feature_bins), self.feature_bins - 1)
                 coords.append(bin_idx)
             elif dim in program.metrics:
@@ -487,9 +488,9 @@ def _is_better(self, program1: Program, program2: Program) -> bool:
         if "combined_score" in program1.metrics and "combined_score" in program2.metrics:
             return program1.metrics["combined_score"] > program2.metrics["combined_score"]
 
-        # Fallback to average of all metrics
-        avg1 = sum(program1.metrics.values()) / len(program1.metrics)
-        avg2 = sum(program2.metrics.values()) / len(program2.metrics)
+        # Fallback to average of all numeric metrics
+        avg1 = safe_numeric_average(program1.metrics)
+        avg2 = safe_numeric_average(program2.metrics)
 
         return avg1 > avg2
 
@@ -507,9 +508,7 @@ def _update_archive(self, program: Program) -> None:
 
         # Otherwise, find worst program in archive
         archive_programs = [self.programs[pid] for pid in self.archive]
-        worst_program = min(
-            archive_programs, key=lambda p: sum(p.metrics.values()) / max(1, len(p.metrics))
-        )
+        worst_program = min(archive_programs, key=lambda p: safe_numeric_average(p.metrics))
 
         # Replace if new program is better
         if self._is_better(program, worst_program):
@@ -716,7 +715,7 @@ def _enforce_population_limit(self) -> None:
         # Sort by average metric (worst first)
         sorted_programs = sorted(
             all_programs,
-            key=lambda p: sum(p.metrics.values()) / max(1, len(p.metrics)) if p.metrics else 0.0,
+            key=lambda p: safe_numeric_average(p.metrics),
         )
 
         # Remove worst programs, but never remove the best program
@@ -811,9 +810,7 @@ def migrate_programs(self) -> None:
 
             # Sort by fitness (using combined_score or average metrics)
             island_programs.sort(
-                key=lambda p: p.metrics.get(
-                    "combined_score", sum(p.metrics.values()) / max(1, len(p.metrics))
-                ),
+                key=lambda p: p.metrics.get("combined_score", safe_numeric_average(p.metrics)),
                 reverse=True,
             )
 
@@ -858,9 +855,7 @@ def get_island_stats(self) -> List[dict]:
 
             if island_programs:
                 scores = [
-                    p.metrics.get(
-                        "combined_score", sum(p.metrics.values()) / max(1, len(p.metrics))
-                    )
+                    p.metrics.get("combined_score", safe_numeric_average(p.metrics))
                     for p in island_programs
                 ]
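
Every call site replaced above previously computed sum(p.metrics.values()) / max(1, len(p.metrics)), which raises a TypeError as soon as one metric value is a string (for example an error message stored alongside the scores). The metrics_utils module itself is not shown in this view; the following is a minimal sketch of what safe_numeric_average presumably does, stated as an assumption rather than the actual implementation:

def safe_numeric_average(metrics: dict) -> float:
    # Assumed behavior: average only the numeric values, ignore everything else,
    # and fall back to 0.0 when nothing numeric is present.
    numeric = [
        v for v in metrics.values()
        if isinstance(v, (int, float)) and not isinstance(v, bool)
    ]
    return sum(numeric) / len(numeric) if numeric else 0.0

print(safe_numeric_average({"score": 0.8, "speed": 0.6, "error": "timeout"}))  # 0.7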

openevolve/evaluator.py

Lines changed: 2 additions & 1 deletion
@@ -20,6 +20,7 @@
 from openevolve.llm.ensemble import LLMEnsemble
 from openevolve.utils.async_utils import TaskPool, run_in_executor
 from openevolve.prompt.sampler import PromptSampler
+from openevolve.utils.format_utils import format_metrics_safe
 
 logger = logging.getLogger(__name__)
 
@@ -123,7 +124,7 @@ async def evaluate_program(
         elapsed = time.time() - start_time
         logger.info(
             f"Evaluated program{program_id_str} in {elapsed:.2f}s: "
-            f"{', '.join(f'{name}={value:.4f}' for name, value in metrics.items())}"
+            f"{format_metrics_safe(metrics)}"
         )
 
         return metrics
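
The evaluator change applies the same safe formatter to per-program evaluation logs. A quick standalone demonstration of the failure mode the old inline formatting had (plain Python behavior, independent of this repository; the example metric names are illustrative):

metrics = {"combined_score": 0.91, "error": "timeout in stage 2"}

try:
    print(", ".join(f"{name}={value:.4f}" for name, value in metrics.items()))
except ValueError as exc:
    # The .4f format spec rejects the non-numeric value
    print(f"old formatting raised: {exc}")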
