Commit ed2de61

fixes

1 parent 91cc7e2 commit ed2de61
9 files changed: +285 −89 lines changed

examples/function_minimization/evaluator.py

Lines changed: 42 additions & 35 deletions

@@ -5,8 +5,9 @@
 import importlib.util
 import numpy as np
 import time
-import multiprocessing
+import concurrent.futures
 import traceback
+import signal


 def run_with_timeout(func, args=(), kwargs={}, timeout_seconds=5):
@@ -22,31 +23,13 @@ def run_with_timeout(func, args=(), kwargs={}, timeout_seconds=5):
     Returns:
         Result of the function or raises TimeoutError
     """
-
-    def wrapper(queue, func, args, kwargs):
+    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
+        future = executor.submit(func, *args, **kwargs)
         try:
-            result = func(*args, **kwargs)
-            queue.put(("success", result))
-        except Exception as e:
-            queue.put(("error", e))
-
-    queue = multiprocessing.Queue()
-    process = multiprocessing.Process(target=wrapper, args=(queue, func, args, kwargs))
-    process.start()
-    process.join(timeout=timeout_seconds)
-
-    if process.is_alive():
-        process.terminate()
-        process.join()
-        raise TimeoutError(f"Function timed out after {timeout_seconds} seconds")
-
-    if queue.empty():
-        raise TimeoutError("Function ended without returning a result")
-
-    status, result = queue.get()
-    if status == "error":
-        raise result
-    return result
+            result = future.result(timeout=timeout_seconds)
+            return result
+        except concurrent.futures.TimeoutError:
+            raise TimeoutError(f"Function timed out after {timeout_seconds} seconds")


 def safe_float(value):
@@ -107,15 +90,27 @@ def evaluate(program_path):
             # Run with timeout
             result = run_with_timeout(program.run_search, timeout_seconds=5)

-            # Check if we got a tuple of 3 values
-            if not isinstance(result, tuple) or len(result) != 3:
+            # Handle different result formats
+            if isinstance(result, tuple):
+                if len(result) == 3:
+                    x, y, value = result
+                elif len(result) == 2:
+                    # Assume it's (x, y) and calculate value
+                    x, y = result
+                    # Calculate the function value since it wasn't returned
+                    value = np.sin(x) * np.cos(y) + np.sin(x * y) + (x**2 + y**2) / 20
+                    print(f"Trial {trial}: Got 2 values, calculated function value: {value}")
+                else:
+                    print(
+                        f"Trial {trial}: Invalid result format, expected tuple of 2 or 3 values but got {len(result)}"
+                    )
+                    continue
+            else:
                 print(
-                    f"Trial {trial}: Invalid result format, expected tuple of 3 values but got {type(result)}"
+                    f"Trial {trial}: Invalid result format, expected tuple but got {type(result)}"
                 )
                 continue

-            x, y, value = result
-
             end_time = time.time()

             # Ensure all values are float
@@ -264,15 +259,27 @@ def evaluate_stage1(program_path):
        # Run a single trial with timeout
        result = run_with_timeout(program.run_search, timeout_seconds=5)

-        # Check if we got a tuple of 3 values
-        if not isinstance(result, tuple) or len(result) != 3:
+        # Handle different result formats
+        if isinstance(result, tuple):
+            if len(result) == 3:
+                x, y, value = result
+            elif len(result) == 2:
+                # Assume it's (x, y) and calculate value
+                x, y = result
+                # Calculate the function value since it wasn't returned
+                value = np.sin(x) * np.cos(y) + np.sin(x * y) + (x**2 + y**2) / 20
+                print(f"Stage 1: Got 2 values, calculated function value: {value}")
+            else:
+                print(
+                    f"Stage 1: Invalid result format, expected tuple of 2 or 3 values but got {len(result)}"
+                )
+                return {"runs_successfully": 0.0, "error": "Invalid result format"}
+        else:
            print(
-                f"Stage 1: Invalid result format, expected tuple of 3 values but got {type(result)}"
+                f"Stage 1: Invalid result format, expected tuple but got {type(result)}"
            )
            return {"runs_successfully": 0.0, "error": "Invalid result format"}

-        x, y, value = result
-
        # Ensure all values are float
        x = safe_float(x)
        y = safe_float(y)
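
The timeout helper is the core of this change: the multiprocessing version (spawn a process, join with a timeout, terminate it if still alive) is replaced with a single-worker ThreadPoolExecutor. One behavioral difference worth noting: a future that times out does not kill its worker thread, so the callable keeps running in the background, and the with-block's implicit shutdown(wait=True) waits for it to finish. A minimal standalone sketch of how the new helper behaves; slow_search below is a hypothetical stand-in for program.run_search:

import concurrent.futures
import time


def run_with_timeout(func, args=(), kwargs={}, timeout_seconds=5):
    # Same approach as the diff above: submit the callable to a one-worker
    # thread pool and bound the wait with future.result(timeout=...).
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(func, *args, **kwargs)
        try:
            return future.result(timeout=timeout_seconds)
        except concurrent.futures.TimeoutError:
            raise TimeoutError(f"Function timed out after {timeout_seconds} seconds")


def slow_search():
    # Hypothetical stand-in for program.run_search that exceeds the budget.
    time.sleep(10)
    return 1.0, 2.0, 3.0


try:
    run_with_timeout(slow_search, timeout_seconds=1)
except TimeoutError as exc:
    # Raised after ~1s, but the with-block still waits for slow_search to
    # return before this handler runs (running threads are not killed).
    print(exc)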
Lines changed: 1 addition & 0 deletions

@@ -0,0 +1 @@
+scipy

openevolve/controller.py

Lines changed: 10 additions & 12 deletions

@@ -24,6 +24,10 @@
     parse_evolve_blocks,
     parse_full_rewrite,
 )
+from openevolve.utils.format_utils import (
+    format_metrics_safe,
+    format_improvement_safe,
+)

 logger = logging.getLogger(__name__)

@@ -305,7 +309,7 @@ async def run(
                        f"🌟 New best solution found at iteration {i+1}: {child_program.id}"
                    )
                    logger.info(
-                        f"Metrics: {', '.join(f'{name}={value:.4f}' for name, value in child_program.metrics.items())}"
+                        f"Metrics: {format_metrics_safe(child_program.metrics)}"
                    )

                # Save checkpoint
@@ -361,7 +365,7 @@ async def run(
        if best_program:
            logger.info(
                f"Evolution complete. Best program has metrics: "
-                f"{', '.join(f'{name}={value:.4f}' for name, value in best_program.metrics.items())}"
+                f"{format_metrics_safe(best_program.metrics)}"
            )

        # Save the best program (using our tracked best program)
@@ -389,19 +393,13 @@ def _log_iteration(
            child: Child program
            elapsed_time: Elapsed time in seconds
        """
-        # Calculate improvement
-        improvement = {}
-        for metric, value in child.metrics.items():
-            if metric in parent.metrics:
-                diff = value - parent.metrics[metric]
-                improvement[metric] = diff
-
-        improvement_str = ", ".join(f"{name}={diff:+.4f}" for name, diff in improvement.items())
+        # Calculate improvement using safe formatting
+        improvement_str = format_improvement_safe(parent.metrics, child.metrics)

        logger.info(
            f"Iteration {iteration+1}: Child {child.id} from parent {parent.id} "
            f"in {elapsed_time:.2f}s. Metrics: "
-            f"{', '.join(f'{name}={value:.4f}' for name, value in child.metrics.items())} "
+            f"{format_metrics_safe(child.metrics)} "
            f"(Δ: {improvement_str})"
        )

@@ -457,7 +455,7 @@ def _save_checkpoint(self, iteration: int) -> None:

        logger.info(
            f"Saved best program at checkpoint {iteration} with metrics: "
-            f"{', '.join(f'{name}={value:.4f}' for name, value in best_program.metrics.items())}"
+            f"{format_metrics_safe(best_program.metrics)}"
        )

        logger.info(f"Saved checkpoint at iteration {iteration} to {checkpoint_path}")

openevolve/database.py

Lines changed: 13 additions & 12 deletions

@@ -15,6 +15,7 @@

 from openevolve.config import DatabaseConfig
 from openevolve.utils.code_utils import calculate_edit_distance
+from openevolve.utils.metrics_utils import safe_numeric_average

 logger = logging.getLogger(__name__)

@@ -227,10 +228,10 @@ def get_best_program(self, metric: Optional[str] = None) -> Optional[Program]:
            if sorted_programs:
                logger.debug(f"Found best program by combined_score: {sorted_programs[0].id}")
        else:
-            # Sort by average of all metrics as fallback
+            # Sort by average of all numeric metrics as fallback
            sorted_programs = sorted(
                self.programs.values(),
-                key=lambda p: sum(p.metrics.values()) / max(1, len(p.metrics)),
+                key=lambda p: safe_numeric_average(p.metrics),
                reverse=True,
            )
            if sorted_programs:
@@ -281,10 +282,10 @@ def get_top_programs(self, n: int = 10, metric: Optional[str] = None) -> List[Pr
                reverse=True,
            )
        else:
-            # Sort by average of all metrics
+            # Sort by average of all numeric metrics
            sorted_programs = sorted(
                self.programs.values(),
-                key=lambda p: sum(p.metrics.values()) / max(1, len(p.metrics)),
+                key=lambda p: safe_numeric_average(p.metrics),
                reverse=True,
            )

@@ -436,7 +437,7 @@ def _calculate_feature_coords(self, program: Program) -> List[int]:
                if not program.metrics:
                    bin_idx = 0
                else:
-                    avg_score = sum(program.metrics.values()) / len(program.metrics)
+                    avg_score = safe_numeric_average(program.metrics)
                    bin_idx = min(int(avg_score * self.feature_bins), self.feature_bins - 1)
                coords.append(bin_idx)
            elif dim in program.metrics:
@@ -487,9 +488,9 @@ def _is_better(self, program1: Program, program2: Program) -> bool:
        if "combined_score" in program1.metrics and "combined_score" in program2.metrics:
            return program1.metrics["combined_score"] > program2.metrics["combined_score"]

-        # Fallback to average of all metrics
-        avg1 = sum(program1.metrics.values()) / len(program1.metrics)
-        avg2 = sum(program2.metrics.values()) / len(program2.metrics)
+        # Fallback to average of all numeric metrics
+        avg1 = safe_numeric_average(program1.metrics)
+        avg2 = safe_numeric_average(program2.metrics)

        return avg1 > avg2

@@ -508,7 +509,7 @@ def _update_archive(self, program: Program) -> None:
        # Otherwise, find worst program in archive
        archive_programs = [self.programs[pid] for pid in self.archive]
        worst_program = min(
-            archive_programs, key=lambda p: sum(p.metrics.values()) / max(1, len(p.metrics))
+            archive_programs, key=lambda p: safe_numeric_average(p.metrics)
        )

        # Replace if new program is better
@@ -716,7 +717,7 @@ def _enforce_population_limit(self) -> None:
        # Sort by average metric (worst first)
        sorted_programs = sorted(
            all_programs,
-            key=lambda p: sum(p.metrics.values()) / max(1, len(p.metrics)) if p.metrics else 0.0,
+            key=lambda p: safe_numeric_average(p.metrics),
        )

        # Remove worst programs, but never remove the best program
@@ -812,7 +813,7 @@ def migrate_programs(self) -> None:
            # Sort by fitness (using combined_score or average metrics)
            island_programs.sort(
                key=lambda p: p.metrics.get(
-                    "combined_score", sum(p.metrics.values()) / max(1, len(p.metrics))
+                    "combined_score", safe_numeric_average(p.metrics)
                ),
                reverse=True,
            )
@@ -859,7 +860,7 @@ def get_island_stats(self) -> List[dict]:
            if island_programs:
                scores = [
                    p.metrics.get(
-                        "combined_score", sum(p.metrics.values()) / max(1, len(p.metrics))
+                        "combined_score", safe_numeric_average(p.metrics)
                    )
                    for p in island_programs
                ]
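
Every sum(p.metrics.values()) / len(...) average above is replaced by safe_numeric_average from openevolve.utils.metrics_utils, which is imported at the top of the file but not defined in this diff. A minimal sketch of the behavior its call sites imply, offered as an assumption rather than the actual implementation: average only the numeric values and fall back to 0.0 when there are none (which also covers the removed if p.metrics else 0.0 guard in _enforce_population_limit):

from typing import Any, Dict


def safe_numeric_average(metrics: Dict[str, Any]) -> float:
    # Assumption: keep only real numbers (bools excluded), matching the
    # "average of all numeric metrics" comments introduced by this commit.
    numeric = [
        float(v)
        for v in metrics.values()
        if isinstance(v, (int, float)) and not isinstance(v, bool)
    ]
    return sum(numeric) / len(numeric) if numeric else 0.0


print(safe_numeric_average({"score": 0.8, "error": "timeout"}))  # 0.8
print(safe_numeric_average({}))  # 0.0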

openevolve/evaluator.py

Lines changed: 2 additions & 1 deletion

@@ -18,6 +18,7 @@
 from openevolve.config import EvaluatorConfig
 from openevolve.llm.ensemble import LLMEnsemble
 from openevolve.utils.async_utils import TaskPool, run_in_executor
+from openevolve.utils.format_utils import format_metrics_safe

 logger = logging.getLogger(__name__)

@@ -119,7 +120,7 @@ async def evaluate_program(
        elapsed = time.time() - start_time
        logger.info(
            f"Evaluated program{program_id_str} in {elapsed:.2f}s: "
-            f"{', '.join(f'{name}={value:.4f}' for name, value in metrics.items())}"
+            f"{format_metrics_safe(metrics)}"
        )

        return metrics
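
The same swap appears in the evaluator's logging. As a quick illustration of the failure mode the inline join has when an evaluator returns a non-numeric metric, such as the {"runs_successfully": 0.0, "error": "Invalid result format"} dict seen earlier in this commit (presumably the motivation for these changes):

metrics = {"runs_successfully": 0.0, "error": "Invalid result format"}

# Old logging expression: a str value cannot take the ':.4f' format spec.
try:
    ", ".join(f"{name}={value:.4f}" for name, value in metrics.items())
except ValueError as exc:
    print(exc)  # Unknown format code 'f' for object of type 'str'

# New logging expression (using the format_metrics_safe sketch shown after
# the controller.py diff): degrades to plain strings instead of raising.
# format_metrics_safe(metrics) -> "runs_successfully=0.0000, error=Invalid result format"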
