Skip to content

Commit 6e321b5

Browse files
committed
fix linter
1 parent 5119c95 commit 6e321b5

File tree

4 files changed

+45
-25
lines changed

4 files changed

+45
-25
lines changed

openevolve/controller.py

Lines changed: 15 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -292,9 +292,7 @@ async def run(
292292
logger.info(
293293
f"🌟 New best solution found at iteration {i+1}: {child_program.id}"
294294
)
295-
logger.info(
296-
f"Metrics: {_format_metrics(child_program.metrics)}"
297-
)
295+
logger.info(f"Metrics: {_format_metrics(child_program.metrics)}")
298296

299297
# Save checkpoint
300298
if (i + 1) % self.config.checkpoint_interval == 0:
@@ -303,11 +301,17 @@ async def run(
303301
# Check if target score reached
304302
if target_score is not None:
305303
# Only consider numeric metrics for target score calculation
306-
numeric_metrics = [v for v in child_metrics.values() if isinstance(v, (int, float)) and not isinstance(v, bool)]
304+
numeric_metrics = [
305+
v
306+
for v in child_metrics.values()
307+
if isinstance(v, (int, float)) and not isinstance(v, bool)
308+
]
307309
if numeric_metrics:
308310
avg_score = sum(numeric_metrics) / len(numeric_metrics)
309311
if avg_score >= target_score:
310-
logger.info(f"Target score {target_score} reached after {i+1} iterations")
312+
logger.info(
313+
f"Target score {target_score} reached after {i+1} iterations"
314+
)
311315
break
312316

313317
except Exception as e:
@@ -382,7 +386,12 @@ def _log_iteration(
382386
for metric, value in child.metrics.items():
383387
if metric in parent.metrics:
384388
# Only calculate diff for numeric values
385-
if isinstance(value, (int, float)) and isinstance(parent.metrics[metric], (int, float)) and not isinstance(value, bool) and not isinstance(parent.metrics[metric], bool):
389+
if (
390+
isinstance(value, (int, float))
391+
and isinstance(parent.metrics[metric], (int, float))
392+
and not isinstance(value, bool)
393+
and not isinstance(parent.metrics[metric], bool)
394+
):
386395
try:
387396
diff = value - parent.metrics[metric]
388397
improvement[metric] = diff

openevolve/database.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -21,13 +21,17 @@
2121

2222
def _safe_sum_metrics(metrics: Dict[str, Any]) -> float:
2323
"""Safely sum only numeric metric values, ignoring strings and other types"""
24-
numeric_values = [v for v in metrics.values() if isinstance(v, (int, float)) and not isinstance(v, bool)]
24+
numeric_values = [
25+
v for v in metrics.values() if isinstance(v, (int, float)) and not isinstance(v, bool)
26+
]
2527
return sum(numeric_values) if numeric_values else 0.0
2628

2729

2830
def _safe_avg_metrics(metrics: Dict[str, Any]) -> float:
2931
"""Safely calculate average of only numeric metric values"""
30-
numeric_values = [v for v in metrics.values() if isinstance(v, (int, float)) and not isinstance(v, bool)]
32+
numeric_values = [
33+
v for v in metrics.values() if isinstance(v, (int, float)) and not isinstance(v, bool)
34+
]
3135
return sum(numeric_values) / max(1, len(numeric_values)) if numeric_values else 0.0
3236

3337

@@ -483,9 +487,7 @@ def _update_archive(self, program: Program) -> None:
483487

484488
# Otherwise, find worst program in archive
485489
archive_programs = [self.programs[pid] for pid in self.archive]
486-
worst_program = min(
487-
archive_programs, key=lambda p: _safe_avg_metrics(p.metrics)
488-
)
490+
worst_program = min(archive_programs, key=lambda p: _safe_avg_metrics(p.metrics))
489491

490492
# Replace if new program is better
491493
if self._is_better(program, worst_program):

openevolve/evaluator.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -114,18 +114,17 @@ async def evaluate_program(
114114

115115
elapsed = time.time() - start_time
116116
program_id_str = f" {program_id}" if program_id else ""
117-
117+
118118
# Format metrics properly, handling both numeric and string values
119119
metric_strs = []
120120
for name, value in metrics.items():
121121
if isinstance(value, (int, float)):
122-
metric_strs.append(f'{name}={value:.4f}')
122+
metric_strs.append(f"{name}={value:.4f}")
123123
else:
124-
metric_strs.append(f'{name}={value}')
125-
124+
metric_strs.append(f"{name}={value}")
125+
126126
logger.info(
127-
f"Evaluated program{program_id_str} in {elapsed:.2f}s: "
128-
f"{', '.join(metric_strs)}"
127+
f"Evaluated program{program_id_str} in {elapsed:.2f}s: " f"{', '.join(metric_strs)}"
129128
)
130129

131130
return metrics

openevolve/prompt/sampler.py

Lines changed: 18 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -164,16 +164,18 @@ def _identify_improvement_areas(
164164
# Only compare numeric metrics
165165
if not isinstance(value, (int, float)) or isinstance(value, bool):
166166
continue
167-
167+
168168
improved = True
169169
regressed = True
170170

171171
for attempt in recent_attempts:
172172
attempt_value = attempt["metrics"].get(metric, 0)
173173
# Skip comparison if attempt value is not numeric
174-
if not isinstance(attempt_value, (int, float)) or isinstance(attempt_value, bool):
174+
if not isinstance(attempt_value, (int, float)) or isinstance(
175+
attempt_value, bool
176+
):
175177
continue
176-
178+
177179
if attempt_value <= value:
178180
regressed = False
179181
if attempt_value >= value:
@@ -240,18 +242,22 @@ def _format_evolution_history(
240242

241243
# Get only numeric metrics for comparison
242244
current_numeric_metrics = {
243-
m: v for m, v in program.get("metrics", {}).items()
245+
m: v
246+
for m, v in program.get("metrics", {}).items()
244247
if isinstance(v, (int, float)) and not isinstance(v, bool)
245248
}
246249
parent_numeric_metrics = {
247-
m: v for m, v in parent_metrics.items()
250+
m: v
251+
for m, v in parent_metrics.items()
248252
if isinstance(v, (int, float)) and not isinstance(v, bool)
249253
}
250254

251255
if current_numeric_metrics and parent_numeric_metrics:
252256
# Only compare metrics that exist in both
253-
common_metrics = set(current_numeric_metrics.keys()) & set(parent_numeric_metrics.keys())
254-
257+
common_metrics = set(current_numeric_metrics.keys()) & set(
258+
parent_numeric_metrics.keys()
259+
)
260+
255261
if common_metrics:
256262
if all(
257263
current_numeric_metrics.get(m, 0) >= parent_numeric_metrics.get(m, 0)
@@ -287,7 +293,11 @@ def _format_evolution_history(
287293

288294
# Calculate a composite score from only numeric metrics
289295
metrics_dict = program.get("metrics", {})
290-
numeric_values = [v for v in metrics_dict.values() if isinstance(v, (int, float)) and not isinstance(v, bool)]
296+
numeric_values = [
297+
v
298+
for v in metrics_dict.values()
299+
if isinstance(v, (int, float)) and not isinstance(v, bool)
300+
]
291301
score = sum(numeric_values) / max(1, len(numeric_values)) if numeric_values else 0.0
292302

293303
# Extract key features (this could be more sophisticated)

0 commit comments

Comments
 (0)