
Commit cd99e3d

fix linter
1 parent ed2de61 commit cd99e3d

6 files changed: +27, -37 lines

examples/function_minimization/evaluator.py

Lines changed: 1 addition & 3 deletions
@@ -275,9 +275,7 @@ def evaluate_stage1(program_path):
             )
             return {"runs_successfully": 0.0, "error": "Invalid result format"}
         else:
-            print(
-                f"Stage 1: Invalid result format, expected tuple but got {type(result)}"
-            )
+            print(f"Stage 1: Invalid result format, expected tuple but got {type(result)}")
             return {"runs_successfully": 0.0, "error": "Invalid result format"}
 
     # Ensure all values are float

openevolve/controller.py

Lines changed: 1 addition & 3 deletions
@@ -308,9 +308,7 @@ async def run(
                 logger.info(
                     f"🌟 New best solution found at iteration {i+1}: {child_program.id}"
                 )
-                logger.info(
-                    f"Metrics: {format_metrics_safe(child_program.metrics)}"
-                )
+                logger.info(f"Metrics: {format_metrics_safe(child_program.metrics)}")
 
             # Save checkpoint
             if (i + 1) % self.config.checkpoint_interval == 0:
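
The one-line form still routes the metrics through format_metrics_safe (from openevolve/utils/format_utils.py, reformatted in this same commit), which tolerates string-valued metrics that a naive float format spec would choke on. A minimal usage sketch, with a hypothetical metrics dict standing in for child_program.metrics:

    import logging

    from openevolve.utils.format_utils import format_metrics_safe

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # Hypothetical metrics for illustration; real values come from the evaluator.
    metrics = {"combined_score": 0.91, "status": "converged"}
    logger.info(f"Metrics: {format_metrics_safe(metrics)}")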

openevolve/database.py

Lines changed: 3 additions & 9 deletions
@@ -508,9 +508,7 @@ def _update_archive(self, program: Program) -> None:
 
         # Otherwise, find worst program in archive
         archive_programs = [self.programs[pid] for pid in self.archive]
-        worst_program = min(
-            archive_programs, key=lambda p: safe_numeric_average(p.metrics)
-        )
+        worst_program = min(archive_programs, key=lambda p: safe_numeric_average(p.metrics))
 
         # Replace if new program is better
         if self._is_better(program, worst_program):
@@ -812,9 +810,7 @@ def migrate_programs(self) -> None:
 
             # Sort by fitness (using combined_score or average metrics)
             island_programs.sort(
-                key=lambda p: p.metrics.get(
-                    "combined_score", safe_numeric_average(p.metrics)
-                ),
+                key=lambda p: p.metrics.get("combined_score", safe_numeric_average(p.metrics)),
                 reverse=True,
             )
 
@@ -859,9 +855,7 @@ def get_island_stats(self) -> List[dict]:
 
         if island_programs:
             scores = [
-                p.metrics.get(
-                    "combined_score", safe_numeric_average(p.metrics)
-                )
+                p.metrics.get("combined_score", safe_numeric_average(p.metrics))
                 for p in island_programs
            ]
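
All three hunks reformat the same lookup pattern: fitness prefers an explicit combined_score metric and falls back to safe_numeric_average over whatever numeric metrics exist. A minimal sketch of that fallback on plain dicts (the Program wrapper is omitted; the metric values here are hypothetical):

    from openevolve.utils.metrics_utils import safe_numeric_average

    scored = {"combined_score": 0.9, "accuracy": 0.5}
    unscored = {"accuracy": 0.75, "error": "none"}

    # An explicit combined_score wins; otherwise average the numeric values.
    scored.get("combined_score", safe_numeric_average(scored))      # 0.9
    unscored.get("combined_score", safe_numeric_average(unscored))  # 0.75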

openevolve/prompt/sampler.py

Lines changed: 5 additions & 5 deletions
All five changes in this hunk are whitespace-only: the removed lines are blank lines carrying trailing whitespace, which the linter strips.

@@ -246,27 +246,27 @@ def _format_evolution_history(
 
         # Safely compare only numeric metrics
         program_metrics = program.get("metrics", {})
-
+
         # Check if all numeric metrics improved
         numeric_comparisons_improved = []
         numeric_comparisons_regressed = []
-
+
         for m in program_metrics:
             prog_value = program_metrics.get(m, 0)
             parent_value = parent_metrics.get(m, 0)
-
+
             # Only compare if both values are numeric
             if isinstance(prog_value, (int, float)) and isinstance(parent_value, (int, float)):
                 if prog_value >= parent_value:
                     numeric_comparisons_improved.append(True)
                 else:
                     numeric_comparisons_improved.append(False)
-
+
                 if prog_value <= parent_value:
                     numeric_comparisons_regressed.append(True)
                 else:
                     numeric_comparisons_regressed.append(False)
-
+
         # Determine outcome based on numeric comparisons
         if numeric_comparisons_improved and all(numeric_comparisons_improved):
             outcome = "Improvement in all metrics"

openevolve/utils/format_utils.py

Lines changed: 8 additions & 8 deletions
@@ -8,16 +8,16 @@
 def format_metrics_safe(metrics: Dict[str, Any]) -> str:
     """
     Safely format metrics dictionary for logging, handling both numeric and string values.
-
+
     Args:
         metrics: Dictionary of metric names to values
-
+
     Returns:
         Formatted string representation of metrics
     """
     if not metrics:
         return ""
-
+
     formatted_parts = []
     for name, value in metrics.items():
         # Check if value is numeric (int, float)
@@ -31,24 +31,24 @@ def format_metrics_safe(metrics: Dict[str, Any]) -> str:
         else:
             # For non-numeric values (strings, etc.), just convert to string
             formatted_parts.append(f"{name}={value}")
-
+
     return ", ".join(formatted_parts)
 
 
 def format_improvement_safe(parent_metrics: Dict[str, Any], child_metrics: Dict[str, Any]) -> str:
     """
     Safely format improvement metrics for logging.
-
+
     Args:
         parent_metrics: Parent program metrics
         child_metrics: Child program metrics
-
+
     Returns:
         Formatted string representation of improvements
     """
     if not parent_metrics or not child_metrics:
         return ""
-
+
     improvement_parts = []
     for metric, child_value in child_metrics.items():
         if metric in parent_metrics:
@@ -61,5 +61,5 @@ def format_improvement_safe(parent_metrics: Dict[str, Any], child_metrics: Dict[
             except (ValueError, TypeError):
                 # Skip non-numeric comparisons
                 continue
-
+
     return ", ".join(improvement_parts)

openevolve/utils/metrics_utils.py

Lines changed: 9 additions & 9 deletions
@@ -9,16 +9,16 @@ def safe_numeric_average(metrics: Dict[str, Any]) -> float:
     """
     Calculate the average of numeric values in a metrics dictionary,
     safely ignoring non-numeric values like strings.
-
+
     Args:
         metrics: Dictionary of metric names to values
-
+
     Returns:
         Average of numeric values, or 0.0 if no numeric values found
     """
     if not metrics:
         return 0.0
-
+
     numeric_values = []
     for value in metrics.values():
         if isinstance(value, (int, float)):
@@ -30,27 +30,27 @@ def safe_numeric_average(metrics: Dict[str, Any]) -> float:
             except (ValueError, TypeError, OverflowError):
                 # Skip invalid numeric values
                 continue
-
+
     if not numeric_values:
         return 0.0
-
+
     return sum(numeric_values) / len(numeric_values)
 
 
 def safe_numeric_sum(metrics: Dict[str, Any]) -> float:
     """
     Calculate the sum of numeric values in a metrics dictionary,
     safely ignoring non-numeric values like strings.
-
+
     Args:
         metrics: Dictionary of metric names to values
-
+
     Returns:
         Sum of numeric values, or 0.0 if no numeric values found
     """
     if not metrics:
         return 0.0
-
+
     numeric_sum = 0.0
     for value in metrics.values():
         if isinstance(value, (int, float)):
@@ -62,5 +62,5 @@ def safe_numeric_sum(metrics: Dict[str, Any]) -> float:
             except (ValueError, TypeError, OverflowError):
                 # Skip invalid numeric values
                 continue
-
+
     return numeric_sum
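
The docstrings above pin down what these helpers promise; a minimal usage sketch, using values that are exact in binary floating point:

    from openevolve.utils.metrics_utils import safe_numeric_average, safe_numeric_sum

    metrics = {"accuracy": 0.75, "speed": 0.25, "error": "timeout"}

    # The string value is skipped rather than raising.
    safe_numeric_average(metrics)  # (0.75 + 0.25) / 2 = 0.5
    safe_numeric_sum(metrics)      # 0.75 + 0.25 = 1.0
    safe_numeric_average({})       # 0.0 when no numeric values are present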
