Skip to content

Commit 67bb780

Browse files
committed
fix evaluator
1 parent 897cb78 commit 67bb780

File tree

1 file changed

+9
-8
lines changed

1 file changed

+9
-8
lines changed

examples/rust_adaptive_sort/evaluator.py

Lines changed: 9 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -4,19 +4,17 @@
44

55
import asyncio
66
import json
7-
import os
87
import subprocess
98
import tempfile
10-
import time
119
from pathlib import Path
12-
from typing import Dict, Any, List
10+
from openevolve.evaluation_result import EvaluationResult
1311

14-
import numpy as np
1512

16-
from openevolve.evaluation_result import EvaluationResult
13+
def evaluate(program_path: str) -> EvaluationResult:
    """Synchronous entry point: drive the async evaluator to completion.

    Args:
        program_path: Filesystem path to the Rust program under evaluation.

    Returns:
        The EvaluationResult produced by the async `_evaluate` coroutine.
    """
    result = asyncio.run(_evaluate(program_path))
    return result
1715

1816

19-
async def evaluate(program_path: str) -> EvaluationResult:
17+
async def _evaluate(program_path: str) -> EvaluationResult:
2018
"""
2119
Evaluate a Rust sorting algorithm implementation.
2220
@@ -41,7 +39,10 @@ async def evaluate(program_path: str) -> EvaluationResult:
4139
if result.returncode != 0:
4240
return EvaluationResult(
4341
metrics={"score": 0.0, "compile_success": 0.0},
44-
artifacts={"error": "Failed to create Cargo project", "stderr": result.stderr},
42+
artifacts={
43+
"error": "Failed to create Cargo project",
44+
"stderr": result.stderr,
45+
},
4546
)
4647

4748
# Copy the program to src/lib.rs
@@ -305,7 +306,7 @@ async def evaluate(program_path: str) -> EvaluationResult:
305306
import sys
306307

307308
if len(sys.argv) > 1:
308-
result = asyncio.run(evaluate(sys.argv[1]))
309+
result = evaluate(sys.argv[1])
309310
print(f"Score: {result.metrics['score']:.4f}")
310311
print(f"Correctness: {result.metrics['correctness']:.4f}")
311312
print(f"Performance: {result.metrics['performance_score']:.4f}")

0 commit comments

Comments
 (0)