@@ -4,19 +4,17 @@
 
 import asyncio
 import json
-import os
 import subprocess
 import tempfile
-import time
 from pathlib import Path
-from typing import Dict, Any, List
+from openevolve.evaluation_result import EvaluationResult
 
-import numpy as np
 
-from openevolve.evaluation_result import EvaluationResult
+def evaluate(program_path: str) -> EvaluationResult:
+    return asyncio.run(_evaluate(program_path))
 
 
-async def evaluate(program_path: str) -> EvaluationResult:
+async def _evaluate(program_path: str) -> EvaluationResult:
     """
     Evaluate a Rust sorting algorithm implementation.
 
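The new top-level `evaluate` is a sync-over-async wrapper: callers get a plain function while the I/O-heavy work stays in the `_evaluate` coroutine. A minimal sketch of the same pattern, with hypothetical `work`/`_work` names standing in for `evaluate`/`_evaluate` (note that `asyncio.run` spins up a fresh event loop and raises `RuntimeError` if one is already running in the current thread):

```python
import asyncio

async def _work(x: int) -> int:
    # Stand-in for the async implementation (_evaluate in the diff).
    await asyncio.sleep(0)
    return x * 2

def work(x: int) -> int:
    # asyncio.run creates a new event loop, runs the coroutine to
    # completion, then closes the loop.
    return asyncio.run(_work(x))

print(work(21))  # prints 42
```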
@@ -41,7 +39,10 @@ async def evaluate(program_path: str) -> EvaluationResult:
     if result.returncode != 0:
         return EvaluationResult(
             metrics={"score": 0.0, "compile_success": 0.0},
-            artifacts={"error": "Failed to create Cargo project", "stderr": result.stderr},
+            artifacts={
+                "error": "Failed to create Cargo project",
+                "stderr": result.stderr,
+            },
         )
 
     # Copy the program to src/lib.rs
@@ -305,7 +306,7 @@ async def evaluate(program_path: str) -> EvaluationResult:
     import sys
 
     if len(sys.argv) > 1:
-        result = asyncio.run(evaluate(sys.argv[1]))
+        result = evaluate(sys.argv[1])
         print(f"Score: {result.metrics['score']:.4f}")
         print(f"Correctness: {result.metrics['correctness']:.4f}")
         print(f"Performance: {result.metrics['performance_score']:.4f}")
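With the wrapper in place, the `__main__` block no longer needs `asyncio.run` itself, and other code can call the evaluator synchronously. A hedged usage sketch, assuming the file is saved as `evaluator.py`, that `EvaluationResult` exposes its artifacts mapping as an attribute, and an illustrative `.rs` path:

```python
from evaluator import evaluate  # module name is an assumption

result = evaluate("sort_program.rs")  # placeholder path to a Rust source file
print(result.metrics["score"])
# On a failed Cargo build, stderr is surfaced through artifacts:
if result.artifacts:
    print(result.artifacts.get("stderr", ""))
```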