88import time
99import subprocess
1010import requests
11- from typing import Optional
11+ import socket
12+ from typing import Optional , Tuple
1213from openai import OpenAI
1314from openevolve .config import Config , LLMModelConfig
1415
1718DEFAULT_PORT = 8000
1819DEFAULT_BASE_URL = f"http://localhost:{ DEFAULT_PORT } /v1"
1920
def find_free_port(start_port: int = 8000, max_tries: int = 100) -> int:
    """Find a free TCP port on localhost, scanning upward from start_port.

    Args:
        start_port: First port to probe.
        max_tries: Number of consecutive ports to try before giving up.

    Returns:
        The first port in [start_port, start_port + max_tries) that accepts
        a bind.

    Raises:
        RuntimeError: If no port in the range is free.

    Note: there is an inherent race between probing the port and the caller
    actually binding it — TODO confirm callers tolerate that window.
    """
    for port in range(start_port, start_port + max_tries):
        # Context manager closes the probe socket exactly once on every path,
        # replacing the original's redundant close() in both try and finally.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            try:
                sock.bind(("localhost", port))
            except OSError:
                # Port already in use; try the next one.
                continue
            return port
    raise RuntimeError(
        f"Could not find free port in range {start_port}-{start_port + max_tries}"
    )
34+
2035def setup_test_env ():
2136 """Set up test environment with local inference"""
2237 os .environ ["OPTILLM_API_KEY" ] = "optillm"
@@ -26,42 +41,62 @@ def get_test_client(base_url: str = DEFAULT_BASE_URL) -> OpenAI:
2641 """Get OpenAI client configured for local optillm"""
2742 return OpenAI (api_key = "optillm" , base_url = base_url )
2843
def start_test_server(
    model: str = TEST_MODEL, port: Optional[int] = None
) -> Tuple[subprocess.Popen, int]:
    """
    Start optillm server for testing.

    Args:
        model: Model identifier passed to optillm via --model.
        port: TCP port for the server; when None a free port is chosen
            with find_free_port().

    Returns:
        Tuple of (process_handle, actual_port_used).

    Raises:
        RuntimeError: If the server does not answer its /health endpoint
            within ~30 seconds; the message includes up to 500 chars of
            captured stdout/stderr when available.
    """
    if port is None:
        port = find_free_port()

    # Set environment for local inference
    env = os.environ.copy()
    env["OPTILLM_API_KEY"] = "optillm"

    print(f"Starting optillm server on port {port}...")

    # Start server; text=True so captured output is str rather than bytes.
    proc = subprocess.Popen(
        ["optillm", "--model", model, "--port", str(port)],
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )

    # Poll the health endpoint, one attempt per second for ~30 seconds.
    for i in range(30):
        try:
            response = requests.get(f"http://localhost:{port}/health", timeout=2)
            if response.status_code == 200:
                print(f"✅ optillm server started successfully on port {port}")
                return proc, port
        except Exception as e:
            if i < 5:  # Only print for first few attempts to avoid spam
                print(f"Attempt {i + 1}: Waiting for server... ({e})")
        time.sleep(1)

    # Server didn't start in time - collect error info.
    stdout = stderr = None
    try:
        # communicate() returns only once the process has exited.
        stdout, stderr = proc.communicate(timeout=2)
        error_msg = f"optillm server failed to start on port {port}"
    except subprocess.TimeoutExpired:
        # Process is still alive but never became healthy; kill it and
        # drain the pipes so we can still report its output.
        proc.kill()
        stdout, stderr = proc.communicate()
        error_msg = f"optillm server failed to start on port {port} (timeout)"
    # BUGFIX: the original attached captured output only in the non-timeout
    # branch; attach it in both so timeout failures are diagnosable too.
    if stdout:
        error_msg += f"\nSTDOUT: {stdout[:500]}"
    if stderr:
        error_msg += f"\nSTDERR: {stderr[:500]}"

    # Clean up (terminate/wait are harmless if the process already exited).
    try:
        proc.terminate()
        proc.wait(timeout=5)
    except subprocess.TimeoutExpired:
        proc.kill()
        proc.wait()

    raise RuntimeError(error_msg)
65100
66101def stop_test_server (proc : subprocess .Popen ):
67102 """Stop the test server"""
0 commit comments