
Commit 1b6e046

have the client manage the TPE
1 parent 5a122a9 commit 1b6e046


2 files changed: +7 -5 lines changed


codeflash/api/aiservice.py

Lines changed: 4 additions & 5 deletions
@@ -35,9 +35,6 @@
 from codeflash.models.models import AIServiceCodeRepairRequest, AIServiceRefinerRequest
 from codeflash.result.explanation import Explanation
 
-multi_model_executor = concurrent.futures.ThreadPoolExecutor(max_workers=10, thread_name_prefix="multi_model")
-
-
 class AiServiceClient:
     def __init__(self) -> None:
         self.base_url = self.get_aiservice_base_url()

@@ -251,6 +248,7 @@ def optimize_python_code_multi_model(
         *,
         is_async: bool = False,
         sequence_offset: int = 0,
+        executor: concurrent.futures.ThreadPoolExecutor | None = None,
     ) -> tuple[list[OptimizedCandidate], int]:
         """Generate optimizations using multiple models in parallel."""
         logger.info("Generating optimized candidates…")

@@ -264,7 +262,7 @@ def optimize_python_code_multi_model(
             call_trace_id = f"{base_trace_id[:-3]}0{call_index:02x}"
             call_sequence = sequence_offset + call_index + 1
             call_index += 1
-            future = multi_model_executor.submit(
+            future = executor.submit(
                 self.optimize_python_code,
                 source_code,
                 dependency_code,

@@ -299,6 +297,7 @@ def optimize_python_code_line_profiler_multi_model(
         model_distribution: list[tuple[str, int]],
         experiment_metadata: ExperimentMetadata | None = None,
         sequence_offset: int = 0,
+        executor: concurrent.futures.ThreadPoolExecutor | None = None,
     ) -> tuple[list[OptimizedCandidate], int]:
         """Generate line profiler optimizations using multiple models in parallel."""
         logger.info("Generating optimized candidates with line profiler…")

@@ -312,7 +311,7 @@ def optimize_python_code_line_profiler_multi_model(
             call_trace_id = f"{base_trace_id[:-3]}1{call_index:02x}"
             call_sequence = sequence_offset + call_index + 1
             call_index += 1
-            future = multi_model_executor.submit(
+            future = executor.submit(
                 self.optimize_python_code_line_profiler,
                 source_code,
                 dependency_code,
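
Taken together, these hunks drop the module-level thread pool from aiservice.py and make AiServiceClient schedule work on an executor its caller passes in. A minimal sketch of that pattern, assuming hypothetical names (ApiClient, optimize_multi_model, _optimize_one, payloads); only the executor parameter, ThreadPoolExecutor, and submit() come from the actual diff:

from __future__ import annotations

import concurrent.futures


class ApiClient:
    """Sketch: the client submits work to a pool it does not own or shut down."""

    def optimize_multi_model(
        self,
        payloads: list[dict],
        *,
        executor: concurrent.futures.ThreadPoolExecutor | None = None,
    ) -> list[str]:
        # As in the commit, the parameter defaults to None but submit() is called
        # directly, so the caller is expected to supply a pool.
        assert executor is not None, "caller must pass its ThreadPoolExecutor"
        futures = [executor.submit(self._optimize_one, p) for p in payloads]
        return [f.result() for f in concurrent.futures.as_completed(futures)]

    def _optimize_one(self, payload: dict) -> str:
        # Stand-in for the real per-model optimization request.
        return f"optimized:{payload.get('model', 'unknown')}"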

codeflash/optimization/function_optimizer.py

Lines changed: 3 additions & 0 deletions
@@ -961,6 +961,7 @@ def determine_best_candidate(
             if self.experiment_id
             else None,
             sequence_offset=self.optimize_calls_count,
+            executor=self.executor,
         )
 
         processor = CandidateProcessor(

@@ -1395,6 +1396,7 @@ def generate_optimizations(
             ExperimentMetadata(id=self.experiment_id, group="control") if run_experiment else None,
             is_async=self.function_to_optimize.is_async,
             sequence_offset=N_TESTS_TO_GENERATE_EFFECTIVE,
+            executor=self.executor,
         )
 
         future_references = self.executor.submit(

@@ -1419,6 +1421,7 @@
             ExperimentMetadata(id=self.experiment_id, group="experiment"),
             is_async=self.function_to_optimize.is_async,
             sequence_offset=N_TESTS_TO_GENERATE_EFFECTIVE,
+            executor=self.executor,
         )
         futures.append(future_candidates_exp)
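
On the caller side, FunctionOptimizer forwards the pool it already owns (self.executor, the same one future_references is submitted to above) into each aiservice call. A rough sketch of that client-managed lifetime, where Optimizer, client, and close() are illustrative stand-ins and the max_workers/thread_name_prefix values are simply copied from the global that aiservice.py dropped:

from __future__ import annotations

import concurrent.futures


class Optimizer:
    """Sketch: the caller creates the one thread pool, shares it, and shuts it down."""

    def __init__(self, client) -> None:
        # client is expected to look like the ApiClient sketched above.
        self.client = client
        # One pool per optimizer instead of a module-level global in the API client;
        # worker count and thread name prefix mirror the removed global.
        self.executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=10, thread_name_prefix="multi_model"
        )

    def generate_optimizations(self, payloads: list[dict]) -> list[str]:
        # The same pool drives both the client's multi-model calls and any
        # futures this class submits on its own behalf.
        return self.client.optimize_multi_model(payloads, executor=self.executor)

    def close(self) -> None:
        # Client-managed lifetime: the owner decides when workers wind down.
        self.executor.shutdown(wait=True)

This is the "have the client manage the TPE" idea from the commit title: the ThreadPoolExecutor is created once by the optimizer, reused for every multi-model request, and shut down by the same owner when the run ends.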
