@@ -232,7 +232,8 @@ def optimize_function(self) -> Result[BestOptimization, str]:
232232 ):
233233 cleanup_paths (paths_to_cleanup )
234234 return Failure ("The threshold for test coverage was not met." )
235- # request for new optimizations but don't block execution, check for completion later, only adding to control set right now
235+ # request new optimizations, but don't block execution; check for completion later
236+ # add results to both the control and experiment sets, using the same trace_id
236237 best_optimization = None
237238
238239 for _u , candidates in enumerate ([optimizations_set .control , optimizations_set .experiment ]):
@@ -359,30 +360,36 @@ def determine_best_candidate(
359360 )
360361 console .rule ()
361362 candidates = deque (candidates )
363+ # Submit the AI service request to a worker thread while the main thread runs the candidate loop;
364+ # each iteration checks whether the request has finished and, once it has, appends its results to the candidates deque
362365 with concurrent .futures .ThreadPoolExecutor () as executor :
363- future_line_profile_results = executor .submit (self .aiservice_client .optimize_python_code_line_profiler ,
364- source_code = code_context .read_writable_code ,
365- dependency_code = code_context .read_only_context_code ,
366- trace_id = self .function_trace_id ,
367- line_profiler_results = original_code_baseline .line_profile_results ['str_out' ],
368- num_candidates = 10 ,
369- experiment_metadata = None )
366+ future_line_profile_results = executor .submit (
367+ self .aiservice_client .optimize_python_code_line_profiler ,
368+ source_code = code_context .read_writable_code ,
369+ dependency_code = code_context .read_only_context_code ,
370+ trace_id = self .function_trace_id ,
371+ line_profiler_results = original_code_baseline .line_profile_results ["str_out" ],
372+ num_candidates = 10 ,
373+ experiment_metadata = None ,
374+ )
370375 try :
371376 candidate_index = 0
372377 done = False
378+ original_len = len (candidates )
373379 while candidates :
374- # for candidate_index, candidate in enumerate(candidates, start=1):
380+ # for candidate_index, candidate in enumerate(candidates, start=1):
375381 done = True if future_line_profile_results is None else future_line_profile_results .done ()
376382 if done and (future_line_profile_results is not None ):
377383 line_profile_results = future_line_profile_results .result ()
378384 candidates .extend (line_profile_results )
379- logger .info (f"Added result from line profiler to candidates: { len (line_profile_results )} " )
385+ original_len += len (line_profile_results )
386+ logger .info (f"Added results from line profiler to candidates, total candidates now: { original_len } " )
380387 future_line_profile_results = None
381388 candidate_index += 1
382389 candidate = candidates .popleft ()
383390 get_run_tmp_file (Path (f"test_return_values_{ candidate_index } .bin" )).unlink (missing_ok = True )
384391 get_run_tmp_file (Path (f"test_return_values_{ candidate_index } .sqlite" )).unlink (missing_ok = True )
385- logger .info (f"Optimization candidate { candidate_index } /{ len ( candidates ) } :" )
392+ logger .info (f"Optimization candidate { candidate_index } /{ original_len } :" )
386393 code_print (candidate .source_code )
387394 try :
388395 did_update = self .replace_function_and_helpers_with_optimized_code (
@@ -397,7 +404,9 @@ def determine_best_candidate(
397404 except (ValueError , SyntaxError , cst .ParserSyntaxError , AttributeError ) as e :
398405 logger .error (e )
399406 self .write_code_and_helpers (
400- self .function_to_optimize_source_code , original_helper_code , self .function_to_optimize .file_path
407+ self .function_to_optimize_source_code ,
408+ original_helper_code ,
409+ self .function_to_optimize .file_path ,
401410 )
402411 continue
403412
@@ -781,7 +790,7 @@ def establish_original_code_baseline(
781790 original_helper_code : dict [Path , str ],
782791 file_path_to_helper_classes : dict [Path , set [str ]],
783792 ) -> Result [tuple [OriginalCodeBaseline , list [str ]], str ]:
784- line_profile_results = {' timings' : {},' unit' : 0 , ' str_out' : '' }
793+ line_profile_results = {" timings" : {}, " unit" : 0 , " str_out" : "" }
785794 # For the original function - run the tests and get the runtime, plus coverage
786795 with progress_bar (f"Establishing original code baseline for { self .function_to_optimize .function_name } " ):
787796 assert (test_framework := self .args .test_framework ) in ["pytest" , "unittest" ]
@@ -826,8 +835,7 @@ def establish_original_code_baseline(
826835 return Failure ("The threshold for test coverage was not met." )
827836 if test_framework == "pytest" :
828837 try :
829- line_profiler_output_file = add_decorator_imports (
830- self .function_to_optimize , code_context )
838+ line_profiler_output_file = add_decorator_imports (self .function_to_optimize , code_context )
831839 line_profile_results , _ = self .run_and_parse_tests (
832840 testing_type = TestingMode .LINE_PROFILE ,
833841 test_env = test_env ,
@@ -843,7 +851,7 @@ def establish_original_code_baseline(
843851 self .write_code_and_helpers (
844852 self .function_to_optimize_source_code , original_helper_code , self .function_to_optimize .file_path
845853 )
846- if line_profile_results [' str_out' ] == '' :
854+ if line_profile_results [" str_out" ] == "" :
847855 logger .warning (
848856 f"Couldn't run line profiler for original function { self .function_to_optimize .function_name } "
849857 )
@@ -1081,7 +1089,7 @@ def run_and_parse_tests(
10811089 pytest_min_loops = 1 ,
10821090 pytest_max_loops = 1 ,
10831091 test_framework = self .test_cfg .test_framework ,
1084- line_profiler_output_file = line_profiler_output_file
1092+ line_profiler_output_file = line_profiler_output_file ,
10851093 )
10861094 elif testing_type == TestingMode .PERFORMANCE :
10871095 result_file_path , run_result = run_benchmarking_tests (
0 commit comments