 )
 from codeflash.code_utils.formatter import format_code, sort_imports
 from codeflash.code_utils.instrument_existing_tests import inject_profiling_into_existing_test
+from codeflash.code_utils.lprof_utils import add_decorator_imports
 from codeflash.code_utils.remove_generated_tests import remove_functions_from_generated_tests
 from codeflash.code_utils.static_analysis import get_first_top_level_function_or_method_ast
 from codeflash.code_utils.time_utils import humanize_runtime
-from codeflash.code_utils.lprof_utils import add_decorator_imports, prepare_lprofiler_files
 from codeflash.context import code_context_extractor
 from codeflash.discovery.functions_to_optimize import FunctionToOptimize
 from codeflash.either import Failure, Success, is_successful
@@ -65,10 +65,10 @@
 from codeflash.verification.concolic_testing import generate_concolic_tests
 from codeflash.verification.equivalence import compare_test_results
 from codeflash.verification.instrument_codeflash_capture import instrument_codeflash_capture
-from codeflash.verification.parse_test_output import parse_test_results
 from codeflash.verification.parse_lprof_test_output import parse_lprof_results
+from codeflash.verification.parse_test_output import parse_test_results
 from codeflash.verification.test_results import TestResults, TestType
-from codeflash.verification.test_runner import run_behavioral_tests, run_benchmarking_tests
+from codeflash.verification.test_runner import run_behavioral_tests, run_benchmarking_tests, run_lprof_tests
 from codeflash.verification.verification_utils import get_test_file_path
 from codeflash.verification.verifier import generate_tests

@@ -78,7 +78,7 @@
     from codeflash.either import Result
     from codeflash.models.models import CoverageData, FunctionSource, OptimizedCandidate
     from codeflash.verification.verification_utils import TestConfig
-    from collections import deque
+

 class FunctionOptimizer:
     def __init__(
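The new lprof_utils and parse_lprof_test_output imports above suggest the instrumentation is built on the line_profiler package. As a point of reference, here is a minimal sketch of that package's decorator-style API (public line_profiler names only; the codeflash wrappers themselves are not shown, and hot_loop and the file name are illustrative):

# Minimal sketch of the public line_profiler API assumed to underlie lprof_utils.
from line_profiler import LineProfiler


def hot_loop(n: int) -> int:
    total = 0
    for i in range(n):
        total += i * i
    return total


profiler = LineProfiler()
profiled_hot_loop = profiler(hot_loop)  # same effect as decorating hot_loop
profiled_hot_loop(10_000)

profiler.print_stats()                  # per-line hit counts and timings
profiler.dump_stats("baseline.lprof")   # on-disk stats, roughly what a
                                        # lprofiler_database_file would hold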
@@ -209,6 +209,7 @@ def optimize_function(self) -> Result[BestOptimization, str]:
                 and "." in function_source.qualified_name
             ):
                 file_path_to_helper_classes[function_source.file_path].add(function_source.qualified_name.split(".")[0])
+
         baseline_result = self.establish_original_code_baseline(  # this needs better typing
             code_context=code_context,
             original_helper_code=original_helper_code,
@@ -232,27 +233,7 @@ def optimize_function(self) -> Result[BestOptimization, str]:
             return Failure("The threshold for test coverage was not met.")

         best_optimization = None
-        lprof_generated_results = []
-        logger.info(f"Adding more candidates based on lineprof info, calling ai service")
-        with concurrent.futures.ThreadPoolExecutor(max_workers=N_TESTS_TO_GENERATE + 2) as executor:
-            future_optimization_candidates_lp = executor.submit(self.aiservice_client.optimize_python_code_line_profiler,
-                source_code=code_context.read_writable_code,
-                dependency_code=code_context.read_only_context_code,
-                trace_id=self.function_trace_id,
-                line_profiler_results=original_code_baseline.lprof_results,
-                num_candidates=10,
-                experiment_metadata=None)
-            future = [future_optimization_candidates_lp]
-            concurrent.futures.wait(future)
-            lprof_generated_results = future[0].result()
-            if len(lprof_generated_results) == 0:
-                logger.info(f"Generated tests with line profiler failed.")
-            else:
-                logger.info(f"Generated tests with line profiler succeeded. Appending to optimization candidates.")
-                logger.info(f"initial optimization candidates: {len(optimizations_set.control)}")
-                optimizations_set.control.extend(lprof_generated_results)
-                logger.info(f"After adding optimization candidates: {len(optimizations_set.control)}")
-        #append to optimization candidates
+
         for _u, candidates in enumerate([optimizations_set.control, optimizations_set.experiment]):
             if candidates is None:
                 continue
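The block removed above drove a single AI-service call through a thread pool; submitting one task and immediately waiting on it behaves the same as a plain synchronous call. A self-contained sketch of that concurrent.futures pattern (generate_candidates is a stand-in, not a codeflash API):

import concurrent.futures


def generate_candidates(num_candidates: int) -> list[str]:
    # Stand-in for the AI-service call made in the removed block.
    return [f"candidate-{i}" for i in range(num_candidates)]


with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
    future = executor.submit(generate_candidates, 10)
    concurrent.futures.wait([future])   # blocks until the single task finishes
    candidates = future.result()

print(len(candidates))  # 10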
@@ -782,7 +763,7 @@ def establish_original_code_baseline(
         with progress_bar(f"Establishing original code baseline for {self.function_to_optimize.function_name}"):
             assert (test_framework := self.args.test_framework) in ["pytest", "unittest"]
             success = True
-            lprof_results = ''
+
             test_env = os.environ.copy()
             test_env["CODEFLASH_TEST_ITERATION"] = "0"
             test_env["CODEFLASH_TRACER_DISABLE"] = "1"
@@ -793,7 +774,6 @@ def establish_original_code_baseline(
                 test_env["PYTHONPATH"] += os.pathsep + str(self.args.project_root)

             coverage_results = None
-            lprofiler_results = None
             # Instrument codeflash capture
             try:
                 instrument_codeflash_capture(
@@ -806,7 +786,6 @@ def establish_original_code_baseline(
                     optimization_iteration=0,
                     testing_time=TOTAL_LOOPING_TIME,
                     enable_coverage=test_framework == "pytest",
-                    enable_lprofiler=False,
                     code_context=code_context,
                 )
             finally:
@@ -822,42 +801,30 @@ def establish_original_code_baseline(
                 return Failure("Failed to establish a baseline for the original code - behavioral tests failed.")
             if not coverage_critic(coverage_results, self.args.test_framework):
                 return Failure("The threshold for test coverage was not met.")
-            #Running lprof now
-            try:
-                #add decorator here and import too
-                lprofiler_database_file = prepare_lprofiler_files("baseline")
-                #add decorator config to file, need to delete afterwards
-                files_to_instrument = [self.function_to_optimize.file_path]
-                fns_to_instrument = [self.function_to_optimize.function_name]
-                for helper_obj in code_context.helper_functions:
-                    files_to_instrument.append(helper_obj.file_path)
-                    fns_to_instrument.append(helper_obj.qualified_name)
-                add_decorator_imports(files_to_instrument, fns_to_instrument, lprofiler_database_file)
-                #output doesn't matter, just need to run it
-                lprof_cmd_results, _ = self.run_and_parse_tests(
-                    testing_type=TestingMode.BEHAVIOR,
-                    test_env=test_env,
-                    test_files=self.test_files,
-                    optimization_iteration=0,
-                    testing_time=TOTAL_LOOPING_TIME,
-                    enable_coverage=False,
-                    enable_lprofiler=test_framework == "pytest",
-                    code_context=code_context,
-                    lprofiler_database_file=lprofiler_database_file,
-                )
-                #real magic happens here
-                lprof_results = parse_lprof_results(lprofiler_database_file)
-            except Exception as e:
-                logger.warning(f"Failed to run lprof for {self.function_to_optimize.function_name}. SKIPPING OPTIMIZING THIS FUNCTION.")
-                console.rule()
-                console.print(f"Failed to run lprof for {self.function_to_optimize.function_name}")
-                console.rule()
-            finally:
-                # Remove decorators and lineprof import
-                self.write_code_and_helpers(
-                    self.function_to_optimize_source_code, original_helper_code, self.function_to_optimize.file_path
-                )
             if test_framework == "pytest":
+                try:
+                    lprofiler_database_file = add_decorator_imports(
+                        self.function_to_optimize, code_context)
+                    lprof_results, _ = self.run_and_parse_tests(
+                        testing_type=TestingMode.LPROF,
+                        test_env=test_env,
+                        test_files=self.test_files,
+                        optimization_iteration=0,
+                        testing_time=TOTAL_LOOPING_TIME,
+                        enable_coverage=False,
+                        code_context=code_context,
+                        lprofiler_database_file=lprofiler_database_file,
+                    )
+                finally:
+                    # Restore the original source, removing the line-profiler decorators and imports
+                    self.write_code_and_helpers(
+                        self.function_to_optimize_source_code, original_helper_code, self.function_to_optimize.file_path
+                    )
+                if not lprof_results:
+                    logger.warning(
+                        f"Couldn't run line profiler for original function {self.function_to_optimize.function_name}"
+                    )
+                    console.rule()
                 benchmarking_results, _ = self.run_and_parse_tests(
                     testing_type=TestingMode.PERFORMANCE,
                     test_env=test_env,
@@ -867,6 +834,7 @@ def establish_original_code_baseline(
                     enable_coverage=False,
                     code_context=code_context,
                 )
+
             else:
                 benchmarking_results = TestResults()
                 start_time: float = time.time()
@@ -928,7 +896,7 @@ def establish_original_code_baseline(
                     benchmarking_test_results=benchmarking_results,
                     runtime=total_timing,
                     coverage_results=coverage_results,
-                    lprof_results=lprof_results,
+                    lprofiler_test_results=lprof_results,
                 ),
                 functions_to_remove,
             )
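The rewritten baseline above instruments the target with add_decorator_imports, runs the tests in TestingMode.LPROF, and always writes the original source back in the finally block. A self-contained sketch of that instrument-run-restore shape, with hypothetical helpers standing in for the codeflash ones:

import tempfile
from pathlib import Path


def add_profiling_decorator(path: Path) -> None:
    # Stand-in for add_decorator_imports: prepend an import and a decorator.
    source = path.read_text()
    path.write_text("from line_profiler import profile\n\n@profile\n" + source)


def run_profiled_tests(path: Path) -> None:
    # Stand-in for run_and_parse_tests(testing_type=TestingMode.LPROF, ...).
    print(f"running tests against instrumented {path.name}")


with tempfile.TemporaryDirectory() as tmp:
    target = Path(tmp) / "example_module.py"
    target.write_text("def hot_loop(n):\n    return sum(i * i for i in range(n))\n")
    original_source = target.read_text()
    try:
        add_profiling_decorator(target)
        run_profiled_tests(target)
    finally:
        # Mirrors write_code_and_helpers: the untouched source always comes back.
        target.write_text(original_source)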
@@ -1060,12 +1028,11 @@ def run_and_parse_tests(
         testing_time: float = TOTAL_LOOPING_TIME,
         *,
         enable_coverage: bool = False,
-        enable_lprofiler: bool = False,
         pytest_min_loops: int = 5,
         pytest_max_loops: int = 100_000,
         code_context: CodeOptimizationContext | None = None,
         unittest_loop_index: int | None = None,
-        lprofiler_database_file: str | None = None,
+        lprofiler_database_file: Path | None = None,
     ) -> tuple[TestResults, CoverageData | None]:
         coverage_database_file = None
         coverage_config_file = None
@@ -1079,7 +1046,19 @@ def run_and_parse_tests(
                 pytest_timeout=INDIVIDUAL_TESTCASE_TIMEOUT,
                 verbose=True,
                 enable_coverage=enable_coverage,
-                enable_lprofiler=enable_lprofiler,
+            )
+        elif testing_type == TestingMode.LPROF:
+            result_file_path, run_result = run_lprof_tests(
+                test_files,
+                cwd=self.project_root,
+                test_env=test_env,
+                pytest_cmd=self.test_cfg.pytest_cmd,
+                pytest_timeout=INDIVIDUAL_TESTCASE_TIMEOUT,
+                pytest_target_runtime_seconds=testing_time,
+                pytest_min_loops=pytest_min_loops,
+                pytest_max_loops=pytest_max_loops,
+                test_framework=self.test_cfg.test_framework,
+                lprofiler_database_file=lprofiler_database_file
             )
         elif testing_type == TestingMode.PERFORMANCE:
             result_file_path, run_result = run_benchmarking_tests(
@@ -1108,7 +1087,7 @@ def run_and_parse_tests(
                 f"stdout: {run_result.stdout}\n"
                 f"stderr: {run_result.stderr}\n"
             )
-        if not enable_lprofiler:
+        if testing_type in [TestingMode.BEHAVIOR, TestingMode.PERFORMANCE]:
             results, coverage_results = parse_test_results(
                 test_xml_path=result_file_path,
                 test_files=test_files,
@@ -1122,11 +1101,9 @@ def run_and_parse_tests(
                 coverage_database_file=coverage_database_file,
                 coverage_config_file=coverage_config_file,
             )
-            return results, coverage_results
         else:
-            #maintaining the function signature for the lprofiler
-            return TestResults(), None
-
+            results, coverage_results = parse_lprof_results(lprofiler_database_file=lprofiler_database_file)
+        return results, coverage_results

     def generate_and_instrument_tests(
         self,
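With the edits above, run_and_parse_tests selects a runner by TestingMode and every branch yields the same (results, coverage) pair, so a single return statement at the end serves behavior, performance, and line-profiler runs alike. A small self-contained sketch of that shape (the enum values mirror the diff; the parse helpers are stand-ins):

from enum import Enum


class TestingMode(Enum):
    BEHAVIOR = "behavior"
    PERFORMANCE = "performance"
    LPROF = "lprof"


def parse_xml_results(path: str) -> tuple[list[str], dict | None]:
    # Stand-in for parse_test_results on a JUnit XML report.
    return ([f"case from {path}"], {"lines_covered": 10})


def parse_lprof_database(path: str) -> tuple[list[str], None]:
    # Stand-in for parse_lprof_results on the line-profiler database.
    return ([f"timings from {path}"], None)


def collect_results(mode: TestingMode, result_path: str) -> tuple[list[str], dict | None]:
    if mode in (TestingMode.BEHAVIOR, TestingMode.PERFORMANCE):
        results, coverage = parse_xml_results(result_path)
    else:
        results, coverage = parse_lprof_database(result_path)
    return results, coverage


print(collect_results(TestingMode.LPROF, "baseline.lprof"))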