@@ -42,10 +42,10 @@
 )
 from codeflash.code_utils.config_consts import (
     INDIVIDUAL_TESTCASE_TIMEOUT,
-    N_CANDIDATES,
-    N_TESTS_TO_GENERATE,
     REPEAT_OPTIMIZATION_PROBABILITY,
-    TOTAL_LOOPING_TIME,
+    get_n_candidates,
+    get_n_tests_to_generate,
+    get_total_looping_time,
 )
 from codeflash.code_utils.edit_generated_tests import (
     add_runtime_comments_to_generated_tests,
@@ -227,8 +227,9 @@ def __init__(
         self.generate_and_instrument_tests_results: (
             tuple[GeneratedTestsList, dict[str, set[FunctionCalledInTest]], OptimizationSet] | None
         ) = None
+        n_tests = get_n_tests_to_generate()
         self.executor = concurrent.futures.ThreadPoolExecutor(
-            max_workers=N_TESTS_TO_GENERATE + 2 if self.experiment_id is None else N_TESTS_TO_GENERATE + 3
+            max_workers=n_tests + 2 if self.experiment_id is None else n_tests + 3
         )
 
     def can_be_optimized(self) -> Result[tuple[bool, CodeOptimizationContext, dict[Path, str]], str]:
@@ -278,17 +279,18 @@ def generate_and_instrument_tests(
         ]
     ]:
         """Generate and instrument tests, returning all necessary data for optimization."""
+        n_tests = get_n_tests_to_generate()
         generated_test_paths = [
             get_test_file_path(
                 self.test_cfg.tests_root, self.function_to_optimize.function_name, test_index, test_type="unit"
             )
-            for test_index in range(N_TESTS_TO_GENERATE)
+            for test_index in range(n_tests)
         ]
         generated_perf_test_paths = [
             get_test_file_path(
                 self.test_cfg.tests_root, self.function_to_optimize.function_name, test_index, test_type="perf"
             )
-            for test_index in range(N_TESTS_TO_GENERATE)
+            for test_index in range(n_tests)
         ]
 
         with progress_bar(
@@ -971,7 +973,8 @@ def generate_tests_and_optimizations(
         generated_perf_test_paths: list[Path],
         run_experiment: bool = False,  # noqa: FBT001, FBT002
     ) -> Result[tuple[GeneratedTestsList, dict[str, set[FunctionCalledInTest]], OptimizationSet], str]:
-        assert len(generated_test_paths) == N_TESTS_TO_GENERATE
+        n_tests = get_n_tests_to_generate()
+        assert len(generated_test_paths) == n_tests
         console.rule()
         # Submit the test generation task as future
         future_tests = self.submit_test_generation_tasks(
@@ -981,12 +984,13 @@ def generate_tests_and_optimizations(
             generated_test_paths,
             generated_perf_test_paths,
         )
+        n_candidates = get_n_candidates()
         future_optimization_candidates = self.executor.submit(
             self.aiservice_client.optimize_python_code,
             read_writable_code.markdown,
             read_only_context_code,
             self.function_trace_id[:-4] + "EXP0" if run_experiment else self.function_trace_id,
-            N_CANDIDATES,
+            n_candidates,
             ExperimentMetadata(id=self.experiment_id, group="control") if run_experiment else None,
         )
         future_candidates_exp = None
@@ -1001,7 +1005,7 @@ def generate_tests_and_optimizations(
                 read_writable_code.markdown,
                 read_only_context_code,
                 self.function_trace_id[:-4] + "EXP1",
-                N_CANDIDATES,
+                n_candidates,
                 ExperimentMetadata(id=self.experiment_id, group="experiment"),
             )
             futures.append(future_candidates_exp)
@@ -1375,12 +1379,13 @@ def establish_original_code_baseline(
         instrument_codeflash_capture(
             self.function_to_optimize, file_path_to_helper_classes, self.test_cfg.tests_root
         )
+        total_looping_time = get_total_looping_time()
         behavioral_results, coverage_results = self.run_and_parse_tests(
             testing_type=TestingMode.BEHAVIOR,
             test_env=test_env,
             test_files=self.test_files,
             optimization_iteration=0,
-            testing_time=TOTAL_LOOPING_TIME,
+            testing_time=total_looping_time,
             enable_coverage=test_framework == "pytest",
             code_context=code_context,
         )
@@ -1407,15 +1412,15 @@ def establish_original_code_baseline(
                 test_env=test_env,
                 test_files=self.test_files,
                 optimization_iteration=0,
-                testing_time=TOTAL_LOOPING_TIME,
+                testing_time=total_looping_time,
                 enable_coverage=False,
                 code_context=code_context,
             )
         else:
             benchmarking_results = TestResults()
             start_time: float = time.time()
             for i in range(100):
-                if i >= 5 and time.time() - start_time >= TOTAL_LOOPING_TIME * 1.5:
+                if i >= 5 and time.time() - start_time >= total_looping_time * 1.5:
                     # * 1.5 to give unittest a bit more time to run
                     break
                 test_env["CODEFLASH_LOOP_INDEX"] = str(i + 1)
@@ -1424,7 +1429,7 @@ def establish_original_code_baseline(
                     test_env=test_env,
                     test_files=self.test_files,
                     optimization_iteration=0,
-                    testing_time=TOTAL_LOOPING_TIME,
+                    testing_time=total_looping_time,
                     enable_coverage=False,
                     code_context=code_context,
                     unittest_loop_index=i + 1,
@@ -1514,12 +1519,13 @@ def run_optimized_candidate(
             self.function_to_optimize, file_path_to_helper_classes, self.test_cfg.tests_root
         )
 
+        total_looping_time = get_total_looping_time()
         candidate_behavior_results, _ = self.run_and_parse_tests(
             testing_type=TestingMode.BEHAVIOR,
             test_env=test_env,
             test_files=self.test_files,
             optimization_iteration=optimization_candidate_index,
-            testing_time=TOTAL_LOOPING_TIME,
+            testing_time=total_looping_time,
             enable_coverage=False,
         )
         # Remove instrumentation
@@ -1548,7 +1554,7 @@ def run_optimized_candidate(
                 test_env=test_env,
                 test_files=self.test_files,
                 optimization_iteration=optimization_candidate_index,
-                testing_time=TOTAL_LOOPING_TIME,
+                testing_time=total_looping_time,
                 enable_coverage=False,
             )
             loop_count = (
@@ -1566,7 +1572,7 @@ def run_optimized_candidate(
             start_time: float = time.time()
             loop_count = 0
             for i in range(100):
-                if i >= 5 and time.time() - start_time >= TOTAL_LOOPING_TIME * 1.5:
+                if i >= 5 and time.time() - start_time >= get_total_looping_time() * 1.5:
                     # * 1.5 to give unittest a bit more time to run
                     break
                 test_env["CODEFLASH_LOOP_INDEX"] = str(i + 1)
@@ -1575,7 +1581,7 @@ def run_optimized_candidate(
                     test_env=test_env,
                     test_files=self.test_files,
                     optimization_iteration=optimization_candidate_index,
-                    testing_time=TOTAL_LOOPING_TIME,
+                    testing_time=get_total_looping_time(),
                     unittest_loop_index=i + 1,
                 )
                 loop_count = i + 1
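Both unittest benchmarking loops above follow the same time-boxing policy: run at least 5 iterations, cap at 100, and stop early once elapsed wall time exceeds 1.5x the configured looping budget. A self-contained sketch of that policy in isolation (run_once is a hypothetical stand-in for the instrumented test invocation):

import time
from typing import Callable


def run_benchmark_loops(run_once: Callable[[int], None], total_looping_time: float) -> int:
    # At least 5 loops, at most 100; break once 1.5x the time budget has elapsed.
    start_time = time.time()
    loop_count = 0
    for i in range(100):
        if i >= 5 and time.time() - start_time >= total_looping_time * 1.5:
            break
        run_once(i + 1)  # loop index is 1-based, matching CODEFLASH_LOOP_INDEX above
        loop_count = i + 1
    return loop_count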
@@ -1614,7 +1620,7 @@ def run_and_parse_tests(
         test_env: dict[str, str],
         test_files: TestFiles,
         optimization_iteration: int,
-        testing_time: float = TOTAL_LOOPING_TIME,
+        testing_time: float = get_total_looping_time(),
         *,
         enable_coverage: bool = False,
         pytest_min_loops: int = 5,
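One subtlety in the signature change above: a default written as get_total_looping_time() is evaluated once, when run_and_parse_tests is defined, not on every call. If the intent is to re-read the configured value per call, a sentinel default is the usual pattern; a small illustration (assuming the getter sketched earlier, not the project's actual code):

def looping_time_at_definition(testing_time: float = get_total_looping_time()) -> float:
    # The default was captured when this function was defined.
    return testing_time


def looping_time_at_call(testing_time: float | None = None) -> float:
    # Re-read the configured value on each call instead.
    if testing_time is None:
        testing_time = get_total_looping_time()
    return testing_time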
@@ -1753,6 +1759,9 @@ def line_profiler_step(
         self, code_context: CodeOptimizationContext, original_helper_code: dict[Path, str], candidate_index: int
     ) -> dict:
         try:
+            logger.info("Running line profiling to identify performance bottlenecks…")
+            console.rule()
+
             test_env = self.get_test_env(
                 codeflash_loop_index=0, codeflash_test_iteration=candidate_index, codeflash_tracer_disable=1
             )
@@ -1762,7 +1771,7 @@ def line_profiler_step(
                 test_env=test_env,
                 test_files=self.test_files,
                 optimization_iteration=0,
-                testing_time=TOTAL_LOOPING_TIME,
+                testing_time=get_total_looping_time(),
                 enable_coverage=False,
                 code_context=code_context,
                 line_profiler_output_file=line_profiler_output_file,