@@ -43,11 +43,11 @@
 )
 from codeflash.code_utils.config_consts import (
     INDIVIDUAL_TESTCASE_TIMEOUT,
+    N_CANDIDATES_EFFECTIVE,
+    N_CANDIDATES_LP_EFFECTIVE,
+    N_TESTS_TO_GENERATE_EFFECTIVE,
     REPEAT_OPTIMIZATION_PROBABILITY,
-    get_n_candidates,
-    get_n_candidates_lp,
-    get_n_tests_to_generate,
-    get_total_looping_time,
+    TOTAL_LOOPING_TIME_EFFECTIVE,
 )
 from codeflash.code_utils.deduplicate_code import normalize_code
 from codeflash.code_utils.edit_generated_tests import (
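
Replacing the `get_*()` getters with `*_EFFECTIVE` constants implies the effective values are now resolved once, when `config_consts` is imported, rather than on every call. A minimal sketch of what that module could look like under this assumption; the defaults, environment-variable names, and helpers below are hypothetical, and only the four imported constant names come from this diff:

```python
# Hypothetical sketch of codeflash/code_utils/config_consts.py -- not the
# real module. Defaults and env-var names below are illustrative only.
import os


def _int_env(name: str, default: int) -> int:
    """Read an optional integer override from the environment, once."""
    raw = os.environ.get(name)
    return int(raw) if raw is not None else default


def _float_env(name: str, default: float) -> float:
    """Read an optional float override from the environment, once."""
    raw = os.environ.get(name)
    return float(raw) if raw is not None else default


# "Effective" values are computed a single time at import, which is why
# callers can treat them as plain constants instead of calling getters.
N_TESTS_TO_GENERATE_EFFECTIVE = _int_env("CODEFLASH_N_TESTS", 2)
N_CANDIDATES_EFFECTIVE = _int_env("CODEFLASH_N_CANDIDATES", 10)
N_CANDIDATES_LP_EFFECTIVE = _int_env("CODEFLASH_N_CANDIDATES_LP", 6)
TOTAL_LOOPING_TIME_EFFECTIVE = _float_env("CODEFLASH_LOOPING_TIME", 10.0)
```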
@@ -232,7 +232,7 @@ def __init__(
         self.generate_and_instrument_tests_results: (
             tuple[GeneratedTestsList, dict[str, set[FunctionCalledInTest]], OptimizationSet] | None
         ) = None
-        n_tests = get_n_tests_to_generate()
+        n_tests = N_TESTS_TO_GENERATE_EFFECTIVE
         self.executor = concurrent.futures.ThreadPoolExecutor(
             max_workers=n_tests + 2 if self.experiment_id is None else n_tests + 3
         )
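
The pool size tracking `n_tests` reads as one worker per generated test suite plus spare slots for the requests submitted alongside them, with one extra when an experiment is running; that rationale is an inference, not stated in the diff. A self-contained sketch of the pattern:

```python
# Illustrative sketch only: one worker per test-generation task plus two
# spare slots, so every submission can start concurrently.
import concurrent.futures
import time


def generate_tests(index: int) -> str:
    time.sleep(0.1)  # stand-in for a test-generation request
    return f"test-suite-{index}"


n_tests = 2  # stand-in for N_TESTS_TO_GENERATE_EFFECTIVE
with concurrent.futures.ThreadPoolExecutor(max_workers=n_tests + 2) as executor:
    futures = [executor.submit(generate_tests, i) for i in range(n_tests)]
    # Hypothetical extra tasks occupying the spare slots.
    futures.append(executor.submit(lambda: "optimization-candidates"))
    futures.append(executor.submit(lambda: "lp-optimization-candidates"))
    results = [f.result() for f in futures]
```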
@@ -284,7 +284,7 @@ def generate_and_instrument_tests(
         ]
     ]:
         """Generate and instrument tests, returning all necessary data for optimization."""
-        n_tests = get_n_tests_to_generate()
+        n_tests = N_TESTS_TO_GENERATE_EFFECTIVE
         generated_test_paths = [
             get_test_file_path(
                 self.test_cfg.tests_root, self.function_to_optimize.function_name, test_index, test_type="unit"
@@ -477,7 +477,7 @@ def determine_best_candidate(
                 dependency_code=code_context.read_only_context_code,
                 trace_id=self.function_trace_id[:-4] + exp_type if self.experiment_id else self.function_trace_id,
                 line_profiler_results=original_code_baseline.line_profile_results["str_out"],
-                num_candidates=get_n_candidates_lp(),
+                num_candidates=N_CANDIDATES_LP_EFFECTIVE,
                 experiment_metadata=ExperimentMetadata(
                     id=self.experiment_id, group="control" if exp_type == "EXP0" else "experiment"
                 )
@@ -1013,7 +1013,7 @@ def generate_tests_and_optimizations(
         generated_perf_test_paths: list[Path],
         run_experiment: bool = False,  # noqa: FBT001, FBT002
     ) -> Result[tuple[GeneratedTestsList, dict[str, set[FunctionCalledInTest]], OptimizationSet], str]:
-        n_tests = get_n_tests_to_generate()
+        n_tests = N_TESTS_TO_GENERATE_EFFECTIVE
         assert len(generated_test_paths) == n_tests
         console.rule()
         # Submit the test generation task as future
@@ -1024,7 +1024,7 @@ def generate_tests_and_optimizations(
             generated_test_paths,
             generated_perf_test_paths,
         )
-        n_candidates = get_n_candidates()
+        n_candidates = N_CANDIDATES_EFFECTIVE
         future_optimization_candidates = self.executor.submit(
             self.aiservice_client.optimize_python_code,
             read_writable_code.markdown,
@@ -1421,7 +1421,7 @@ def establish_original_code_baseline(
             instrument_codeflash_capture(
                 self.function_to_optimize, file_path_to_helper_classes, self.test_cfg.tests_root
             )
-            total_looping_time = get_total_looping_time()
+            total_looping_time = TOTAL_LOOPING_TIME_EFFECTIVE
            behavioral_results, coverage_results = self.run_and_parse_tests(
                testing_type=TestingMode.BEHAVIOR,
                test_env=test_env,
@@ -1561,7 +1561,7 @@ def run_optimized_candidate(
                 self.function_to_optimize, file_path_to_helper_classes, self.test_cfg.tests_root
             )
 
-            total_looping_time = get_total_looping_time()
+            total_looping_time = TOTAL_LOOPING_TIME_EFFECTIVE
             candidate_behavior_results, _ = self.run_and_parse_tests(
                 testing_type=TestingMode.BEHAVIOR,
                 test_env=test_env,
@@ -1614,7 +1614,7 @@ def run_optimized_candidate(
             start_time: float = time.time()
             loop_count = 0
             for i in range(100):
-                if i >= 5 and time.time() - start_time >= get_total_looping_time() * 1.5:
+                if i >= 5 and time.time() - start_time >= TOTAL_LOOPING_TIME_EFFECTIVE * 1.5:
                     # * 1.5 to give unittest a bit more time to run
                     break
                 test_env["CODEFLASH_LOOP_INDEX"] = str(i + 1)
@@ -1623,7 +1623,7 @@ def run_optimized_candidate(
                     test_env=test_env,
                     test_files=self.test_files,
                     optimization_iteration=optimization_candidate_index,
-                    testing_time=get_total_looping_time(),
+                    testing_time=TOTAL_LOOPING_TIME_EFFECTIVE,
                     unittest_loop_index=i + 1,
                 )
                 loop_count = i + 1
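
The two hunks above are an instance of a time-budgeted measurement loop: iterate up to a hard cap, never fewer than five times, and stop once 1.5x the wall-clock budget has elapsed. A generic, runnable sketch of the pattern, where the constants and the measured callable are stand-ins for the real test runs:

```python
# Generic sketch of the budgeted loop above; the real code runs test suites.
import time

TOTAL_LOOPING_TIME_EFFECTIVE = 10.0  # seconds; assumed value
MIN_LOOPS, MAX_LOOPS = 5, 100


def run_with_budget(measure) -> int:
    start_time: float = time.time()
    loop_count = 0
    for i in range(MAX_LOOPS):
        # Always complete MIN_LOOPS iterations for stable timings; then stop
        # once 1.5x the budget has passed (slack for slower test runners).
        if i >= MIN_LOOPS and time.time() - start_time >= TOTAL_LOOPING_TIME_EFFECTIVE * 1.5:
            break
        measure(i + 1)  # 1-based loop index, like CODEFLASH_LOOP_INDEX
        loop_count = i + 1
    return loop_count


loops_done = run_with_budget(lambda idx: time.sleep(0.01))
```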
@@ -1662,7 +1662,7 @@ def run_and_parse_tests(
         test_env: dict[str, str],
         test_files: TestFiles,
         optimization_iteration: int,
-        testing_time: float = get_total_looping_time(),
+        testing_time: float = TOTAL_LOOPING_TIME_EFFECTIVE,
         *,
         enable_coverage: bool = False,
         pytest_min_loops: int = 5,
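
One subtlety in this hunk: Python evaluates default arguments once, when the `def` statement executes, so the old `get_total_looping_time()` default was already frozen at import time and never re-read configuration per call. Swapping in the constant makes that behavior explicit rather than changing it. A quick demonstration:

```python
# Defaults are evaluated at definition time, not per call.
def compute_default() -> float:
    print("default evaluated")
    return 10.0


def run_tests(testing_time: float = compute_default()) -> float:
    return testing_time


# "default evaluated" printed exactly once, when the `def` above executed;
# neither call below re-evaluates the default.
run_tests()
run_tests()
```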
@@ -1813,7 +1813,7 @@ def line_profiler_step(
             test_env=test_env,
             test_files=self.test_files,
             optimization_iteration=0,
-            testing_time=get_total_looping_time(),
+            testing_time=TOTAL_LOOPING_TIME_EFFECTIVE,
             enable_coverage=False,
             code_context=code_context,
             line_profiler_output_file=line_profiler_output_file,