
Commit 4247780

working demo of new opt candidates with lineprof info

1 parent 51b8e27 commit 4247780

File tree (4 files changed: +117 additions, -20 deletions)

  codeflash/api/aiservice.py
  codeflash/models/models.py
  codeflash/optimization/function_optimizer.py
  codeflash/verification/test_runner.py

codeflash/api/aiservice.py
Lines changed: 66 additions & 0 deletions

@@ -135,6 +135,72 @@ def optimize_python_code(
         console.rule()
         return []
 
+    def optimize_python_code_line_profiler(
+        self,
+        source_code: str,
+        dependency_code: str,
+        trace_id: str,
+        line_profiler_results: str,
+        num_candidates: int = 10,
+        experiment_metadata: ExperimentMetadata | None = None,
+    ) -> list[OptimizedCandidate]:
+        """Optimize the given python code for performance by making a request to the Django endpoint.
+
+        Parameters
+        ----------
+        - source_code (str): The python code to optimize.
+        - dependency_code (str): The dependency code used as read-only context for the optimization.
+        - trace_id (str): Trace id of the optimization run.
+        - line_profiler_results (str): Line profiler output for the code being optimized.
+        - num_candidates (int): Number of optimization variants to generate. Default is 10.
+        - experiment_metadata (ExperimentMetadata | None): Any available experiment metadata for this optimization.
+
+        Returns
+        -------
+        - list[OptimizedCandidate]: A list of optimization candidates.
+
+        """
+        payload = {
+            "source_code": source_code,
+            "dependency_code": dependency_code,
+            "num_variants": num_candidates,
+            "line_profiler_results": line_profiler_results,
+            "trace_id": trace_id,
+            "python_version": platform.python_version(),
+            "experiment_metadata": experiment_metadata,
+            "codeflash_version": codeflash_version,
+        }
+
+        logger.info("Generating optimized candidates…")
+        console.rule()
+        try:
+            response = self.make_ai_service_request("/optimize-line-profiler", payload=payload, timeout=600)
+        except requests.exceptions.RequestException as e:
+            logger.exception(f"Error generating optimized candidates: {e}")
+            ph("cli-optimize-error-caught", {"error": str(e)})
+            return []
+
+        if response.status_code == 200:
+            optimizations_json = response.json()["optimizations"]
+            logger.info(f"Generated {len(optimizations_json)} candidates.")
+            console.rule()
+            return [
+                OptimizedCandidate(
+                    source_code=opt["source_code"],
+                    explanation=opt["explanation"],
+                    optimization_id=opt["optimization_id"],
+                )
+                for opt in optimizations_json
+            ]
+        try:
+            error = response.json()["error"]
+        except Exception:
+            error = response.text
+        logger.error(f"Error generating optimized candidates: {response.status_code} - {error}")
+        ph("cli-optimize-error-response", {"response_status_code": response.status_code, "error": error})
+        console.rule()
+        return []
+
     def log_results(
         self,
         function_trace_id: str,
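
The new endpoint method is self-contained: a caller needs only a client instance and the raw line_profiler text. A minimal usage sketch, assuming a default-constructed AiServiceClient; the source snippet, trace id, and profiler text below are illustrative placeholders, not values from this commit:

    from codeflash.api.aiservice import AiServiceClient

    client = AiServiceClient()
    candidates = client.optimize_python_code_line_profiler(
        source_code="def fib(n):\n    return n if n < 2 else fib(n - 1) + fib(n - 2)\n",
        dependency_code="",                               # read-only context; empty here
        trace_id="00000000-0000-0000-0000-000000000000",  # placeholder trace id
        line_profiler_results="Timer unit: 1e-06 s ...",  # raw line_profiler text output
        num_candidates=5,
    )
    for candidate in candidates:  # an empty list signals a request or HTTP error
        print(candidate.optimization_id, candidate.explanation)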

codeflash/models/models.py
Lines changed: 1 addition & 0 deletions

@@ -218,6 +218,7 @@ class OriginalCodeBaseline(BaseModel):
     benchmarking_test_results: TestResults
     runtime: int
     coverage_results: Optional[CoverageData]
+    lprof_results: str
 
 
 class CoverageStatus(Enum):
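
Since lprof_results is declared without a default, Pydantic treats it as required, so every site that constructs OriginalCodeBaseline must now supply it. A standalone toy model demonstrating the same behavior (hypothetical names, not the real class):

    from typing import Optional
    from pydantic import BaseModel, ValidationError

    class Baseline(BaseModel):
        runtime: int
        coverage_results: Optional[dict] = None
        lprof_results: str  # new required field, mirroring the diff above

    Baseline(runtime=12_345, lprof_results="Timer unit: 1e-06 s ...")  # ok
    try:
        Baseline(runtime=12_345)  # omits lprof_results
    except ValidationError as err:
        print(err)  # reports the missing required field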

codeflash/optimization/function_optimizer.py
Lines changed: 48 additions & 19 deletions

@@ -21,7 +21,6 @@
 from codeflash.api.aiservice import AiServiceClient, LocalAiServiceClient
 from codeflash.cli_cmds.console import code_print, console, logger, progress_bar
 from codeflash.code_utils import env_utils
-from codeflash.code_utils.code_extractor import add_needed_imports_from_module, extract_code
 from codeflash.code_utils.code_replacer import replace_function_definitions_in_module, add_decorator_imports
 from codeflash.code_utils.code_utils import (
     cleanup_paths,
@@ -208,7 +207,7 @@ def optimize_function(self) -> Result[BestOptimization, str]:
                 and "." in function_source.qualified_name
             ):
                 file_path_to_helper_classes[function_source.file_path].add(function_source.qualified_name.split(".")[0])
-
+        pass
         baseline_result = self.establish_original_code_baseline(  # this needs better typing
             code_context=code_context,
             original_helper_code=original_helper_code,
@@ -232,7 +231,29 @@ def optimize_function(self) -> Result[BestOptimization, str]:
             return Failure("The threshold for test coverage was not met.")
 
         best_optimization = None
-
+        logger.info("Adding more candidates based on line profiler info, calling the AI service")
+        with progress_bar(
+            f"Generating new optimizations for function {self.function_to_optimize.function_name} with line profiler information",
+            transient=True,
+        ):
+            lprof_generated_results = self.aiservice_client.optimize_python_code_line_profiler(
+                source_code=code_context.read_writable_code,
+                dependency_code=code_context.read_only_context_code,
+                trace_id=self.function_trace_id,
+                line_profiler_results=original_code_baseline.lprof_results,
+                num_candidates=10,
+                experiment_metadata=None,
+            )
+
+        if len(lprof_generated_results) == 0:
+            logger.info("Candidate generation with line profiler information failed.")
+        else:
+            logger.info("Candidate generation with line profiler information succeeded. Appending to optimization candidates.")
+            print("initial optimization candidates", len(optimizations_set.control))
+            optimizations_set.control.extend(lprof_generated_results)
+            print("after adding optimization candidates", len(optimizations_set.control))
+        # append the new candidates to the existing optimization candidates
         for _u, candidates in enumerate([optimizations_set.control, optimizations_set.experiment]):
             if candidates is None:
                 continue
@@ -813,7 +834,7 @@ def establish_original_code_baseline(
                 files_to_instrument.append(helper_obj.file_path)
                 fns_to_instrument.append(helper_obj.qualified_name)
             add_decorator_imports(files_to_instrument, fns_to_instrument, lprofiler_database_file)
-            behavioral_results, coverage_results = self.run_and_parse_tests(
+            lprof_results, _ = self.run_and_parse_tests(
                 testing_type=TestingMode.BEHAVIOR,
                 test_env=test_env,
                 test_files=self.test_files,
@@ -822,7 +843,9 @@ def establish_original_code_baseline(
                 enable_coverage=False,
                 enable_lprofiler=test_framework == "pytest",
                 code_context=code_context,
-            )
+                lprofiler_database_file=lprofiler_database_file,
+            )
         except Exception as e:
             logger.warning(f"Failed to run lprof for {self.function_to_optimize.function_name}. SKIPPING OPTIMIZING THIS FUNCTION.")
             console.rule()
@@ -905,6 +928,7 @@ def establish_original_code_baseline(
                 benchmarking_test_results=benchmarking_results,
                 runtime=total_timing,
                 coverage_results=coverage_results,
+                lprof_results=lprof_results,
             ),
             functions_to_remove,
         )
@@ -1041,6 +1065,7 @@ def run_and_parse_tests(
         pytest_max_loops: int = 100_000,
         code_context: CodeOptimizationContext | None = None,
         unittest_loop_index: int | None = None,
+        lprofiler_database_file: str | None = None,
     ) -> tuple[TestResults, CoverageData | None]:
         coverage_database_file = None
         coverage_config_file = None
@@ -1083,20 +1108,24 @@ def run_and_parse_tests(
                 f"stdout: {run_result.stdout}\n"
                 f"stderr: {run_result.stderr}\n"
             )
-
-        results, coverage_results = parse_test_results(
-            test_xml_path=result_file_path,
-            test_files=test_files,
-            test_config=self.test_cfg,
-            optimization_iteration=optimization_iteration,
-            run_result=run_result,
-            unittest_loop_index=unittest_loop_index,
-            function_name=self.function_to_optimize.function_name,
-            source_file=self.function_to_optimize.file_path,
-            code_context=code_context,
-            coverage_database_file=coverage_database_file,
-            coverage_config_file=coverage_config_file,
-        )
+        if not enable_lprofiler:
+            results, coverage_results = parse_test_results(
+                test_xml_path=result_file_path,
+                test_files=test_files,
+                test_config=self.test_cfg,
+                optimization_iteration=optimization_iteration,
+                run_result=run_result,
+                unittest_loop_index=unittest_loop_index,
+                function_name=self.function_to_optimize.function_name,
+                source_file=self.function_to_optimize.file_path,
+                code_context=code_context,
+                coverage_database_file=coverage_database_file,
+                coverage_config_file=coverage_config_file,
+            )
+        else:
+            file_contents = Path(str(lprofiler_database_file) + ".txt").read_text("utf-8")
+            return file_contents, None
         return results, coverage_results
 
     def generate_and_instrument_tests(
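
With enable_lprofiler set, run_and_parse_tests now returns the raw text sitting next to the profiler database instead of parsed TestResults (the tuple[TestResults, CoverageData | None] annotation is stale for that branch). A hedged sketch of how such a "<database>.txt" companion file could be produced with the line_profiler package; the file names and profiled function are illustrative assumptions, not this repo's instrumentation:

    import io
    from line_profiler import LineProfiler

    def hot_loop(n: int) -> int:
        total = 0
        for i in range(n):
            total += i * i
        return total

    profiler = LineProfiler(hot_loop)     # register the function to profile
    profiler.runcall(hot_loop, 100_000)   # profile a single call

    profiler.dump_stats("lprof.db")       # binary stats, analogous to lprofiler_database_file
    buffer = io.StringIO()
    profiler.print_stats(stream=buffer)   # human-readable per-line timings
    with open("lprof.db.txt", "w", encoding="utf-8") as fh:
        fh.write(buffer.getvalue())       # the text form read back by run_and_parse_tests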

codeflash/verification/test_runner.py
Lines changed: 2 additions & 1 deletion

@@ -131,7 +131,8 @@ def run_behavioral_tests(
     else:
         msg = f"Unsupported test framework: {test_framework}"
         raise ValueError(msg)
-
+    if enable_lprofiler:
+        pass  # placeholder: no line-profiler-specific handling here yet
     return result_file_path, results, coverage_database_file if enable_coverage else None, coverage_config_file if enable_coverage else None