
Commit d5adf4b

Merge pull request #125 from codeflash-ai/llm-explanations
Better LLM Explanations CF-597
2 parents 2e46088 + 26d7f63 commit d5adf4b


2 files changed: +102 / -4 lines


codeflash/api/aiservice.py

Lines changed: 73 additions & 0 deletions
@@ -278,6 +278,79 @@ def optimize_python_code_refinement(self, request: list[AIServiceRefinerRequest]
         console.rule()
         return []
 
+    def get_new_explanation(  # noqa: D417
+        self,
+        source_code: str,
+        optimized_code: str,
+        dependency_code: str,
+        trace_id: str,
+        original_line_profiler_results: str,
+        optimized_line_profiler_results: str,
+        original_code_runtime: str,
+        optimized_code_runtime: str,
+        speedup: str,
+        annotated_tests: str,
+        optimization_id: str,
+        original_explanation: str,
+    ) -> str:
+        """Generate a new explanation for an optimization candidate by making a request to the Django endpoint.
+
+        Parameters
+        ----------
+        - source_code (str): The python code that was optimized.
+        - optimized_code (str): The python code generated by the AI service.
+        - dependency_code (str): The dependency code used as read-only context for the optimization.
+        - original_line_profiler_results (str): Line profiler results for the baseline code.
+        - optimized_line_profiler_results (str): Line profiler results for the optimized code.
+        - original_code_runtime (str): Runtime of the baseline code.
+        - optimized_code_runtime (str): Runtime of the optimized code.
+        - speedup (str): Speedup of the optimized code.
+        - annotated_tests (str): Test functions annotated with their runtimes.
+        - optimization_id (str): Unique id of the optimization candidate.
+        - original_explanation (str): The original explanation generated for the optimization candidate.
+
+        Returns
+        -------
+        - str: The new explanation, or an empty string if the request fails.
+
+        """
+        payload = {
+            "trace_id": trace_id,
+            "source_code": source_code,
+            "optimized_code": optimized_code,
+            "original_line_profiler_results": original_line_profiler_results,
+            "optimized_line_profiler_results": optimized_line_profiler_results,
+            "original_code_runtime": original_code_runtime,
+            "optimized_code_runtime": optimized_code_runtime,
+            "speedup": speedup,
+            "annotated_tests": annotated_tests,
+            "optimization_id": optimization_id,
+            "original_explanation": original_explanation,
+            "dependency_code": dependency_code,
+        }
+        logger.info("Generating explanation")
+        console.rule()
+        try:
+            response = self.make_ai_service_request("/explain", payload=payload, timeout=60)
+        except requests.exceptions.RequestException as e:
+            logger.exception(f"Error generating explanations: {e}")
+            ph("cli-optimize-error-caught", {"error": str(e)})
+            return ""
+
+        if response.status_code == 200:
+            explanation: str = response.json()["explanation"]
+            logger.debug(f"New Explanation: {explanation}")
+            console.rule()
+            return explanation
+        try:
+            error = response.json()["error"]
+        except Exception:
+            error = response.text
+        logger.error(f"Error generating explanation: {response.status_code} - {error}")
+        ph("cli-optimize-error-response", {"response_status_code": response.status_code, "error": error})
+        console.rule()
+        return ""
+
     def log_results(  # noqa: D417
         self,
         function_trace_id: str,
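
For context, a minimal usage sketch of the new method follows. It is not part of the commit: `client` stands for an already-constructed AI service client exposing the method above, and every value is a made-up placeholder; only the method name, its keyword arguments, and the empty-string-on-failure behaviour come from the diff.

# Hypothetical usage sketch; all values below are placeholders.
original_source = "def double(x):\n    return x * 2"
candidate_source = "def double(x):\n    return x << 1"
original_explanation = "Replaced multiplication with a bit shift."

new_explanation = client.get_new_explanation(  # `client` is assumed to exist
    source_code=original_source,
    optimized_code=candidate_source,
    dependency_code="",
    trace_id="abc123ef00",
    original_line_profiler_results="",
    optimized_line_profiler_results="",
    original_code_runtime="1.2ms",
    optimized_code_runtime="450μs",
    speedup="167%",
    annotated_tests="",
    optimization_id="opt-1",
    original_explanation=original_explanation,
)
if not new_explanation:
    # get_new_explanation returns "" on any request or server error,
    # so callers can fall back to the explanation they already have.
    new_explanation = original_explanation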

codeflash/optimization/function_optimizer.py

Lines changed: 29 additions & 4 deletions
@@ -1100,6 +1100,7 @@ def find_and_process_best_optimization(
                 function_to_all_tests,
                 exp_type,
                 original_helper_code,
+                code_context,
             )
             self.log_successful_optimization(explanation, generated_tests, exp_type)
         return best_optimization
@@ -1117,6 +1118,7 @@ def process_review(
         function_to_all_tests: dict[str, set[FunctionCalledInTest]],
         exp_type: str,
         original_helper_code: dict[Path, str],
+        code_context: CodeOptimizationContext,
     ) -> None:
         coverage_message = (
             original_code_baseline.coverage_results.build_message()
@@ -1137,11 +1139,11 @@ def process_review(
             generated_tests, original_runtime_by_test, optimized_runtime_by_test
         )
 
-        generated_tests_str = "\n\n".join(
+        generated_tests_str = "\n#------------------------------------------------\n".join(
            [test.generated_original_test_source for test in generated_tests.generated_tests]
         )
         if concolic_test_str:
-            generated_tests_str += "\n\n" + concolic_test_str
+            generated_tests_str += "\n#------------------------------------------------\n" + concolic_test_str
 
         existing_tests = existing_tests_source_for(
             self.function_to_optimize.qualified_name_with_modules_from_root(self.project_root),
@@ -1150,11 +1152,34 @@ def process_review(
             original_runtimes_all=original_runtime_by_test,
             optimized_runtimes_all=optimized_runtime_by_test,
         )
-
+        new_explanation_raw_str = self.aiservice_client.get_new_explanation(
+            source_code=code_context.read_writable_code,
+            dependency_code=code_context.read_only_context_code,
+            trace_id=self.function_trace_id[:-4] + exp_type if self.experiment_id else self.function_trace_id,
+            optimized_code=best_optimization.candidate.source_code,
+            original_line_profiler_results=original_code_baseline.line_profile_results["str_out"],
+            optimized_line_profiler_results=best_optimization.line_profiler_test_results["str_out"],
+            original_code_runtime=humanize_runtime(original_code_baseline.runtime),
+            optimized_code_runtime=humanize_runtime(best_optimization.runtime),
+            speedup=f"{int(performance_gain(original_runtime_ns=original_code_baseline.runtime, optimized_runtime_ns=best_optimization.runtime) * 100)}%",
+            annotated_tests=generated_tests_str,
+            optimization_id=best_optimization.candidate.optimization_id,
+            original_explanation=best_optimization.candidate.explanation,
+        )
+        new_explanation = Explanation(
+            raw_explanation_message=new_explanation_raw_str or explanation.raw_explanation_message,
+            winning_behavior_test_results=explanation.winning_behavior_test_results,
+            winning_benchmarking_test_results=explanation.winning_benchmarking_test_results,
+            original_runtime_ns=explanation.original_runtime_ns,
+            best_runtime_ns=explanation.best_runtime_ns,
+            function_name=explanation.function_name,
+            file_path=explanation.file_path,
+            benchmark_details=explanation.benchmark_details,
+        )
         data = {
             "original_code": original_code_combined,
             "new_code": new_code_combined,
-            "explanation": explanation,
+            "explanation": new_explanation,
             "existing_tests_source": existing_tests,
             "generated_original_test_source": generated_tests_str,
             "function_trace_id": self.function_trace_id[:-4] + exp_type

0 commit comments
