Skip to content
Merged
Show file tree
Hide file tree
Changes from 18 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
73 changes: 73 additions & 0 deletions codeflash/api/aiservice.py
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,79 @@ def optimize_python_code_refinement(self, request: list[AIServiceRefinerRequest]
console.rule()
return []

def get_new_explanation(  # noqa: D417
    self,
    source_code: str,
    optimized_code: str,
    dependency_code: str,
    trace_id: str,
    original_line_profiler_results: str,
    optimized_line_profiler_results: str,
    original_code_runtime: str,
    optimized_code_runtime: str,
    speedup: str,
    annotated_tests: str,
    optimization_id: str,
    original_explanation: str,
) -> str:
    """Generate a refined explanation for an optimization candidate via the AI service.

    Makes a request to the ``/explain`` endpoint and returns the new
    explanation text, falling back to an empty string on any failure so
    the caller can keep the previously generated explanation.

    Parameters
    ----------
    - source_code (str): The original python code that was optimized.
    - optimized_code (str): The python code generated by the AI service.
    - dependency_code (str): The dependency code used as read-only context for the optimization.
    - trace_id (str): Unique trace id for this optimization run.
    - original_line_profiler_results (str): Line profiler results for the baseline code.
    - optimized_line_profiler_results (str): Line profiler results for the optimized code.
    - original_code_runtime (str): Human-readable runtime for the baseline code.
    - optimized_code_runtime (str): Human-readable runtime for the optimized code.
    - speedup (str): Speedup of the optimized code.
    - annotated_tests (str): Test functions annotated with runtime.
    - optimization_id (str): Unique id of the optimization candidate.
    - original_explanation (str): Original explanation generated for the candidate.

    Returns
    -------
    - str: The new explanation, or an empty string if the request failed
      or the response could not be parsed.

    """
    payload = {
        "trace_id": trace_id,
        "source_code": source_code,
        "optimized_code": optimized_code,
        "original_line_profiler_results": original_line_profiler_results,
        "optimized_line_profiler_results": optimized_line_profiler_results,
        "original_code_runtime": original_code_runtime,
        "optimized_code_runtime": optimized_code_runtime,
        "speedup": speedup,
        "annotated_tests": annotated_tests,
        "optimization_id": optimization_id,
        "original_explanation": original_explanation,
        "dependency_code": dependency_code,
    }
    logger.info("Generating explanation")
    console.rule()
    try:
        response = self.make_ai_service_request("/explain", payload=payload, timeout=60)
    except requests.exceptions.RequestException as e:
        logger.exception(f"Error generating explanations: {e}")
        ph("cli-optimize-error-caught", {"error": str(e)})
        return ""

    if response.status_code == 200:
        # A 200 with a malformed/missing body should degrade to "" like
        # every other failure path, not crash the optimization run.
        try:
            explanation: str = response.json()["explanation"]
        except (KeyError, ValueError) as e:
            logger.error(f"Error parsing explanation response: {e}")
            ph("cli-optimize-error-response", {"response_status_code": response.status_code, "error": str(e)})
            console.rule()
            return ""
        logger.debug(f"New Explanation: {explanation}")
        console.rule()
        return explanation
    # Non-200: surface the server-provided error if the body is JSON,
    # otherwise fall back to the raw response text.
    try:
        error = response.json()["error"]
    except Exception:
        error = response.text
    logger.error(f"Error generating explanation: {response.status_code} - {error}")
    ph("cli-optimize-error-response", {"response_status_code": response.status_code, "error": error})
    console.rule()
    return ""

def log_results( # noqa: D417
self,
function_trace_id: str,
Expand Down
35 changes: 31 additions & 4 deletions codeflash/optimization/function_optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -1108,7 +1108,7 @@ def find_and_process_best_optimization(
generated_tests = add_runtime_comments_to_generated_tests(
generated_tests, original_runtime_by_test, optimized_runtime_by_test
)
generated_tests_str = "\n\n".join(
generated_tests_str = "\n#------------------------------------------------\n".join(
[test.generated_original_test_source for test in generated_tests.generated_tests]
)
existing_tests = existing_tests_source_for(
Expand All @@ -1119,12 +1119,39 @@ def find_and_process_best_optimization(
optimized_runtimes_all=optimized_runtime_by_test,
)
if concolic_test_str:
generated_tests_str += "\n\n" + concolic_test_str

generated_tests_str += (
"\n#------------------------------------------------\n" + concolic_test_str
)
new_explanation_raw_str = self.aiservice_client.get_new_explanation(
source_code=code_context.read_writable_code,
dependency_code=code_context.read_only_context_code,
trace_id=self.function_trace_id[:-4] + exp_type
if self.experiment_id
else self.function_trace_id,
optimized_code=best_optimization.candidate.source_code,
original_line_profiler_results=original_code_baseline.line_profile_results["str_out"],
optimized_line_profiler_results=best_optimization.line_profiler_test_results["str_out"],
original_code_runtime=humanize_runtime(original_code_baseline.runtime),
optimized_code_runtime=humanize_runtime(best_optimization.runtime),
speedup=f"{int(performance_gain(original_runtime_ns=original_code_baseline.runtime, optimized_runtime_ns=best_optimization.runtime) * 100)}%",
annotated_tests=generated_tests_str,
optimization_id=best_optimization.candidate.optimization_id,
original_explanation=best_optimization.candidate.explanation,
)
new_explanation = Explanation(
raw_explanation_message=new_explanation_raw_str or explanation.raw_explanation_message,
winning_behavior_test_results=explanation.winning_behavior_test_results,
winning_benchmarking_test_results=explanation.winning_benchmarking_test_results,
original_runtime_ns=explanation.original_runtime_ns,
best_runtime_ns=explanation.best_runtime_ns,
function_name=explanation.function_name,
file_path=explanation.file_path,
benchmark_details=explanation.benchmark_details,
)
check_create_pr(
original_code=original_code_combined,
new_code=new_code_combined,
explanation=explanation,
explanation=new_explanation,
existing_tests_source=existing_tests,
generated_original_test_source=generated_tests_str,
function_trace_id=self.function_trace_id[:-4] + exp_type
Expand Down
Loading