Skip to content
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
68 changes: 1 addition & 67 deletions codeflash/lsp/beta.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,10 @@
from codeflash.discovery.functions_to_optimize import filter_functions, get_functions_within_git_diff
from codeflash.either import is_successful
from codeflash.lsp.server import CodeflashLanguageServer, CodeflashLanguageServerProtocol
from codeflash.result.explanation import Explanation

if TYPE_CHECKING:
from lsprotocol import types

from codeflash.models.models import GeneratedTestsList, OptimizationSet


@dataclass
class OptimizableFunctionsParams:
Expand Down Expand Up @@ -179,67 +176,6 @@ def provide_api_key(server: CodeflashLanguageServer, params: ProvideApiKeyParams
return {"status": "error", "message": "something went wrong while saving the api key"}


@server.feature("prepareOptimization")
def prepare_optimization(server: CodeflashLanguageServer, params: FunctionOptimizationParams) -> dict[str, str]:
    """Prepare the currently selected function for optimization.

    Validates and parses the module that contains the function, builds a
    FunctionOptimizer for it, stores that optimizer on the server for later
    steps ("generateTests", "performFunctionOptimization"), and runs the
    can-be-optimized precheck.

    Returns a JSON-serializable dict with keys "functionName", "status"
    ("success" | "error"), and "message".
    """
    current_function = server.optimizer.current_function_being_optimized
    # Guard: a prior request must have selected a function; without this check
    # the attribute access below would raise AttributeError on None instead of
    # returning the structured error every other handler returns.
    if current_function is None:
        return {"functionName": params.functionName, "status": "error", "message": "No function is being optimized"}

    module_prep_result = server.optimizer.prepare_module_for_optimization(current_function.file_path)
    validated_original_code, original_module_ast = module_prep_result

    function_optimizer = server.optimizer.create_function_optimizer(
        current_function,
        function_to_optimize_source_code=validated_original_code[current_function.file_path].source_code,
        original_module_ast=original_module_ast,
        original_module_path=current_function.file_path,
    )

    # Stored unconditionally (even if falsy) so stale optimizer state from a
    # previous run is cleared before we report the error.
    server.optimizer.current_function_optimizer = function_optimizer
    if not function_optimizer:
        return {"functionName": params.functionName, "status": "error", "message": "No function optimizer found"}

    initialization_result = function_optimizer.can_be_optimized()
    if not is_successful(initialization_result):
        return {"functionName": params.functionName, "status": "error", "message": initialization_result.failure()}

    return {"functionName": params.functionName, "status": "success", "message": "Optimization preparation completed"}


@server.feature("generateTests")
def generate_tests(server: CodeflashLanguageServer, params: FunctionOptimizationParams) -> dict[str, object]:
    """Generate and instrument tests for the prepared function.

    Requires that "prepareOptimization" has already stored a function
    optimizer on the server. On success the "message" value is a nested
    payload (not a plain string — hence the dict[str, object] return
    annotation) containing the generated test sources and the candidate
    optimizations keyed by optimization id.

    Returns a JSON-serializable dict with keys "functionName", "status"
    ("success" | "error"), and "message".
    """
    function_optimizer = server.optimizer.current_function_optimizer
    if not function_optimizer:
        return {"functionName": params.functionName, "status": "error", "message": "No function optimizer found"}

    initialization_result = function_optimizer.can_be_optimized()
    if not is_successful(initialization_result):
        return {"functionName": params.functionName, "status": "error", "message": initialization_result.failure()}

    should_run_experiment, code_context, _original_helper_code = initialization_result.unwrap()

    test_setup_result = function_optimizer.generate_and_instrument_tests(
        code_context, should_run_experiment=should_run_experiment
    )
    if not is_successful(test_setup_result):
        return {"functionName": params.functionName, "status": "error", "message": test_setup_result.failure()}
    generated_tests_list: GeneratedTestsList
    optimizations_set: OptimizationSet
    # The concolic test string is not used by this handler; discard it
    # explicitly (the original name "concolic__test_str" was a typo'd,
    # unused binding).
    generated_tests_list, _, _concolic_test_str, optimizations_set = test_setup_result.unwrap()

    generated_tests: list[str] = [
        generated_test.generated_original_test_source for generated_test in generated_tests_list.generated_tests
    ]
    optimizations_dict = {
        candidate.optimization_id: {"source_code": candidate.source_code.markdown, "explanation": candidate.explanation}
        for candidate in optimizations_set.control + optimizations_set.experiment
    }

    return {
        "functionName": params.functionName,
        "status": "success",
        "message": {"generated_tests": generated_tests, "optimizations": optimizations_dict},
    }


@server.feature("performFunctionOptimization")
def perform_function_optimization( # noqa: PLR0911
server: CodeflashLanguageServer, params: FunctionOptimizationParams
Expand Down Expand Up @@ -351,16 +287,14 @@ def perform_function_optimization( # noqa: PLR0911

server.show_message_log(f"Optimization completed for {params.functionName} with {speedup:.2f}x speedup", "Info")

explanation = best_optimization.candidate.explanation
explanation_str = explanation.explanation_message() if isinstance(explanation, Explanation) else explanation
return {
"functionName": params.functionName,
"status": "success",
"message": "Optimization completed successfully",
"extra": f"Speedup: {speedup:.2f}x faster",
"optimization": optimized_source,
"patch_file": str(patch_file),
"explanation": explanation_str,
"explanation": best_optimization.explanation_v2,
}
finally:
cleanup_the_optimizer(server)
Expand Down
3 changes: 2 additions & 1 deletion codeflash/models/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@ def __hash__(self) -> int:

class BestOptimization(BaseModel):
candidate: OptimizedCandidate
explanation_v2: Optional[str] = None
helper_functions: list[FunctionSource]
code_context: CodeOptimizationContext
runtime: int
Expand Down Expand Up @@ -343,7 +344,7 @@ class TestsInFile:
test_type: TestType


@dataclass
@dataclass(frozen=True)
class OptimizedCandidate:
source_code: CodeStringsMarkdown
explanation: str
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Q: why aren't we keeping the old explanation and the new explanation as separate fields here?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I would propose to keep it frozen. We can instantiate a new candidate with the new explanation; we don't want to corrupt the existing data we have.

Expand Down
6 changes: 2 additions & 4 deletions codeflash/optimization/function_optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -1158,7 +1158,6 @@ def find_and_process_best_optimization(
original_helper_code,
code_context,
)
self.log_successful_optimization(explanation, generated_tests, exp_type)
return best_optimization

def process_review(
Expand Down Expand Up @@ -1232,10 +1231,9 @@ def process_review(
file_path=explanation.file_path,
benchmark_details=explanation.benchmark_details,
)
self.log_successful_optimization(new_explanation, generated_tests, exp_type)

best_optimization.candidate.explanation = new_explanation

console.print(Panel(new_explanation_raw_str, title="Best Candidate Explanation", border_style="blue"))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

print should remain there @mohammedahmed18

Copy link
Contributor Author

@mohammedahmed18 mohammedahmed18 Aug 19, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@aseembits93
already printed here

1234|        self.log_successful_optimization(new_explanation, generated_tests, exp_type)

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

sounds good @mohammedahmed18

best_optimization.explanation_v2 = new_explanation.explanation_message()

data = {
"original_code": original_code_combined,
Expand Down
Loading