diff --git a/codeflash/lsp/beta.py b/codeflash/lsp/beta.py
index f83be319f..e0848ab09 100644
--- a/codeflash/lsp/beta.py
+++ b/codeflash/lsp/beta.py
@@ -14,13 +14,10 @@
 from codeflash.discovery.functions_to_optimize import filter_functions, get_functions_within_git_diff
 from codeflash.either import is_successful
 from codeflash.lsp.server import CodeflashLanguageServer, CodeflashLanguageServerProtocol
-from codeflash.result.explanation import Explanation
 
 if TYPE_CHECKING:
     from lsprotocol import types
 
-    from codeflash.models.models import GeneratedTestsList, OptimizationSet
-
 
 @dataclass
 class OptimizableFunctionsParams:
@@ -179,67 +176,6 @@ def provide_api_key(server: CodeflashLanguageServer, params: ProvideApiKeyParams
     return {"status": "error", "message": "something went wrong while saving the api key"}
 
 
-@server.feature("prepareOptimization")
-def prepare_optimization(server: CodeflashLanguageServer, params: FunctionOptimizationParams) -> dict[str, str]:
-    current_function = server.optimizer.current_function_being_optimized
-
-    module_prep_result = server.optimizer.prepare_module_for_optimization(current_function.file_path)
-    validated_original_code, original_module_ast = module_prep_result
-
-    function_optimizer = server.optimizer.create_function_optimizer(
-        current_function,
-        function_to_optimize_source_code=validated_original_code[current_function.file_path].source_code,
-        original_module_ast=original_module_ast,
-        original_module_path=current_function.file_path,
-    )
-
-    server.optimizer.current_function_optimizer = function_optimizer
-    if not function_optimizer:
-        return {"functionName": params.functionName, "status": "error", "message": "No function optimizer found"}
-
-    initialization_result = function_optimizer.can_be_optimized()
-    if not is_successful(initialization_result):
-        return {"functionName": params.functionName, "status": "error", "message": initialization_result.failure()}
-
-    return {"functionName": params.functionName, "status": "success", "message": "Optimization preparation completed"}
-
-
-@server.feature("generateTests")
-def generate_tests(server: CodeflashLanguageServer, params: FunctionOptimizationParams) -> dict[str, str]:
-    function_optimizer = server.optimizer.current_function_optimizer
-    if not function_optimizer:
-        return {"functionName": params.functionName, "status": "error", "message": "No function optimizer found"}
-
-    initialization_result = function_optimizer.can_be_optimized()
-    if not is_successful(initialization_result):
-        return {"functionName": params.functionName, "status": "error", "message": initialization_result.failure()}
-
-    should_run_experiment, code_context, original_helper_code = initialization_result.unwrap()
-
-    test_setup_result = function_optimizer.generate_and_instrument_tests(
-        code_context, should_run_experiment=should_run_experiment
-    )
-    if not is_successful(test_setup_result):
-        return {"functionName": params.functionName, "status": "error", "message": test_setup_result.failure()}
-    generated_tests_list: GeneratedTestsList
-    optimizations_set: OptimizationSet
-    generated_tests_list, _, concolic__test_str, optimizations_set = test_setup_result.unwrap()
-
-    generated_tests: list[str] = [
-        generated_test.generated_original_test_source for generated_test in generated_tests_list.generated_tests
-    ]
-    optimizations_dict = {
-        candidate.optimization_id: {"source_code": candidate.source_code.markdown, "explanation": candidate.explanation}
-        for candidate in optimizations_set.control + optimizations_set.experiment
-    }
-
-    return {
-        "functionName": params.functionName,
-        "status": "success",
-        "message": {"generated_tests": generated_tests, "optimizations": optimizations_dict},
-    }
-
-
 @server.feature("performFunctionOptimization")
 def perform_function_optimization(  # noqa: PLR0911
     server: CodeflashLanguageServer, params: FunctionOptimizationParams
@@ -351,8 +287,6 @@ def perform_function_optimization(  # noqa: PLR0911
         server.show_message_log(f"Optimization completed for {params.functionName} with {speedup:.2f}x speedup", "Info")
 
-        explanation = best_optimization.candidate.explanation
-        explanation_str = explanation.explanation_message() if isinstance(explanation, Explanation) else explanation
 
         return {
             "functionName": params.functionName,
             "status": "success",
@@ -360,7 +294,7 @@
             "extra": f"Speedup: {speedup:.2f}x faster",
             "optimization": optimized_source,
             "patch_file": str(patch_file),
-            "explanation": explanation_str,
+            "explanation": best_optimization.explanation_v2,
         }
     finally:
         cleanup_the_optimizer(server)
diff --git a/codeflash/models/models.py b/codeflash/models/models.py
index 1d47fb7d4..b99d16c5f 100644
--- a/codeflash/models/models.py
+++ b/codeflash/models/models.py
@@ -91,6 +91,7 @@ def __hash__(self) -> int:
 
 class BestOptimization(BaseModel):
     candidate: OptimizedCandidate
+    explanation_v2: Optional[str] = None
     helper_functions: list[FunctionSource]
     code_context: CodeOptimizationContext
     runtime: int
@@ -343,7 +344,7 @@ class TestsInFile:
     test_type: TestType
 
 
-@dataclass
+@dataclass(frozen=True)
 class OptimizedCandidate:
     source_code: CodeStringsMarkdown
     explanation: str
diff --git a/codeflash/optimization/function_optimizer.py b/codeflash/optimization/function_optimizer.py
index bd4976cf2..45c61b7f7 100644
--- a/codeflash/optimization/function_optimizer.py
+++ b/codeflash/optimization/function_optimizer.py
@@ -1158,7 +1158,6 @@ def find_and_process_best_optimization(
                 original_helper_code,
                 code_context,
             )
-            self.log_successful_optimization(explanation, generated_tests, exp_type)
         return best_optimization
 
     def process_review(
@@ -1232,10 +1231,9 @@ def process_review(
             file_path=explanation.file_path,
             benchmark_details=explanation.benchmark_details,
         )
+        self.log_successful_optimization(new_explanation, generated_tests, exp_type)
 
-        best_optimization.candidate.explanation = new_explanation
-
-        console.print(Panel(new_explanation_raw_str, title="Best Candidate Explanation", border_style="blue"))
+        best_optimization.explanation_v2 = new_explanation.explanation_message()
 
         data = {
             "original_code": original_code_combined,