|
19 | 19 | from codeflash.github.PrComment import FileDiffContent, PrComment |
20 | 20 |
|
21 | 21 | if TYPE_CHECKING: |
22 | | - from codeflash.models.models import FunctionCalledInTest |
| 22 | + from codeflash.models.models import FunctionCalledInTest, TestResults |
23 | 23 | from codeflash.result.explanation import Explanation |
24 | 24 |
|
25 | 25 |
|
def existing_tests_source_for(
    function_qualified_name_with_modules_from_root: str,
    function_to_tests: dict[str, set[FunctionCalledInTest]],
    tests_root: Path,
    original_test_results: Optional[TestResults] = None,
    optimized_test_results: Optional[TestResults] = None,
) -> str:
    """Build a sorted, human-readable report of the existing tests that exercise a function.

    Each test file is listed as ``- <path relative to tests_root>``. When both original and
    optimized test results are supplied, each file entry is followed by per-test-function
    timing lines of the form ``  - <test_name>: <original> -> <optimized>``.

    Args:
        function_qualified_name_with_modules_from_root: Key into ``function_to_tests``.
        function_to_tests: Mapping from qualified function name to the tests that call it.
        tests_root: Root directory that test file paths are reported relative to.
        original_test_results: Runtime data for the unoptimized code, if available.
        optimized_test_results: Runtime data for the optimized code, if available.

    Returns:
        Newline-joined report, or ``""`` when no tests reference the function.
    """
    test_files = function_to_tests.get(function_qualified_name_with_modules_from_root)
    if not test_files:
        return ""

    # Group the test cases by their test file (path relative to tests_root).
    test_files_grouped: dict[str, list[FunctionCalledInTest]] = {}
    for test_file in test_files:
        relative_path = str(Path(test_file.tests_in_file.test_file).relative_to(tests_root))
        test_files_grouped.setdefault(relative_path, []).append(test_file)

    # Hoisted out of the per-file loop: the runtime maps and the formatter import are
    # invariant across files (previously recomputed per file and re-imported per test case).
    have_timings = bool(original_test_results and optimized_test_results)
    if have_timings:
        from codeflash.code_utils.time_utils import format_time

        # Same data source as add_runtime_comments_to_generated_tests.
        original_runtime_by_test = original_test_results.usable_runtime_data_by_test_case()
        optimized_runtime_by_test = optimized_test_results.usable_runtime_data_by_test_case()

    report_lines = []
    for relative_path, tests_in_file in sorted(test_files_grouped.items()):
        file_line = f"- {relative_path}"

        if have_timings:
            test_case_details = []
            test_functions_in_file = {tf.tests_in_file.test_function for tf in tests_in_file}

            for test_function_name in sorted(test_functions_in_file):
                # NOTE(review): matching is by test-function name only, so identically named
                # tests in different files would pool their runtimes — confirm invocation ids
                # cannot collide across files, or additionally match on the test module/file.
                original_runtimes = [
                    runtime
                    for invocation_id, runtimes in original_runtime_by_test.items()
                    if invocation_id.test_function_name == test_function_name
                    for runtime in runtimes
                ]
                optimized_runtimes = [
                    runtime
                    for invocation_id, runtimes in optimized_runtime_by_test.items()
                    if invocation_id.test_function_name == test_function_name
                    for runtime in runtimes
                ]

                if original_runtimes and optimized_runtimes:
                    # Minimum observed runtime is the least-noisy estimate (same convention
                    # as add_runtime_comments_to_generated_tests).
                    original_str = format_time(min(original_runtimes))
                    optimized_str = format_time(min(optimized_runtimes))
                    test_case_details.append(f"  - {test_function_name}: {original_str} -> {optimized_str}")

            if test_case_details:
                file_line += "\n" + "\n".join(test_case_details)

        report_lines.append(file_line)

    # Keys are unique per file and iterated in sorted order; the final sorted() preserves the
    # original sorted-set-join contract byte-for-byte.
    return "\n".join(sorted(report_lines))
37 | 94 |
|
38 | 95 |
|
|
0 commit comments