1+ from __future__ import annotations
2+ from typing import Optional
3+
14from rich .console import Console
25from rich .table import Table
36
47from codeflash .cli_cmds .console import logger
8+ from codeflash .code_utils .time_utils import humanize_runtime
9+ from codeflash .models .models import ProcessedBenchmarkInfo , BenchmarkDetail
510
611
712def validate_and_format_benchmark_table (function_benchmark_timings : dict [str , dict [str , int ]],
@@ -61,4 +66,58 @@ def print_benchmark_table(function_to_results: dict[str, list[tuple[str, float,
6166 )
6267
6368 # Print the table
64- console .print (table )
69+ console .print (table )
70+
71+
def process_benchmark_data(
    replay_performance_gain: float,
    fto_benchmark_timings: dict[str, int],
    total_benchmark_timings: dict[str, int],
) -> Optional[ProcessedBenchmarkInfo]:
    """Process benchmark data and generate detailed benchmark information.

    Args:
        replay_performance_gain: The relative performance gain measured on the
            replay test (e.g. 0.5 means the optimized code took 1/1.5 of the
            original time).
        fto_benchmark_timings: Time spent inside the function to optimize for
            each benchmark, keyed by "file::test_function::line".
            (Units presumably nanoseconds — TODO confirm against caller.)
        total_benchmark_timings: Total time of each benchmark, keyed the same
            way as ``fto_benchmark_timings``.

    Returns:
        ProcessedBenchmarkInfo with one BenchmarkDetail per usable benchmark,
        or None when any input is empty/zero.

    """
    if not replay_performance_gain or not fto_benchmark_timings or not total_benchmark_timings:
        return None

    benchmark_details: list[BenchmarkDetail] = []

    for benchmark_key, og_benchmark_timing in fto_benchmark_timings.items():
        try:
            # Keys are expected to have exactly three "::"-separated parts;
            # the trailing line number is not needed for the report.
            benchmark_file_name, benchmark_test_function, _line_number = benchmark_key.split("::")
        except ValueError:
            continue  # Skip malformed benchmark keys

        total_benchmark_timing = total_benchmark_timings.get(benchmark_key, 0)

        if total_benchmark_timing == 0:
            continue  # Skip benchmarks with zero timing

        # Only the portion of the benchmark spent in the optimized function
        # shrinks (by the factor 1 / (gain + 1)); the rest is unchanged.
        expected_new_benchmark_timing = (
            total_benchmark_timing
            - og_benchmark_timing
            + og_benchmark_timing / (replay_performance_gain + 1)
        )

        # Guard: a non-positive projection (e.g. a degenerate negative gain)
        # would otherwise divide by zero or report a nonsense speedup.
        if expected_new_benchmark_timing <= 0:
            continue

        # Speedup of the whole benchmark, as a percentage over 1x.
        benchmark_speedup_ratio = total_benchmark_timing / expected_new_benchmark_timing
        benchmark_speedup_percent = (benchmark_speedup_ratio - 1) * 100

        benchmark_details.append(
            BenchmarkDetail(
                benchmark_name=benchmark_file_name,
                test_function=benchmark_test_function,
                original_timing=humanize_runtime(int(total_benchmark_timing)),
                expected_new_timing=humanize_runtime(int(expected_new_benchmark_timing)),
                speedup_percent=benchmark_speedup_percent,
            )
        )

    return ProcessedBenchmarkInfo(benchmark_details=benchmark_details)
0 commit comments