Commit 102de11

works, need to follow type hints
1 parent 93623cb commit 102de11

File tree

3 files changed: +116 -11 lines

  codeflash/optimization/function_optimizer.py
  codeflash/verification/parse_test_output.py
  codeflash/verification/test_runner.py

codeflash/optimization/function_optimizer.py

Lines changed: 8 additions & 9 deletions

@@ -64,7 +64,7 @@
 from codeflash.verification.concolic_testing import generate_concolic_tests
 from codeflash.verification.equivalence import compare_test_results
 from codeflash.verification.instrument_codeflash_capture import instrument_codeflash_capture
-from codeflash.verification.parse_test_output import parse_test_results
+from codeflash.verification.parse_test_output import parse_test_results, parse_lprof_results
 from codeflash.verification.test_results import TestResults, TestType
 from codeflash.verification.test_runner import run_behavioral_tests, run_benchmarking_tests
 from codeflash.verification.verification_utils import get_test_file_path

@@ -237,7 +237,6 @@ def optimize_function(self) -> Result[BestOptimization, str]:
             f"Generating new optimizations for function {self.function_to_optimize.function_name} with line profiler information",
             transient=True,
         ):
-            pass
             with concurrent.futures.ThreadPoolExecutor(max_workers= N_TESTS_TO_GENERATE + 2) as executor:
                 future_optimization_candidates_lp = executor.submit(self.aiservice_client.optimize_python_code_line_profiler,
                     source_code=code_context.read_writable_code,

@@ -247,7 +246,6 @@ def optimize_function(self) -> Result[BestOptimization, str]:
                     num_candidates = 10,
                     experiment_metadata = None)
                 future = [future_optimization_candidates_lp]
-                pass
                 concurrent.futures.wait(future)
                 lprof_generated_results = future[0].result()
                 if len(lprof_generated_results)==0:

@@ -838,7 +836,8 @@ def establish_original_code_baseline(
                 files_to_instrument.append(helper_obj.file_path)
                 fns_to_instrument.append(helper_obj.qualified_name)
                 add_decorator_imports(files_to_instrument,fns_to_instrument, lprofiler_database_file)
-                lprof_results, _ = self.run_and_parse_tests(
+                #output doesn't matter, just need to run it
+                lprof_cmd_results, _ = self.run_and_parse_tests(
                     testing_type=TestingMode.BEHAVIOR,
                     test_env=test_env,
                     test_files=self.test_files,

@@ -849,7 +848,8 @@ def establish_original_code_baseline(
                     code_context=code_context,
                     lprofiler_database_file=lprofiler_database_file,
                 )
-                pass
+                #real magic happens here
+                lprof_results = parse_lprof_results(lprofiler_database_file)
             except Exception as e:
                 logger.warning(f"Failed to run lprof for {self.function_to_optimize.function_name}. SKIPPING OPTIMIZING THIS FUNCTION.")
                 console.rule()

@@ -1126,11 +1126,10 @@ def run_and_parse_tests(
                 coverage_database_file=coverage_database_file,
                 coverage_config_file=coverage_config_file,
             )
+            return results, coverage_results
         else:
-            pass
-            file_contents = Path(str(lprofiler_database_file)+".txt").read_text("utf-8")
-            return file_contents, None
-        return results, coverage_results
+            return result_file_path, None
+
 
     def generate_and_instrument_tests(
         self,
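
The baseline change above runs the line-profiler-instrumented test suite purely for the side effect of writing the .lprof database (the command output, lprof_cmd_results, is discarded) and then turns that database into a report with parse_lprof_results. Below is a minimal standalone sketch of the same run-then-parse split using the line_profiler package directly, which writes the same pickled LineStats format that parse_lprof_results reads back; slow_sum and the baseline.lprof path are hypothetical examples, not part of codeflash.

import pickle
from pathlib import Path

from line_profiler import LineProfiler


def slow_sum(n: int) -> int:  # hypothetical stand-in for an instrumented function
    total = 0
    for i in range(n):
        total += i
    return total


profiler = LineProfiler()
wrapped = profiler(slow_sum)  # wrapping registers the function and records per-line events

# Step 1: run the code under the profiler; the return value does not matter,
# mirroring how the diff ignores lprof_cmd_results.
wrapped(100_000)

# Step 2: dump the stats to an .lprof file, then load it back separately,
# mirroring parse_lprof_results(lprofiler_database_file).
lprof_path = Path("baseline.lprof")
profiler.dump_stats(str(lprof_path))
with lprof_path.open("rb") as f:
    stats = pickle.load(f)  # a LineStats object with .timings and .unit

print(stats.unit)             # timer unit in seconds
print(sorted(stats.timings))  # keys are (filename, first_lineno, function_name)

In establish_original_code_baseline the same two steps are performed by run_and_parse_tests (which, in the line-profiler branch, now returns only the result file path) followed by parse_lprof_results.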

codeflash/verification/parse_test_output.py

Lines changed: 108 additions & 0 deletions

@@ -28,6 +28,9 @@
     TestType,
     VerificationType,
 )
+import linecache
+import inspect
+import pandas as pd
 
 if TYPE_CHECKING:
     import subprocess

@@ -531,3 +534,108 @@ def parse_test_results(
         )
         coverage.log_coverage()
     return results, coverage if all_args else None
+
+
+def show_func(filename, start_lineno, func_name, timings, unit):
+    total_hits = sum(t[1] for t in timings)
+    total_time = sum(t[2] for t in timings)
+    #str_out = ""
+    out_table = ""
+    table_rows = []
+    if total_hits == 0:
+        return ''
+    scalar = 1
+    linenos = [t[0] for t in timings]
+    if os.path.exists(filename):
+        #str_out+=f'## Function: {func_name}\n'
+        out_table+=f'## Function: {func_name}\n'
+    if os.path.exists(filename):
+        # Clear the cache to ensure that we get up-to-date results.
+        linecache.clearcache()
+    all_lines = linecache.getlines(filename)
+    sublines = inspect.getblock(all_lines[start_lineno - 1:])
+    #str_out+='## Total time: %g s\n' % (total_time * unit)
+    out_table+='## Total time: %g s\n' % (total_time * unit)
+    # Define minimum column sizes so text fits and usually looks consistent
+    default_column_sizes = {
+        'hits': 9,
+        'time': 12,
+        'perhit': 8,
+        'percent': 8,
+    }
+    display = {}
+    # Loop over each line to determine better column formatting.
+    # Fallback to scientific notation if columns are larger than a threshold.
+    for lineno, nhits, time in timings:
+        if total_time == 0:  # Happens rarely on empty function
+            percent = ''
+        else:
+            percent = '%5.1f' % (100 * time / total_time)
+
+        time_disp = '%5.1f' % (time * scalar)
+        if len(time_disp) > default_column_sizes['time']:
+            time_disp = '%5.1g' % (time * scalar)
+        perhit_disp = '%5.1f' % (float(time) * scalar / nhits)
+        if len(perhit_disp) > default_column_sizes['perhit']:
+            perhit_disp = '%5.1g' % (float(time) * scalar / nhits)
+        nhits_disp = "%d" % nhits
+        if len(nhits_disp) > default_column_sizes['hits']:
+            nhits_disp = '%g' % nhits
+        display[lineno] = (nhits_disp, time_disp, perhit_disp, percent)
+    # Expand column sizes if the numbers are large.
+    column_sizes = default_column_sizes.copy()
+    if len(display):
+        max_hitlen = max(len(t[0]) for t in display.values())
+        max_timelen = max(len(t[1]) for t in display.values())
+        max_perhitlen = max(len(t[2]) for t in display.values())
+        column_sizes['hits'] = max(column_sizes['hits'], max_hitlen)
+        column_sizes['time'] = max(column_sizes['time'], max_timelen)
+        column_sizes['perhit'] = max(column_sizes['perhit'], max_perhitlen)
+    col_order = ['hits', 'time', 'perhit', 'percent']
+    lhs_template = ' '.join(['%' + str(column_sizes[k]) + 's' for k in col_order])
+    template = lhs_template + ' %-s'
+    linenos = range(start_lineno, start_lineno + len(sublines))
+    empty = ('', '', '', '')
+    header = ('Hits', 'Time', 'Per Hit', '% Time', 'Line Contents')
+    header = template % header
+    table_cols = ('Hits', 'Time', 'Per Hit', '% Time', 'Line Contents')
+    # str_out+='\n'
+    #str_out+=header
+    #str_out+='\n'
+    #str_out+='=' * len(header)
+    #str_out+='\n'
+    for lineno, line in zip(linenos, sublines):
+        nhits, time, per_hit, percent = display.get(lineno, empty)
+        line_ = line.rstrip('\n').rstrip('\r')
+        #txt = template % (nhits, time, per_hit, percent, line_)
+        if 'def' in line_ or nhits!='':
+            #str_out+=txt
+            #str_out+='\n'
+            table_rows.append((nhits, time, per_hit, percent, line_))
+    #str_out+='\n'
+    df = pd.DataFrame(table_rows, columns=table_cols)
+    out_table+= df.to_markdown(index=False, tablefmt="pipe")
+    out_table+='\n'
+    return out_table
+def show_text(stats):
+    """ Show text for the given timings.
+    """
+    #str_out = ""
+    #str_out+='# Timer unit: %g s\n' % stats.unit
+    out_table = ""
+    out_table+='# Timer unit: %g s\n' % stats.unit
+    stats_order = sorted(stats.timings.items())
+    # Show detailed per-line information for each function.
+    for (fn, lineno, name), timings in stats_order:
+        table_md =show_func(fn, lineno, name, stats.timings[fn, lineno, name], stats.unit)
+        #str_out+=table_str
+        out_table+=table_md
+    return out_table
+
+def parse_lprof_results(lprofiler_database_file: Path | None) -> str:
+    lprofiler_database_file = lprofiler_database_file.with_suffix(".lprof")
+    if not lprofiler_database_file.exists():
+        return ""
+    else:
+        with open(lprofiler_database_file,'rb') as f:
+            stats = pickle.load(f)
+        return show_text(stats)
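
show_func and show_text are adapted from line_profiler's text report: stats.unit is the timer unit in seconds, and stats.timings maps (filename, first_lineno, function_name) to a list of (lineno, nhits, time) tuples, which show_func joins with the source lines from linecache and renders as one pandas markdown table per function. A usage sketch of the new entry point follows; the path is hypothetical, and parse_lprof_results itself swaps the suffix to .lprof and returns an empty string when no profile database was written.

from pathlib import Path

from codeflash.verification.parse_test_output import parse_lprof_results

report = parse_lprof_results(Path("/tmp/codeflash/lprof_db"))  # hypothetical path
if report:
    # "# Timer unit: ..." followed by one "## Function: ..." heading and a
    # markdown table (Hits, Time, Per Hit, % Time, Line Contents) per profiled function.
    print(report)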

codeflash/verification/test_runner.py

Lines changed: 0 additions & 2 deletions

@@ -131,8 +131,6 @@ def run_behavioral_tests(
     else:
         msg = f"Unsupported test framework: {test_framework}"
         raise ValueError(msg)
-    if enable_lprofiler:
-        pass
     return result_file_path, results, coverage_database_file if enable_coverage else None, coverage_config_file if enable_coverage else None
 
 
