#!/usr/bin/env python3
import os
import platform
import subprocess
import sys

# The LaTeX helper lives in ./scripts (see latex_script below); make it
# importable when this runner is launched from the repository root.
sys.path.insert(0, './scripts')
from latex_table import generate_latex_table
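
# Assumed contract (inferred from its use in process_job below, not from the
# helper's own documentation): generate_latex_table(stdout: str) -> str parses
# the benchmark's text output and returns a complete LaTeX table.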

# Configuration
benchmark_executable = './build/benchmarks/benchmark'
latex_script = './scripts/latex_table.py'
output_dir = './outputs'
input_files = [
    'data/canada.txt',
    'data/mesh.txt',
]
models = [
    'uniform_01',
    'uniform_all',
    'integer_uniform',
    'centered',
    'non_centered',
]
runs_r = 1_000
volume_v = 1_000_000
# Flag sets to sweep for every input; the empty list runs the defaults.
flag_combinations = [
    [],
    ['-F6'],
    ['-s'],
    ['-F6', '-s'],
]

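# Benchmark CLI assumed by the invocations below (inferred from this script,
# not from the binary's own documentation):
#   -f <file>   read input from a file
#   -m <model>  synthesize input from a named random model
#   -v <n>      volume: how many values to synthesize (model mode only)
#   -r <n>      number of benchmark runs
# -F6 and -s are swept verbatim via flag_combinations; their semantics are
# defined by the benchmark binary itself.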

def get_cpu_model():
    """Return a human-readable CPU model string for the current platform."""
    if platform.system() == "Windows":
        return platform.processor()
    elif platform.system() == "Darwin":
        # sysctl lives in /usr/sbin, which is not always on PATH.
        os.environ['PATH'] = os.environ['PATH'] + os.pathsep + '/usr/sbin'
        command = ["sysctl", "-n", "machdep.cpu.brand_string"]
        return subprocess.check_output(command).decode().strip()
    elif platform.system() == "Linux":
        # /proc/cpuinfo repeats "model name" once per core; the first hit suffices.
        with open('/proc/cpuinfo') as f:
            for line in f:
                if line.startswith("model name"):
                    return line.split(':', 1)[1].strip()
    return "unknown_cpu"

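# Example (hypothetical): a Linux box reporting "Intel(R) Core(TM) i7-9700K
# CPU @ 3.60GHz" is sanitized below to "Intel(R)_Core(TM)_i7-9700K_CPU__3.60GHz".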

# Sanitize the CPU model so it can be embedded safely in file names.
CPUModel = get_cpu_model().replace(' ', '_').replace('/', '-').replace('@', '')
os.makedirs(output_dir, exist_ok=True)


# Run a command and return its stdout, raising on a non-zero exit status.
def run_cmd(cmd):
    result = subprocess.run(cmd, capture_output=True, text=True, check=True)
    return result.stdout


# Process a single benchmark invocation and generate a .tex table.
def process_job(label, cmd_args, flags):
    # Run the benchmark.
    cmd = [benchmark_executable] + cmd_args + flags
    print(f"Running: {' '.join(cmd)}")
    output = run_cmd(cmd)

    # Build the output file name from the CPU, the job label, and the flags.
    flag_label = ''.join([f.strip('-') for f in flags]) or 'none'
    safe_label = label.replace('.', '_')
    filename = f"{CPUModel}_{safe_label}_{flag_label}.tex"
    out_path = os.path.join(output_dir, filename)

    # Convert the benchmark output to LaTeX and write it to disk.
    tex_content = generate_latex_table(output)
    with open(out_path, 'w') as f:
        f.write(tex_content)
    print(f"Written: {out_path}\n")

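# For example, 'data/canada.txt' run with flags ['-F6', '-s'] ends up as
# ./outputs/<CPUModel>_canada_F6s.tex, and with no flags as
# ./outputs/<CPUModel>_canada_none.tex.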

if __name__ == '__main__':
    # File-based benchmarks
    for filepath in input_files:
        file_label = os.path.splitext(os.path.basename(filepath))[0]
        for flags in flag_combinations:
            process_job(
                label=file_label,
                cmd_args=['-f', filepath, '-r', str(runs_r)],
                flags=flags
            )

    # Model-based benchmarks
    for model in models:
        for flags in flag_combinations:
            process_job(
                label=model,
                cmd_args=['-m', model, '-v', str(volume_v), '-r', str(runs_r)],
                flags=flags
            )
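
# Altogether this sweeps 2 files x 4 flag sets + 5 models x 4 flag sets
# = 28 benchmark invocations, each writing one .tex table into ./outputs.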