1
import os
import subprocess as sp
import sys
import timeit

# Absolute path to the `code_benches` directory (the script's working dir)
BENCH_ROOT = os.getcwd()
# Absolute path to the `silverfish` directory (parent of the bench dir)
ROOT_PATH = os.path.dirname(BENCH_ROOT)

RUNTIME_PATH = f"{ROOT_PATH}/runtime"
SILVERFISH_PATH = f"{ROOT_PATH}/target/release/silverfish"
WASMCEPTION_PATH = f"{ROOT_PATH}/wasmception"

# Our special WASM clang lives inside the wasmception toolchain
WASM_CLANG = f"{WASMCEPTION_PATH}/dist/bin/clang"
# These flags are all somewhat important -- see @Others for more information
WASM_LINKER_FLAGS = "-Wl,--allow-undefined,-z,stack-size={stack_size},--no-threads,--stack-first,--no-entry,--export-all,--export=main,--export=dummy"
# Point WASM to our custom libc
WASM_SYSROOT_FLAGS = f"--sysroot={WASMCEPTION_PATH}/sysroot"
WASM_FLAGS = WASM_LINKER_FLAGS + " --target=wasm32-unknown-unknown-wasm -nostartfiles -O3 -flto " + WASM_SYSROOT_FLAGS

# What is the machine we're running on like?
IS_64_BIT = sys.maxsize > 2 ** 32
IS_X86 = '86' in os.uname().machine
IS_32_BIT_X86 = IS_X86 and not IS_64_BIT

# TODO: Add an option to output a CSV instead of human readable output

# How many times each benchmark is repeated (best run is reported)
RUN_COUNT = 10
ENABLE_DEBUG_SYMBOLS = True
# FIXME: Mibench runs many of these programs multiple times, which is probably worth replicating
@@ -73,7 +82,7 @@ def __str__(self):
73
82
]
74
83
75
84
76
- # Now some helper methods for compiling code
85
+ # Compile the C code in `program`'s directory into a native executable
77
86
def compile_to_executable (program ):
78
87
opt = "-O3"
79
88
if program .do_lto :
@@ -83,21 +92,25 @@ def compile_to_executable(program):
83
92
sp .check_call ("clang {} -lm {} *.c -o bin/{}" .format (program .custom_arguments , opt , program .name ), shell = True , cwd = program .name )
84
93
85
94
95
def compile_to_wasm(program):
    """Compile the C sources in `program`'s directory into bin/<name>.wasm.

    Uses the wasmception clang with the program's configured stack size and
    any per-program custom arguments; ../dummy.c is linked in alongside the
    program's own sources.
    """
    wasm_flags = WASM_FLAGS.format(stack_size=program.stack_size)
    build_cmd = "{clang} {flags} {args} -O3 -flto ../dummy.c *.c -o bin/{pname}.wasm".format(
        clang=WASM_CLANG,
        flags=wasm_flags,
        args=program.custom_arguments,
        pname=program.name,
    )
    # shell=True so the *.c glob is expanded by the shell inside the program dir
    sp.check_call(build_cmd, shell=True, cwd=program.name)
91
101
92
102
103
def compile_wasm_to_bc(program):
    """Translate bin/<name>.wasm into LLVM bytecode with silverfish.

    Emits two variants: bin/<name>.bc and bin/<name>_rg.bc, the latter built
    with runtime globals enabled.
    """
    # FIXME: Runtime globals were a failed experiment -- evaluate removing all traces of it
    for extra_opts, suffix in (("", ""), ("-i --runtime_globals ", "_rg")):
        cmd = "{silverfish} {opts}bin/{pname}.wasm -o bin/{pname}{suffix}.bc".format(
            silverfish=SILVERFISH_PATH,
            opts=extra_opts,
            pname=program.name,
            suffix=suffix,
        )
        sp.check_call(cmd, shell=True, cwd=program.name)
99
111
100
112
113
# Compile the llvm bytecode in `program`'s directory into a native executable
101
114
def compile_wasm_to_executable (program , exe_postfix , memory_impl , runtime_globals = False ):
102
115
bc_file = "bin/{pname}.bc" .format (pname = program .name )
103
116
if runtime_globals :
@@ -112,18 +125,24 @@ def compile_wasm_to_executable(program, exe_postfix, memory_impl, runtime_global
112
125
sp .check_call (command , shell = True , cwd = program .name )
113
126
114
127
128
def execute(p, args, dir):
    """Run executable `p` with argument string `args` inside directory `dir`.

    Output is discarded; raises CalledProcessError on a non-zero exit.
    """
    full_command = " ".join((p, args))
    # Silence the benchmark's own output so timing isn't dominated by I/O
    sp.check_call(full_command, shell=True, cwd=dir, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
118
132
119
133
134
def bench(p, exe_postfix, name):
    """Benchmark one built variant of program `p` and return its best time.

    p           -- the program under test
    exe_postfix -- suffix given to the executable by `compile_wasm_to_executable`
    name        -- human readable label for this variant
    """
    print(f"Testing {p} {name}")
    exe_path = f"{BENCH_ROOT}/{p.name}/bin/{p.name}{exe_postfix}"
    arg_string = " ".join(str(a) for a in p.parameters)
    stmt = f"execute('{exe_path}', '{arg_string}', '{p.name}')"
    # Take the fastest of RUN_COUNT single-shot runs to reduce scheduling noise
    return min(timeit.repeat(stmt, 'from __main__ import execute', number=1, repeat=RUN_COUNT))
125
143
126
144
145
+ # Output a run's execution time, telling us how much faster or slower it is
127
146
def output_run (base_time , execution_time ):
128
147
base_time = round (base_time , 4 )
129
148
execution_time = round (execution_time , 4 )
@@ -133,6 +152,7 @@ def output_run(base_time, execution_time):
133
152
print ("{:.4f} ({:.2f}% faster)" .format (execution_time , ((base_time - execution_time ) / base_time ) * 100 ))
134
153
135
154
155
+ # Compile all our programs
136
156
for i , p in enumerate (programs ):
137
157
print ("Compiling {} {}/{}" .format (p .name , i + 1 , len (programs )))
138
158
@@ -151,6 +171,7 @@ def output_run(base_time, execution_time):
151
171
152
172
print ()
153
173
174
+ # Benchmark and output timing info for each of our programs
154
175
for p in programs :
155
176
base_speed = bench (p , "" , "native" )
156
177
print ("{:.4f}" .format (base_speed ))
0 commit comments