Skip to content

Commit 669357f

Browse files
committed
add simple bench_harness utility script to be used for simple benchmarks
- supports a configurable number of iterations (for warmup analysis); the special __warmup__ and __benchmark__ methods are used to run the actual benchmark
1 parent 57f0635 commit 669357f

File tree

2 files changed

+79
-36
lines changed

2 files changed

+79
-36
lines changed

mx.graalpython/bench_harness.py

Lines changed: 73 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -37,49 +37,92 @@
3737
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
3838
# SOFTWARE.
3939

40+
import _io
4041
import argparse
42+
import os
4143
import sys
4244
from time import time
4345

46+
_HRULE = '-'.join(['' for i in range(120)])
47+
ATTR_WARMUP = '__warmup__'
48+
ATTR_BENCHMARK = '__benchmark__'
4449

45-
def _get_duration_message(i, name, duration):
46-
return "iteration=%s, name=%s, duration=%s".format(i, name, duration)
4750

51+
class BenchRunner(object):
52+
def __init__(self, bench_file, bench_args=None, iterations=1, verbose=False):
53+
if bench_args is None:
54+
bench_args = []
55+
self.bench_module = BenchRunner.get_bench_module(bench_file)
56+
self.bench_args = bench_args
57+
self.verbose = verbose
58+
if isinstance(iterations, (list, tuple)):
59+
iterations = iterations[0]
60+
if isinstance(iterations, str):
61+
iterations = int(iterations)
62+
self.iterations = iterations
4863

49-
def benchmark(name, iterations=1):
50-
def fnbenchmark(func):
51-
def wrapper(*args, **kwargs):
52-
for i in range(iterations):
53-
start = time()
54-
func(*args, **kwargs)
55-
duration = "%.3f\n" % (time() - start)
56-
print(_get_duration_message(i, name, duration))
57-
return wrapper
58-
return fnbenchmark
64+
@staticmethod
65+
def get_bench_module(bench_file):
66+
name = bench_file.rpartition("/")[2].partition(".")[0].replace('.py', '')
67+
directory = bench_file.rpartition("/")[0]
68+
pkg = []
69+
while any(f.endswith("__init__.py") for f in os.listdir(directory)):
70+
directory, slash, postfix = directory.rpartition("/")
71+
pkg.insert(0, postfix)
72+
73+
if pkg:
74+
sys.path.insert(0, directory)
75+
bench_module = __import__(".".join(pkg + [name]))
76+
for p in pkg[1:]:
77+
bench_module = getattr(bench_module, p)
78+
bench_module = getattr(bench_module, name)
79+
return bench_module
80+
81+
else:
82+
bench_module = type(sys)(name, bench_file)
83+
with _io.FileIO(bench_file, "r") as f:
84+
bench_module.__file__ = bench_file
85+
exec(compile(f.readall(), bench_file, "exec"), bench_module.__dict__)
86+
return bench_module
5987

88+
def _get_attr(self, attr_name):
89+
if hasattr(self.bench_module, attr_name):
90+
return getattr(self.bench_module, attr_name)
6091

61-
def _call_if_defined(obj, attr_name):
62-
if hasattr(obj, attr_name):
63-
attr = getattr(obj, attr_name)
64-
if hasattr(attr, '__call__'):
92+
def _call_attr(self, attr_name):
93+
attr = self._get_attr(attr_name)
94+
if attr and hasattr(attr, '__call__'):
6595
attr()
6696

97+
def run(self):
98+
if self.verbose:
99+
print(_HRULE)
100+
print(self.bench_module.__name__)
101+
print(_HRULE)
102+
103+
print("### warming up ... ")
104+
self._call_attr(ATTR_WARMUP)
105+
print("### running benchmark ... ")
106+
107+
bench_func = self._get_attr(ATTR_BENCHMARK)
108+
if bench_func and hasattr(bench_func, '__call__'):
109+
for i in range(self.iterations):
110+
start = time()
111+
bench_func(*self.bench_args)
112+
duration = "%.3f\n" % (time() - start)
113+
print("### iteration={}, name={}, duration={}".format(i, self.bench_module.__name__, duration))
67114

68-
ATTR_SETUP = '__setup__'
69-
ATTR_WARMUP = '__warmup__'
70-
ATTR_BENCHMARK = '__benchmark__'
71115

116+
def run_benchmark(prog, args):
117+
parser = argparse.ArgumentParser(prog=prog, description="Run specified benchmark.")
118+
parser.add_argument("-v", "--verbose", help="Verbose output.", action="store_true")
119+
parser.add_argument("-i", "--iterations", help="The number of iterations top run each benchmark.", default=1)
120+
parser.add_argument("bench_file", metavar='BENCH', help="Path to the benchmark to execute.", nargs=1)
121+
parser.add_argument("bench_args", metavar='ARGS', help="Path to the benchmarks to execute.", nargs='*', default=None)
72122

73-
def run_benchmark(name):
74-
parser = argparse.ArgumentParser(description="Run specified benchmark.")
75-
parser.add_argument("-i", "--iterations", help="Number of iterations.", type=int)
123+
args = parser.parse_args(args)
124+
BenchRunner(args.bench_file[0], bench_args=args.bench_args, iterations=args.iterations, verbose=args.verbose).run()
76125

77-
args = parser.parse_args()
78-
current_module = sys.modules[__name__]
79-
_call_if_defined(current_module, ATTR_SETUP)
80-
_call_if_defined(current_module, ATTR_WARMUP)
81-
bench = getattr(current_module, ATTR_BENCHMARK)
82-
if not bench:
83-
raise ValueError('%s not defined for %s'.format(ATTR_BENCHMARK, name))
84126

85-
benchmark(name, iterations=args.iterations)(bench)()
127+
if __name__ == '__main__':
128+
run_benchmark(sys.argv[0], sys.argv[1:])

mx.graalpython/mx_graalpython_benchmark.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@
3737
# the graalpython suite
3838
#
3939
# ----------------------------------------------------------------------------------------------------------------------
40-
_truffle_python_suite = mx.suite("graalpython")
40+
_graalpython_suite = mx.suite("graalpython")
4141

4242
# ----------------------------------------------------------------------------------------------------------------------
4343
#
@@ -158,7 +158,7 @@ def run(self, cwd, args):
158158
]
159159

160160
vm_args = [
161-
"-Dpython.home=%s" % join(_truffle_python_suite.dir, "graalpython"),
161+
"-Dpython.home=%s" % join(_graalpython_suite.dir, "graalpython"),
162162
'-cp',
163163
mx.classpath(["com.oracle.graal.python", "com.oracle.graal.python.shell"]),
164164
"com.oracle.graal.python.shell.GraalPythonMain"
@@ -183,7 +183,7 @@ class PythonBenchmarkSuite(VmBenchmarkSuite):
183183
def __init__(self, name):
184184
self._name = name
185185
self._bench_path, self._benchmarks = benchmarks_list[self._name]
186-
self._bench_path = join(_truffle_python_suite.dir, self._bench_path)
186+
self._bench_path = join(_graalpython_suite.dir, self._bench_path)
187187

188188
def rules(self, output, benchmarks, bm_suite_args):
189189
bench_name = os.path.basename(os.path.splitext(benchmarks[0])[0])
@@ -259,6 +259,6 @@ def get_benchmark_suites(cls):
259259
#
260260
# ----------------------------------------------------------------------------------------------------------------------
261261
python_vm_registry = VmRegistry(PYTHON_VM_REGISTRY_NAME, known_host_registries=[java_vm_registry])
262-
python_vm_registry.add_vm(CPythonVm(CONFIGURATION_DEFAULT), _truffle_python_suite)
263-
python_vm_registry.add_vm(PyPyVm(CONFIGURATION_DEFAULT), _truffle_python_suite)
264-
python_vm_registry.add_vm(GraalPythonVm(), _truffle_python_suite, 10)
262+
python_vm_registry.add_vm(CPythonVm(CONFIGURATION_DEFAULT), _graalpython_suite)
263+
python_vm_registry.add_vm(PyPyVm(CONFIGURATION_DEFAULT), _graalpython_suite)
264+
python_vm_registry.add_vm(GraalPythonVm(), _graalpython_suite, 10)

0 commit comments

Comments
 (0)