Skip to content

Commit 84476fd

Browse files
committed
Add benchmark suite for (early/late) startup
1 parent d495421 commit 84476fd

File tree

3 files changed

+74
-3
lines changed

3 files changed

+74
-3
lines changed

mx.graalpython/mx_graalpython.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -55,11 +55,11 @@
5555
import mx_urlrewrites
5656
import mx_graalpython_bisect
5757
from mx_gate import Task
58-
from mx_graalpython_bench_param import PATH_MESO, BENCHMARKS, JBENCHMARKS
58+
from mx_graalpython_bench_param import PATH_MESO, BENCHMARKS, WARMUP_BENCHMARKS, JBENCHMARKS
5959
from mx_graalpython_benchmark import PythonBenchmarkSuite, python_vm_registry, CPythonVm, PyPyVm, JythonVm, GraalPythonVm, \
6060
CONFIGURATION_DEFAULT, CONFIGURATION_SANDBOXED, CONFIGURATION_NATIVE, \
6161
CONFIGURATION_DEFAULT_MULTI, CONFIGURATION_SANDBOXED_MULTI, CONFIGURATION_NATIVE_MULTI, \
62-
PythonInteropBenchmarkSuite
62+
PythonInteropBenchmarkSuite, PythonVmWarmupBenchmarkSuite
6363

6464

6565
if not sys.modules.get("__main__"):
@@ -1467,6 +1467,8 @@ def _register_vms(namespace):
14671467
def _register_bench_suites(namespace):
14681468
for py_bench_suite in PythonBenchmarkSuite.get_benchmark_suites(BENCHMARKS):
14691469
mx_benchmark.add_bm_suite(py_bench_suite)
1470+
for py_bench_suite in PythonVmWarmupBenchmarkSuite.get_benchmark_suites(WARMUP_BENCHMARKS):
1471+
mx_benchmark.add_bm_suite(py_bench_suite)
14701472
for java_bench_suite in PythonInteropBenchmarkSuite.get_benchmark_suites(JBENCHMARKS):
14711473
mx_benchmark.add_bm_suite(java_bench_suite)
14721474

mx.graalpython/mx_graalpython_bench_param.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
PATH_MICRO = os.path.join(_BASE_PATH, 'micro')
3737
PATH_MESO = os.path.join(_BASE_PATH, 'meso')
3838
PATH_MACRO = os.path.join(_BASE_PATH, 'macro')
39+
PATH_WARMUP = os.path.join(_BASE_PATH, 'warmup')
3940
PATH_INTEROP = os.path.join(_BASE_PATH, 'host_interop')
4041

4142
# ----------------------------------------------------------------------------------------------------------------------
@@ -44,6 +45,7 @@
4445
#
4546
# ----------------------------------------------------------------------------------------------------------------------
4647
# the argument list contains both the harness and benchmark args
48+
ITER_100 = ['-i', '100']
4749
ITER_50 = ['-i', '50']
4850
ITER_35 = ['-i', '35']
4951
ITER_25 = ['-i', '25']
@@ -182,6 +184,14 @@ def _pickling_benchmarks(module='pickle'):
182184
}
183185

184186

187+
WARMUP_BENCHMARKS = {
188+
'gcbench': ITER_100 + ["--startup=1,10,100"] + ['10'] ,
189+
'binarytrees3': ITER_100 + ["--startup=1,10,100"] + ['18'],
190+
#'binarytrees3': ["-i", "2", "--startup=1,10,100"] + ['18'],
191+
'pads-integerpartitions': ITER_100 + ["--startup=1,10,100"] + ['700'],
192+
}
193+
194+
185195
INTEROP_BENCHMARKS = {
186196
'euler_java': ITER_10 + ['200'],
187197
'image-magix': ITER_10 + ['10000'],
@@ -212,6 +222,10 @@ def _pickling_benchmarks(module='pickle'):
212222
"interop": [PATH_INTEROP, INTEROP_BENCHMARKS],
213223
}
214224

225+
WARMUP_BENCHMARKS = {
226+
"warmup": [PATH_WARMUP, WARMUP_BENCHMARKS],
227+
}
228+
215229
JBENCHMARKS = {
216230
"pyjava": [INTEROP_JAVA_BENCHMARKS],
217231
}

mx.graalpython/mx_graalpython_benchmark.py

Lines changed: 56 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -284,7 +284,7 @@ def hosting_registry(self):
284284

285285
def run(self, cwd, args):
286286
_check_vm_args(self.name(), args)
287-
extra_polyglot_args = ["--experimental-options", "--python.MaxNativeMemory=%s" % (2**34)] + self._extra_polyglot_args
287+
extra_polyglot_args = ["--experimental-options", "-snapshot-startup", "--python.MaxNativeMemory=%s" % (2**34)] + self._extra_polyglot_args
288288

289289
host_vm = self.host_vm()
290290
if hasattr(host_vm, 'run_lang'): # this is a full GraalVM build
@@ -601,3 +601,58 @@ def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
601601
def get_benchmark_suites(cls, benchmarks):
602602
assert isinstance(benchmarks, dict), "benchmarks must be a dict: {suite: {bench: args, ... }, ...}"
603603
return [cls(suite_name, suite_info[0]) for suite_name, suite_info in benchmarks.items()]
604+
605+
606+
class PythonVmWarmupBenchmarkSuite(PythonBenchmarkSuite):
607+
def rules(self, output, benchmarks, bm_suite_args):
608+
bench_name = self.get_bench_name(benchmarks)
609+
arg = self.get_arg(bench_name)
610+
611+
return [
612+
# startup (difference between start of VM to end of first iteration)
613+
StdOutRule(
614+
r"### STARTUP +at iteration: (?P<iteration>[0-9]+), +duration: (?P<time>[0-9]+(\.[0-9]+)?$)",
615+
{
616+
"benchmark": '{}.{}'.format(self._name, bench_name),
617+
"metric.name": "startup",
618+
"metric.iteration": ("<iteration>", int),
619+
"metric.type": "numeric",
620+
"metric.value": ("<time>", float),
621+
"metric.unit": "s",
622+
"metric.score-function": "id",
623+
"metric.better": "lower",
624+
"config.run-flags": "".join(arg),
625+
}
626+
),
627+
628+
StdOutRule(
629+
r"### EARLY WARMUP +at iteration: (?P<iteration>[0-9]+), +duration: (?P<time>[0-9]+(\.[0-9]+)?$)",
630+
{
631+
"benchmark": '{}.{}'.format(self._name, bench_name),
632+
"metric.name": "early-warmup",
633+
"metric.iteration": ("<iteration>", int),
634+
"metric.type": "numeric",
635+
"metric.value": ("<time>", float),
636+
"metric.unit": "s",
637+
"metric.score-function": "id",
638+
"metric.better": "lower",
639+
"config.run-flags": "".join(arg),
640+
}
641+
),
642+
643+
StdOutRule(
644+
r"### LATE WARMUP +at iteration: (?P<iteration>[0-9]+), +duration: (?P<time>[0-9]+(\.[0-9]+)?$)",
645+
{
646+
"benchmark": '{}.{}'.format(self._name, bench_name),
647+
"metric.name": "late-warmup",
648+
"metric.iteration": ("<iteration>", int),
649+
"metric.type": "numeric",
650+
"metric.value": ("<time>", float),
651+
"metric.unit": "s",
652+
"metric.score-function": "id",
653+
"metric.better": "lower",
654+
"config.run-flags": "".join(arg),
655+
}
656+
),
657+
]
658+

0 commit comments

Comments (0)