Commit 068cdb2

add tests for json parsing
1 parent 6f3dc57 commit 068cdb2

File tree: 3 files changed, +212 -6 lines changed

mx.graalpython/mx_graalpython_python_benchmarks.py

Lines changed: 17 additions & 6 deletions
@@ -3,19 +3,15 @@
 
 import glob
 import json
+import math
 import os
 import shutil
 import subprocess
 import sys
 
-from mx_graalpython_benchmark import python_vm_registry
-
 from os.path import join, abspath, exists
 
 
-SUITE = mx.suite("graalpython")
-
-
 class PyPerfJsonRule(mx_benchmark.Rule):
     """Parses a JSON file produced by PyPerf and creates a measurement result."""
 
@@ -108,6 +104,9 @@ def parse(self, text: str) -> dict:
         for benchmark, result in js["results"].items():
             param_combinations = itertools.product(*result[param_idx])
             for run_idx, params in enumerate(param_combinations):
+                value = result[peak_idx][run_idx]
+                if math.isnan(value):
+                    continue
                 r.append(
                     {
                         "bench-suite": self.suiteName,
@@ -118,7 +117,7 @@ def parse(self, text: str) -> dict:
                         "metric.better": "lower",
                         "metric.type": "numeric",
                         "metric.iteration": 0,
-                        "metric.value": result[peak_idx][run_idx],
+                        "metric.value": value,
                         "config.run-flags": " ".join(params),
                     }
                 )
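
Note (not part of the diff): judging by the new test_asv_parsing2 below, this guard handles result files in which failed or skipped runs are recorded as NaN; previously such a run was forwarded as a bogus "metric.value". A minimal sketch of the filtering, with hypothetical peak values:

import math

peak_values = [12.5, float("nan"), 11.9]  # hypothetical per-run peak values
reported = [v for v in peak_values if not math.isnan(v)]
assert reported == [12.5, 11.9]  # the NaN run is skipped, not reported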
@@ -252,6 +251,12 @@ class WildcardList:
     def __contains__(self, x):
         return True
 
+    def __iter__(self):
+        mx.abort(
+            "Cannot iterate over benchmark names in foreign benchmark suites. "
+            + "Leave off the benchmark name part to run all, or name the benchmarks yourself."
+        )
+
 
 class PyPerformanceSuite(
     mx_benchmark.TemporaryWorkdirMixin, mx_benchmark.VmBenchmarkSuite
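
Note (not part of the diff): WildcardList answers every membership check with True, so it stands in for "all benchmarks". The new __iter__ makes any attempt to enumerate those names fail fast with a clear message instead of silently yielding nothing. A sketch of the resulting contract, assuming mx.abort stops execution:

wl = WildcardList()
assert "any_benchmark_name" in wl  # __contains__ always matches
# for name in wl: ...              # would hit mx.abort() with the message above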
@@ -497,6 +502,12 @@ def _vmRun(self, vm, workdir, command, benchmarks, bmSuiteArgs):
 
 
 def register_python_benchmarks():
+    global python_vm_registry, SUITE
+
+    from mx_graalpython_benchmark import python_vm_registry
+
+    SUITE = mx.suite("graalpython")
+
     python_vm_registry.add_vm(PyPyVm())
     python_vm_registry.add_vm(Python3Vm())
     for config_name, options, priority in [
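
Note (not part of the diff): the module-level python_vm_registry import and SUITE lookup deleted in the first hunk reappear here inside register_python_benchmarks(), presumably so the module can be imported (e.g. by the new tests) without a fully initialized mx environment. Thanks to the global statement, the function-local import and suite lookup rebind the module-level names. The pattern in isolation, as a sketch:

# module-level state starts unset; filled in lazily at registration time
python_vm_registry = None
SUITE = None

def register_python_benchmarks():
    global python_vm_registry, SUITE
    from mx_graalpython_benchmark import python_vm_registry  # rebinds the global
    SUITE = mx.suite("graalpython")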

mx.graalpython/test_json_parsing.py

Lines changed: 175 additions & 0 deletions
@@ -0,0 +1,175 @@
+# Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# The Universal Permissive License (UPL), Version 1.0
+#
+# Subject to the condition set forth below, permission is hereby granted to any
+# person obtaining a copy of this software, associated documentation and/or
+# data (collectively the "Software"), free of charge and under any and all
+# copyright rights in the Software, and any and all patent rights owned or
+# freely licensable by each licensor hereunder covering either (i) the
+# unmodified Software as contributed to or provided by such licensor, or (ii)
+# the Larger Works (as defined below), to deal in both
+#
+# (a) the Software, and
+#
+# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
+# one is included with the Software each a "Larger Work" to which the Software
+# is contributed by such licensors),
+#
+# without restriction, including without limitation the rights to copy, create
+# derivative works of, display, perform, and distribute the Software and make,
+# use, sell, offer for sale, import, export, have made, and have sold the
+# Software and the Larger Work(s), and to sublicense the foregoing rights on
+# either these or other terms.
+#
+# This license is subject to the following condition:
+#
+# The above copyright notice and either this complete permission notice or at a
+# minimum a reference to the UPL must be included in all copies or substantial
+# portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# pylint: skip-file
+import math
+import os
+import shutil
+import sys
+import unittest
+
+from unittest import mock
+
+from test_json_parsing_data import *
+
+sys.path.append(os.path.join(os.path.dirname(__file__)))
+try:
+    import mx
+except ImportError:
+    if mx_exe := shutil.which("mx"):
+        sys.path.append(os.path.dirname(mx_exe))
+
+
+class TestJsonBenchmarkParsers(unittest.TestCase):
+    def test_pyperf_parsing_multi(self):
+        import mx
+        import mx_graalpython_python_benchmarks
+        from mx_graalpython_python_benchmarks import PyPerfJsonRule
+
+        rule = PyPerfJsonRule("", "pyperformance")
+
+        with mock.patch('mx_graalpython_python_benchmarks.open', mock.mock_open(read_data=PYPERF_INPUT)):
+            results = rule.parse("ignored")
+
+        benchmarks = ["deltablue", "regex_dna"]
+        benchmarks_found = set()
+
+        for result in results:
+            self.assertEqual(result["bench-suite"], "pyperformance")
+            self.assertIn(result["benchmark"], benchmarks)
+            benchmarks_found.add(result["benchmark"])
+            self.assertIn(result["metric.name"], ["time", "max-rss", "warmup"])
+            self.assertIn(result["metric.unit"], ["B", "ms"])
+            self.assertEqual(result["metric.score-function"], "id")
+            self.assertEqual(result["metric.type"], "numeric")
+            self.assertIsInstance(result["metric.value"], (float, int))
+            self.assertIsInstance(result["metric.iteration"], int)
+
+        self.assertSetEqual(benchmarks_found, set(benchmarks))
+
+    def test_pyperf_parsing_single(self):
+        import mx
+        import mx_graalpython_python_benchmarks
+        from mx_graalpython_python_benchmarks import PyPerfJsonRule
+
+        rule = PyPerfJsonRule("", "pyperformance")
+
+        with mock.patch('mx_graalpython_python_benchmarks.open', mock.mock_open(read_data=PYPERF_INPUT_SINGLE)):
+            results = rule.parse("ignored")
+
+        for result in results:
+            self.assertEqual(result["bench-suite"], "pyperformance")
+            self.assertEqual(result["benchmark"], "deltablue")
+            self.assertIn(result["metric.name"], ["time", "max-rss", "warmup"])
+            self.assertIn(result["metric.unit"], ["B", "ms"])
+            self.assertEqual(result["metric.score-function"], "id")
+            self.assertEqual(result["metric.type"], "numeric")
+            self.assertIsInstance(result["metric.value"], (float, int))
+            self.assertIsInstance(result["metric.iteration"], int)
+
+    def test_asv_parsing(self):
+        import mx
+        import mx_graalpython_python_benchmarks
+        from mx_graalpython_python_benchmarks import AsvJsonRule
+
+        rule = AsvJsonRule("", "asv")
+
+        with mock.patch('mx_graalpython_python_benchmarks.open', mock.mock_open(read_data=ASV_JSON)):
+            results = rule.parse("ignored")
+
+        benchmarks = ["bench_app.LaplaceInplace.time_it", "bench_app.MaxesOfDots.time_it"]
+        benchmarks_found = set()
+
+        for result in results:
+            self.assertEqual(result["bench-suite"], "asv")
+            self.assertIn(result["benchmark"], benchmarks)
+            benchmarks_found.add(result["benchmark"])
+            self.assertIn(result["metric.name"], ["time", "warmup"])
+            self.assertIn(result["metric.unit"], ["s"])
+            self.assertEqual(result["metric.score-function"], "id")
+            self.assertEqual(result["metric.type"], "numeric")
+            self.assertIsInstance(result["metric.value"], float)
+            self.assertIsInstance(result["metric.iteration"], int)
+            self.assertIn(result["config.run-flags"], ["", "'inplace'", "'normal'"])
+
+        self.assertSetEqual(benchmarks_found, set(benchmarks))
+
+        # 3 time results: LaplaceInplace with 2 params, and MaxesOfDots without params
+        self.assertEqual(len([result for result in results if result["metric.name"] == "time"]), 3)
+        # 30 warmups, 10 for each time result
+        self.assertEqual(len([result for result in results if result["metric.name"] == "warmup"]), 30)
+
+    def test_asv_parsing2(self):
+        import mx
+        import mx_graalpython_python_benchmarks
+        from mx_graalpython_python_benchmarks import AsvJsonRule
+
+        rule = AsvJsonRule("", "asv")
+
+        with mock.patch('mx_graalpython_python_benchmarks.open', mock.mock_open(read_data=ASV_JSON2)):
+            results = rule.parse("ignored")
+
+        for result in results:
+            self.assertFalse(math.isnan(result["metric.value"]), "nan-results are not reported")
+
+    def test_pypy_results_parsing(self):
+        import mx
+        import mx_graalpython_python_benchmarks
+        from mx_graalpython_python_benchmarks import PyPyJsonRule
+
+        rule = PyPyJsonRule("", "pypy")
+
+        with mock.patch('mx_graalpython_python_benchmarks.open', mock.mock_open(read_data=PYPY_JSON)):
+            results = rule.parse("ignored")
+
+        for result in results:
+            self.assertEqual(result["bench-suite"], "pypy")
+            self.assertIn(result["benchmark"], ["ai", "deltablue"])
+            self.assertIn(result["metric.name"], ["time"])
+            self.assertIn(result["metric.unit"], ["s"])
+            self.assertEqual(result["metric.score-function"], "id")
+            self.assertEqual(result["metric.type"], "numeric")
+            self.assertIsInstance(result["metric.value"], float)
+            self.assertIsInstance(result["metric.iteration"], int)
+
+        self.assertEqual(len(results), 100, "should have 2*50 peak values")
+
+
+if __name__ == '__main__':
+    unittest.main()
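
Note (not part of the diff): the tests patch open in the benchmark module's own namespace, so rule.parse() reads canned JSON (PYPERF_INPUT, ASV_JSON, PYPY_JSON and friends come from the accompanying test_json_parsing_data module) without touching the filesystem. The same technique against a hypothetical module mymodule:

from unittest import mock

import mymodule  # hypothetical module whose parser calls open()

# while patched, open() calls made inside mymodule return the canned text
with mock.patch("mymodule.open", mock.mock_open(read_data='{"results": {}}')):
    data = mymodule.load_results("ignored-path")  # hypothetical parser entry point

The file is also runnable on its own, e.g. python3 test_json_parsing.py from mx.graalpython/, assuming mx is importable or the mx launcher is on PATH for the fallback in the header.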
