Skip to content

Commit 072fb51

Browse files
committed
Use benchmarks.json file for benchmarking
Had to fix a few bugs in that file for invalid expressions. I also added a new benchmark case for function application.
1 parent 849ba35 commit 072fb51

File tree

2 files changed

+56
-28
lines changed

2 files changed

+56
-28
lines changed

perf/perftest.py

Lines changed: 26 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,11 @@
2020
from jmespath.lexer import Lexer
2121

2222

23-
DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cases')
23+
BENCHMARK_FILE = os.path.join(
24+
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
25+
'tests',
26+
'compliance',
27+
'benchmarks.json')
2428
APPROX_RUN_TIME = 0.5
2529

2630

@@ -30,16 +34,21 @@ def run_tests(tests):
3034
given = test['given']
3135
expression = test['expression']
3236
result = test['result']
37+
should_search = test['bench_type'] == 'full'
3338
lex_time = _lex_time(expression)
3439
parse_time = _parse_time(expression)
35-
search_time = _search_time(expression, given)
36-
combined_time = _combined_time(expression, given, result)
40+
if should_search:
41+
search_time = _search_time(expression, given)
42+
combined_time = _combined_time(expression, given, result)
43+
else:
44+
search_time = 0
45+
combined_time = 0
3746
sys.stdout.write(
38-
"lex_time: %.5fms, parse_time: %.5fms, search_time: %.5fms "
39-
"combined_time: %.5fms " % (1000 * lex_time,
40-
1000 * parse_time,
41-
1000 * search_time,
42-
1000 * combined_time))
47+
"lex_time: %10.5fus, parse_time: %10.5fus, search_time: %10.5fus "
48+
"combined_time: %10.5fus " % (1000000 * lex_time,
49+
1000000 * parse_time,
50+
1000000 * search_time,
51+
1000000 * combined_time))
4352
sys.stdout.write("name: %s\n" % test['name'])
4453

4554

@@ -129,28 +138,23 @@ def load_tests(filename):
129138

130139
def _add_cases(data, loaded, filename):
131140
for case in data['cases']:
132-
current = {'description': data.get('description', filename),
133-
'given': data['given'],
134-
'name': case.get('name', case['expression']),
135-
'expression': case['expression'],
136-
'result': case.get('result')}
141+
current = {
142+
'given': data['given'],
143+
'name': case.get('comment', case['expression']),
144+
'expression': case['expression'],
145+
'result': case.get('result'),
146+
'bench_type': case['bench'],
147+
}
137148
loaded.append(current)
138149
return loaded
139150

140151

141152
def main():
142153
parser = argparse.ArgumentParser()
143-
parser.add_argument('-d', '--directory', default=DIRECTORY)
144-
parser.add_argument('-f', '--filename')
154+
parser.add_argument('-f', '--filename', default=BENCHMARK_FILE)
145155
args = parser.parse_args()
146156
collected_tests = []
147-
if args.filename:
148-
collected_tests.extend(load_tests(args.filename))
149-
else:
150-
for filename in os.listdir(args.directory):
151-
if filename.endswith('.json'):
152-
full_path = os.path.join(args.directory, filename)
153-
collected_tests.extend(load_tests(full_path))
157+
collected_tests.extend(load_tests(args.filename))
154158
run_tests(collected_tests)
155159

156160

tests/compliance/benchmarks.json

Lines changed: 30 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -32,27 +32,41 @@
3232
}
3333
}
3434
}
35+
},
36+
"b": true,
37+
"c": {
38+
"d": true
3539
}
3640
},
3741
"cases": [
3842
{
3943
"comment": "simple field",
40-
"expression": "a",
44+
"expression": "b",
45+
"result": true,
4146
"bench": "full"
4247
},
4348
{
4449
"comment": "simple subexpression",
45-
"expression": "a.b",
50+
"expression": "c.d",
51+
"result": true,
4652
"bench": "full"
4753
},
4854
{
49-
"comment": "deep field selection",
55+
"comment": "deep field selection no match",
5056
"expression": "a.b.c.d.e.f.g.h.i.j.k.l.m.n.o.p.q.r.s",
57+
"result": null,
58+
"bench": "full"
59+
},
60+
{
61+
"comment": "deep field selection",
62+
"expression": "a.b.c.d.e.f.g.h.i.j.k.l.m.n.o.p",
63+
"result": true,
5164
"bench": "full"
5265
},
5366
{
5467
"comment": "simple or",
55-
"expression": "not_there || a",
68+
"expression": "not_there || b",
69+
"result": true,
5670
"bench": "full"
5771
}
5872
]
@@ -67,21 +81,31 @@
6781
{
6882
"comment": "deep ands",
6983
"expression": "a && b && c && d && e && f && g && h && i && j && k && l && m && n && o && p && q && r && s && t && u && v && w && x && y && z",
84+
"result": 25,
7085
"bench": "full"
7186
},
7287
{
7388
"comment": "deep ors",
7489
"expression": "z || y || x || w || v || u || t || s || r || q || p || o || n || m || l || k || j || i || h || g || f || e || d || c || b || a",
90+
"result": 25,
7591
"bench": "full"
7692
},
7793
{
7894
"comment": "lots of summing",
79-
"expression": "sum(z, y, x, w, v, u, t, s, r, q, p, o, n, m, l, k, j, i, h, g, f, e, d, c, b, a)",
95+
"expression": "sum([z, y, x, w, v, u, t, s, r, q, p, o, n, m, l, k, j, i, h, g, f, e, d, c, b, a])",
96+
"result": 325,
97+
"bench": "full"
98+
},
99+
{
100+
"comment": "lots of function application",
101+
"expression": "sum([z, sum([y, sum([x, sum([w, sum([v, sum([u, sum([t, sum([s, sum([r, sum([q, sum([p, sum([o, sum([n, sum([m, sum([l, sum([k, sum([j, sum([i, sum([h, sum([g, sum([f, sum([e, sum([d, sum([c, sum([b, a])])])])])])])])])])])])])])])])])])])])])])])])])",
102+
"result": 325,
80103
"bench": "full"
81104
},
82105
{
83106
"comment": "lots of multi list",
84107
"expression": "[z, y, x, w, v, u, t, s, r, q, p, o, n, m, l, k, j, i, h, g, f, e, d, c, b, a]",
108+
"result": [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
85109
"bench": "full"
86110
}
87111
]
@@ -116,7 +140,7 @@
116140
},
117141
{
118142
"comment": "filter projection",
119-
"expression": "foo[bar > baz][qux > baz]",
143+
"expression": "foo[?bar > baz][?qux > baz]",
120144
"bench": "parse"
121145
}
122146
]

0 commit comments

Comments
 (0)