Skip to content

Commit 3a51a3f

Browse files
committed
Add latest compliance tests
Also parse, but don't run, benchmark tests. I'll have a separate script for benchmarks.
1 parent 48269d2 commit 3a51a3f

File tree

4 files changed

+152
-21
lines changed

4 files changed

+152
-21
lines changed

tests/compliance/benchmarks.json

Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
[
2+
{
3+
"given": {
4+
"long_name_for_a_field": true,
5+
"a": {
6+
"b": {
7+
"c": {
8+
"d": {
9+
"e": {
10+
"f": {
11+
"g": {
12+
"h": {
13+
"i": {
14+
"j": {
15+
"k": {
16+
"l": {
17+
"m": {
18+
"n": {
19+
"o": {
20+
"p": true
21+
}
22+
}
23+
}
24+
}
25+
}
26+
}
27+
}
28+
}
29+
}
30+
}
31+
}
32+
}
33+
}
34+
}
35+
}
36+
},
37+
"cases": [
38+
{
39+
"comment": "simple field",
40+
"expression": "a",
41+
"bench": "full"
42+
},
43+
{
44+
"comment": "simple subexpression",
45+
"expression": "a.b",
46+
"bench": "full"
47+
},
48+
{
49+
"comment": "deep field selection",
50+
"expression": "a.b.c.d.e.f.g.h.i.j.k.l.m.n.o.p.q.r.s",
51+
"bench": "full"
52+
},
53+
{
54+
"comment": "simple or",
55+
"expression": "not_there || a",
56+
"bench": "full"
57+
}
58+
]
59+
},
60+
{
61+
"given": {
62+
"a":0,"b":1,"c":2,"d":3,"e":4,"f":5,"g":6,"h":7,"i":8,"j":9,"k":10,
63+
"l":11,"m":12,"n":13,"o":14,"p":15,"q":16,"r":17,"s":18,"t":19,"u":20,
64+
"v":21,"w":22,"x":23,"y":24,"z":25
65+
},
66+
"cases": [
67+
{
68+
"comment": "deep ands",
69+
"expression": "a && b && c && d && e && f && g && h && i && j && k && l && m && n && o && p && q && r && s && t && u && v && w && x && y && z",
70+
"bench": "full"
71+
},
72+
{
73+
"comment": "deep ors",
74+
"expression": "z || y || x || w || v || u || t || s || r || q || p || o || n || m || l || k || j || i || h || g || f || e || d || c || b || a",
75+
"bench": "full"
76+
},
77+
{
78+
"comment": "lots of summing",
79+
"expression": "sum(z, y, x, w, v, u, t, s, r, q, p, o, n, m, l, k, j, i, h, g, f, e, d, c, b, a)",
80+
"bench": "full"
81+
},
82+
{
83+
"comment": "lots of multi list",
84+
"expression": "[z, y, x, w, v, u, t, s, r, q, p, o, n, m, l, k, j, i, h, g, f, e, d, c, b, a]",
85+
"bench": "full"
86+
}
87+
]
88+
},
89+
{
90+
"given": {},
91+
"cases": [
92+
{
93+
"comment": "field 50",
94+
"expression": "j49.j48.j47.j46.j45.j44.j43.j42.j41.j40.j39.j38.j37.j36.j35.j34.j33.j32.j31.j30.j29.j28.j27.j26.j25.j24.j23.j22.j21.j20.j19.j18.j17.j16.j15.j14.j13.j12.j11.j10.j9.j8.j7.j6.j5.j4.j3.j2.j1.j0",
95+
"bench": "parse"
96+
},
97+
{
98+
"comment": "pipe 50",
99+
"expression": "j49|j48|j47|j46|j45|j44|j43|j42|j41|j40|j39|j38|j37|j36|j35|j34|j33|j32|j31|j30|j29|j28|j27|j26|j25|j24|j23|j22|j21|j20|j19|j18|j17|j16|j15|j14|j13|j12|j11|j10|j9|j8|j7|j6|j5|j4|j3|j2|j1|j0",
100+
"bench": "parse"
101+
},
102+
{
103+
"comment": "index 50",
104+
"expression": "[49][48][47][46][45][44][43][42][41][40][39][38][37][36][35][34][33][32][31][30][29][28][27][26][25][24][23][22][21][20][19][18][17][16][15][14][13][12][11][10][9][8][7][6][5][4][3][2][1][0]",
105+
"bench": "parse"
106+
},
107+
{
108+
"comment": "long raw string literal",
109+
"expression": "'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'",
110+
"bench": "parse"
111+
},
112+
{
113+
"comment": "deep projection 104",
114+
"expression": "a[*].b[*].c[*].d[*].e[*].f[*].g[*].h[*].i[*].j[*].k[*].l[*].m[*].n[*].o[*].p[*].q[*].r[*].s[*].t[*].u[*].v[*].w[*].x[*].y[*].z[*].a[*].b[*].c[*].d[*].e[*].f[*].g[*].h[*].i[*].j[*].k[*].l[*].m[*].n[*].o[*].p[*].q[*].r[*].s[*].t[*].u[*].v[*].w[*].x[*].y[*].z[*].a[*].b[*].c[*].d[*].e[*].f[*].g[*].h[*].i[*].j[*].k[*].l[*].m[*].n[*].o[*].p[*].q[*].r[*].s[*].t[*].u[*].v[*].w[*].x[*].y[*].z[*].a[*].b[*].c[*].d[*].e[*].f[*].g[*].h[*].i[*].j[*].k[*].l[*].m[*].n[*].o[*].p[*].q[*].r[*].s[*].t[*].u[*].v[*].w[*].x[*].y[*].z[*]",
115+
"bench": "parse"
116+
},
117+
{
118+
"comment": "filter projection",
119+
"expression": "foo[bar > baz][qux > baz]",
120+
"bench": "parse"
121+
}
122+
]
123+
}
124+
]

tests/compliance/multiselect.json

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -387,6 +387,11 @@
387387
"comment": "Nested multiselect",
388388
"expression": "[[*]]",
389389
"result": [[]]
390+
},
391+
{
392+
"comment": "Select on null",
393+
"expression": "missing.{foo: bar}",
394+
"result": null
390395
}
391396
]
392397
}

tests/compliance/syntax.json

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -103,10 +103,6 @@
103103
{
104104
"expression": "!",
105105
"error": "syntax"
106-
},
107-
{
108-
"expression": "foo-bar",
109-
"error": "syntax"
110106
}
111107
]
112108
},

tests/test_compliance.py

Lines changed: 23 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@
55

66
from nose.tools import assert_equal
77

8-
import jmespath
9-
from jmespath.visitor import TreeInterpreter, Options
8+
from jmespath.visitor import Options
109

1110

1211
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -19,16 +18,17 @@
1918
def test_compliance():
2019
for full_path in _walk_files():
2120
if full_path.endswith('.json'):
22-
for given, expression, result, error in _load_cases(full_path):
23-
if error is NOT_SPECIFIED and result is not NOT_SPECIFIED:
24-
yield (_test_expression, given, expression,
25-
result, os.path.basename(full_path))
26-
elif result is NOT_SPECIFIED and error is not NOT_SPECIFIED:
27-
yield (_test_error_expression, given, expression,
28-
error, os.path.basename(full_path))
29-
else:
30-
parts = (given, expression, result, error)
31-
raise RuntimeError("Invalid test description: %s" % parts)
21+
for given, test_type, test_data in load_cases(full_path):
22+
t = test_data
23+
# Benchmark tests aren't run as part of the normal
24+
# test suite, so we only care about 'result' and
25+
# 'error' test_types.
26+
if test_type == 'result':
27+
yield (_test_expression, given, t['expression'],
28+
t['result'], os.path.basename(full_path))
29+
elif test_type == 'error':
30+
yield (_test_error_expression, given, t['expression'],
31+
t['error'], os.path.basename(full_path))
3232

3333

3434
def _walk_files():
@@ -47,14 +47,20 @@ def _walk_files():
4747
yield os.path.join(root, filename)
4848

4949

50-
def _load_cases(full_path):
50+
def load_cases(full_path):
5151
all_test_data = json.load(open(full_path), object_pairs_hook=OrderedDict)
5252
for test_data in all_test_data:
5353
given = test_data['given']
5454
for case in test_data['cases']:
55-
yield (given, case['expression'],
56-
case.get('result', NOT_SPECIFIED),
57-
case.get('error', NOT_SPECIFIED))
55+
if 'result' in case:
56+
test_type = 'result'
57+
elif 'error' in case:
58+
test_type = 'error'
59+
elif 'bench' in case:
60+
test_type = 'bench'
61+
else:
62+
raise RuntimeError("Unknown test type: %s" % json.dumps(case))
63+
yield (given, test_type, case)
5864

5965

6066
def _test_expression(given, expression, expected, filename):
@@ -85,7 +91,7 @@ def _test_error_expression(given, expression, error, filename):
8591
try:
8692
parsed = jmespath.compile(expression)
8793
parsed.search(given)
88-
except ValueError as e:
94+
except ValueError:
8995
# Test passes, it raised a parse error as expected.
9096
pass
9197
else:

0 commit comments

Comments (0)