@@ -16,8 +16,8 @@ def get_parsed_tokens(tokens, parsed=None, token_idx=None, depth=0):
16
16
token = tokens [token_idx [0 ]]
17
17
18
18
if state == "newline" and is_deeper (depth , token , tokens , token_idx [0 ] + 1 ):
19
- children = { "type" : "children" , "content" : [] }
20
- parsed [ - 1 ]. append (children )
19
+ children = { "type" : "children" , "content" : [], "index" : token [ "index" ], "filepath" : token [ "filepath" ] }
20
+ append (children , parsed )
21
21
get_parsed_tokens (tokens , children ["content" ], token_idx , depth + 1 )
22
22
# "state" is deliberately not being changed here.
23
23
elif state == "newline" and is_same_depth (depth , token , tokens , token_idx [0 ] + 1 ):
@@ -30,39 +30,50 @@ def get_parsed_tokens(tokens, parsed=None, token_idx=None, depth=0):
30
30
state = "start"
31
31
32
32
elif state == "start" and token ["type" ] == "WORD" :
33
- parsed [ - 1 ]. append ( { "type" : "property" , "content" : token ["content" ] } )
33
+ append ( { "type" : "property" , "content" : token ["content" ] }, parsed )
34
34
state = "property"
35
35
token_idx [0 ] += 1
36
36
elif state == "property" and token ["type" ] == "EQUALS" :
37
- parsed [ - 1 ]. append ( { "type" : "extra" , "content" : token ["content" ] } )
37
+ append ( { "type" : "extra" , "content" : token ["content" ] }, parsed )
38
38
state = "equals"
39
39
token_idx [0 ] += 1
40
40
elif state == "property" and token ["type" ] == "NEWLINES" :
41
- parsed [ - 1 ]. append ( { "type" : "extra" , "content" : token ["content" ] } )
41
+ append ( { "type" : "extra" , "content" : token ["content" ] }, parsed )
42
42
state = "newline"
43
43
token_idx [0 ] += 1
44
44
elif state == "equals" and token ["type" ] == "WORD" :
45
- parsed [ - 1 ]. append ( { "type" : "value" , "content" : token ["content" ] } )
45
+ append ( { "type" : "value" , "content" : token ["content" ] }, parsed )
46
46
state = "value"
47
47
token_idx [0 ] += 1
48
48
elif state == "value" and token ["type" ] == "NEWLINES" :
49
- parsed [ - 1 ]. append ( { "type" : "extra" , "content" : token ["content" ] } )
49
+ append ( { "type" : "extra" , "content" : token ["content" ] }, parsed )
50
50
state = "newline"
51
51
token_idx [0 ] += 1
52
52
53
53
else :
54
- parsed [ - 1 ]. append ( { "type" : "extra" , "content" : token ["content" ] } )
54
+ append ( { "type" : "extra" , "content" : token ["content" ] }, parsed )
55
55
token_idx [0 ] += 1
56
56
57
57
return parsed
58
58
59
59
60
def append(token, parsed):
    """Append *token* to the deepest (last) open level of *parsed*.

    Parameters:
        token: dict describing a parsed token; must carry "index" and
            "filepath" keys if the error path is taken (token_error
            formats the position from them).
        parsed: list of levels, each level being a list of tokens; the
            token is appended to the innermost level, parsed[-1].

    Raises:
        ValueError (via token_error): when *parsed* has no levels at all,
            which means the source file's tabbing placed a token outside
            any valid nesting level.
    """
    if not parsed:
        # token_error raises, so the append below is never reached here.
        # Message now matches the "at line {line}" wording used by is_deeper.
        token_error(token, "Incorrect tabbing at line {line}, column {column} in {filepath}")

    parsed[-1].append(token)
+
66
+
67
def token_error(token, message):
    """Raise ValueError for *token*, filling *message* with its position.

    *message* is a str.format template that may use the placeholders
    {line}, {column} and {filepath}; line/column come from
    get_token_position(token) and filepath from token["filepath"].
    """
    position = get_token_position(token)
    formatted = message.format(
        line=position[0],
        column=position[1],
        filepath=token["filepath"],
    )
    raise ValueError(formatted)
70
+
71
+
60
72
def is_deeper(depth, token, tokens, next_token_idx):
    """Return whether the upcoming token sits deeper than *depth*.

    Computes the new nesting level via get_depth and reports an error
    (through token_error, which raises ValueError) when the level jumps
    by more than one step at once — i.e. too many tabs were used.
    """
    new_depth = get_depth(token, tokens, next_token_idx)

    skipped_a_level = new_depth > depth + 1
    if skipped_a_level:
        token_error(token, "Too many tabs found at line {line}, column {column} in {filepath}")

    return new_depth > depth
68
79
0 commit comments