@@ -15,12 +15,12 @@ def get_parsed_tokens(tokens, parsed=None, token_idx=None, depth=0):
     while token_idx[0] < len(tokens):
         token = tokens[token_idx[0]]
 
-        if state == "newline" and is_deeper(depth, token):
+        if state == "newline" and is_deeper(depth, token, tokens, token_idx[0] + 1):
             children = { "type": "children", "content": [] }
             parsed[-1].append(children)
             get_parsed_tokens(tokens, children["content"], token_idx, depth + 1)
             # "state" is deliberately not being changed here.
-        elif state == "newline" and is_same_depth(token, depth):
+        elif state == "newline" and is_same_depth(depth, token, tokens, token_idx[0] + 1):
             parsed.append([])
             state = "start"
         elif state == "newline" and is_shallower(depth, token, tokens, token_idx[0] + 1):
@@ -53,11 +53,11 @@ def get_parsed_tokens(tokens, parsed=None, token_idx=None, depth=0):
     return parsed
 
 
-def is_deeper(depth, token):
+def is_deeper(depth, token, tokens, next_token_idx):
     if token["type"] != "TABS":
         return False
 
-    new_depth = get_depth(token)
+    new_depth = get_depth(token, tokens, next_token_idx)
 
     if new_depth > depth + 1:
         line, column = get_token_position(token)
@@ -66,29 +66,32 @@ def is_deeper(depth, token):
     return new_depth > depth
 
 
-def get_depth(token):
-    return len(token["content"])
-
-
-def is_same_depth(token, depth):
-    return token["type"] == "TABS" and get_depth(token) == depth
-
-
-def is_shallower(depth, token, tokens, next_token_idx):
-    if depth == 0 or token["type"] == "NEWLINES":
-        return False
+def get_depth(token, tokens, next_token_idx):
+    if token["type"] == "NEWLINES":
+        return -1
+    elif token["type"] == "WORD":
+        return 0
 
     while next_token_idx < len(tokens):
         next_token = tokens[next_token_idx]
 
         if next_token["type"] == "WORD":
-            return True
+            return len(token["content"]) # Counts the number of tabs.
         elif next_token["type"] == "NEWLINES":
-            return False
+            return -1
 
         next_token_idx += 1
 
-    return False # Reached when the while-loop read the last character of the file and didn't return.
+    return -1 # Reached when the while-loop read the last character of the file and didn't return.
+
+
+def is_same_depth(depth, token, tokens, next_token_idx):
+    return token["type"] == "TABS" and get_depth(token, tokens, next_token_idx) == depth
+
+
+def is_shallower(depth, token, tokens, next_token_idx):
+    new_depth = get_depth(token, tokens, next_token_idx)
+    return new_depth != -1 and new_depth < depth
 
 
 def get_token_position(token):
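
Note (not part of the commit above): a minimal sketch of how the reworked get_depth() is expected to behave, assuming tokens are dicts with "type" and "content" keys as in the patch; the concrete token values below are made up for illustration.

# Illustration only. The logic is copied from the "+" side of the diff;
# only the sample inputs at the bottom are assumptions.

def get_depth(token, tokens, next_token_idx):
    if token["type"] == "NEWLINES":
        return -1
    elif token["type"] == "WORD":
        return 0

    # TABS token: the tab count only counts as a depth if a WORD follows
    # before the next NEWLINES token (i.e. the line isn't blank).
    while next_token_idx < len(tokens):
        next_token = tokens[next_token_idx]
        if next_token["type"] == "WORD":
            return len(token["content"])
        elif next_token["type"] == "NEWLINES":
            return -1
        next_token_idx += 1
    return -1

tokens = [{"type": "TABS", "content": "\t\t"}, {"type": "WORD", "content": "foo"}]
print(get_depth(tokens[0], tokens, 1))  # 2: two tabs followed by a word

tokens = [{"type": "TABS", "content": "\t\t"}, {"type": "NEWLINES", "content": "\n"}]
print(get_depth(tokens[0], tokens, 1))  # -1: tabs on an otherwise blank line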