@@ -53,6 +53,19 @@ def get_parsed_tokens(tokens, parsed=None, token_idx=None, depth=0):
     return parsed


+def is_deeper(depth, token):
+    if token["type"] != "TABS":
+        return False
+
+    new_depth = get_depth(token)
+
+    if new_depth > depth + 1:
+        line, column = get_token_position(token)
+        raise ValueError(f"Too many tabs found at line {line}, column {column} in {token['filepath']}")
+
+    return new_depth > depth
+
+
 def get_depth(token):
     return len(token["content"])

@@ -78,20 +91,7 @@ def is_shallower(depth, token, tokens, next_token_idx):
     return False # Reached when the while-loop read the last character of the file and didn't return.


-def is_deeper(depth, token):
-    if token["type"] != "TABS":
-        return False
-
-    new_depth = get_depth(token)
-
-    if new_depth > depth + 1:
-        line, column = get_token_pos(token)
-        raise ValueError(f"Too many tabs found at line {line}, column {column} in {token['filepath']}")
-
-    return new_depth > depth
-
-
-def get_token_pos(token):
+def get_token_position(token):
     with open(token["filepath"], "r") as f:
         text = f.read()
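
The relocated is_deeper() treats only TABS tokens as indentation and lets the depth grow by at most one level per line; anything deeper raises the ValueError shown above. A minimal sketch of that contract, assuming tokens are dicts with "type", "content", and "filepath" keys as in the diff; the example token and the shortened error message are illustrative and not part of this commit:

    # Sketch: exercising is_deeper() with a hypothetical two-tab token.
    # get_token_position() is stubbed out here to keep the example self-contained.

    def get_depth(token):
        return len(token["content"])

    def is_deeper(depth, token):
        if token["type"] != "TABS":
            return False
        new_depth = get_depth(token)
        if new_depth > depth + 1:
            # The real code reports line/column via get_token_position().
            raise ValueError("Too many tabs")
        return new_depth > depth

    token = {"type": "TABS", "content": "\t\t", "filepath": "example.txt"}  # hypothetical token
    assert is_deeper(1, token)        # depth 1 -> 2: one level deeper, allowed
    assert not is_deeper(2, token)    # depth 2 -> 2: not deeper
    try:
        is_deeper(0, token)           # depth 0 -> 2: skips a level, rejected
    except ValueError:
        pass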