
Commit ea68bec

Fix throwing of errors
1 parent 3a0a878 commit ea68bec

4 files changed, +20 -20 lines

Python/ini_converting/ini_parser.py

Lines changed: 11 additions & 11 deletions

@@ -16,8 +16,8 @@ def get_parsed_tokens(tokens, parsed=None, token_idx=None, depth=0):
 		token = tokens[token_idx[0]]

 		if state == "newline" and is_deeper(depth, token, tokens, token_idx[0] + 1):
-			children = { "type": "children", "content": [], "index": token["index"], "filepath": token["filepath"] }
-			append(children, parsed)
+			children = { "type": "children", "content": [] }
+			append(children, parsed, token)
 			get_parsed_tokens(tokens, children["content"], token_idx, depth + 1)
 			# "state" is deliberately not being changed here.
 		elif state == "newline" and is_same_depth(depth, token, tokens, token_idx[0] + 1):
@@ -30,38 +30,38 @@ def get_parsed_tokens(tokens, parsed=None, token_idx=None, depth=0):
 			state = "start"

 		elif state == "start" and token["type"] == "WORD":
-			append( { "type": "property", "content": token["content"] }, parsed )
+			append( { "type": "property", "content": token["content"] }, parsed, token )
 			state = "property"
 			token_idx[0] += 1
 		elif state == "property" and token["type"] == "EQUALS":
-			append( { "type": "extra", "content": token["content"] }, parsed )
+			append( { "type": "extra", "content": token["content"] }, parsed, token )
 			state = "equals"
 			token_idx[0] += 1
 		elif state == "property" and token["type"] == "NEWLINES":
-			append( { "type": "extra", "content": token["content"] }, parsed )
+			append( { "type": "extra", "content": token["content"] }, parsed, token )
 			state = "newline"
 			token_idx[0] += 1
 		elif state == "equals" and token["type"] == "WORD":
-			append( { "type": "value", "content": token["content"] }, parsed )
+			append( { "type": "value", "content": token["content"] }, parsed, token )
 			state = "value"
 			token_idx[0] += 1
 		elif state == "value" and token["type"] == "NEWLINES":
-			append( { "type": "extra", "content": token["content"] }, parsed )
+			append( { "type": "extra", "content": token["content"] }, parsed, token )
 			state = "newline"
 			token_idx[0] += 1

 		else:
-			append( { "type": "extra", "content": token["content"] }, parsed )
+			append( { "type": "extra", "content": token["content"] }, parsed, token )
 			token_idx[0] += 1

 	return parsed


-def append(token, parsed):
+def append(parsed_token, parsed, token):
 	if len(parsed) == 0:
-		token_error(token, "Incorrect tabbing at {line}, column {column} in {filepath}")
+		token_error(token, "Incorrect tabbing at line {line}, column {column} in {filepath}")

-	parsed[-1].append(parsed_token)
+	parsed[-1].append(parsed_token)


 def token_error(token, message):
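
The last hunk is the heart of the fix: append() used to pass its first argument, the freshly built output dict, on to token_error(), but that dict carries no position information (the children dict only had it because "index" and "filepath" were copied off the token by hand). append() now takes the originating token as a separate argument and reports errors against it, so the {line}, {column} and {filepath} placeholders can actually be filled in. A minimal sketch of what token_error() plausibly does with those placeholders; the "line", "column" and "filepath" token fields and the exception type are assumptions, since the diff only shows the function's signature:

# Sketch only: assumes tokens carry "line", "column" and "filepath"
# fields (the names used by the message placeholders) and that errors
# surface as plain exceptions; the real implementation may differ.
def token_error(token, message):
	raise ValueError(message.format(
		line=token["line"],
		column=token["column"],
		filepath=token["filepath"],
	))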

Python/ini_converting/ini_parser_tests.py

Lines changed: 3 additions & 6 deletions

@@ -4,12 +4,9 @@


 def parser_tests():
-	# test("invalid_tabbing", [ # This is expected to raise a "Too many tabs found" error.
-	# 	[
-	# 		{ "type": "property", "content": "AddEffect" }, { "type": "extra", "content": " " }, { "type": "extra", "content": "=" }, { "type": "extra", "content": " " }, { "type": "value", "content": "MOPixel" }, { "type": "extra", "content": "\n" },
-	# 		{ "type": "extra", "content": "\t\t" }, { "type": "property", "content": "Foo" }, { "type": "extra", "content": " " }, { "type": "extra", "content": "=" }, { "type": "extra", "content": " " }, { "type": "value", "content": "Bar" },
-	# 	]
-	# ])
+	# test("invalid_tabbing", []) # This is expected to raise a "Too many tabs found" error.
+	# test("invalid_immediate_tab", []) # This is expected to raise an "Incorrect tabbing" error.
+
 	test("simple", [
 		[
 			{ "type": "property", "content": "AddEffect" }, { "type": "extra", "content": " " }, { "type": "extra", "content": "=" }, { "type": "extra", "content": " " }, { "type": "value", "content": "MOPixel" },
Lines changed: 1 addition & 0 deletions (new file)

@@ -0,0 +1 @@
+	Foo

Python/ini_converting/ini_tokenizer_tests.py

Lines changed: 5 additions & 3 deletions

@@ -3,12 +3,15 @@


 def tokenizer_tests():
-	# It's fine that the tokenizer doesn't notice that there's invalid tabbing in "invalid_tabbing.ini" and happily outputs this data,
-	# because checking for invalid tabbing is the parser's responsibility.
+	# It's fine that the tokenizer doesn't notice that these files are invalid, because complex checking is the parser's responsibility.
 	test("invalid_tabbing", [
 		{ "type": "WORD", "content": "AddEffect" }, { "type": "EXTRA", "content": " " }, { "type": "EQUALS", "content": "=" }, { "type": "EXTRA", "content": " " }, { "type": "WORD", "content": "MOPixel" }, { "type": "NEWLINES", "content": "\n" },
 		{ "type": "TABS", "content": "\t\t" }, { "type": "WORD", "content": "Foo" }, { "type": "EXTRA", "content": " " }, { "type": "EQUALS", "content": "=" }, { "type": "EXTRA", "content": " " }, { "type": "WORD", "content": "Bar" },
 	])
+	test("invalid_immediate_tab", [
+		{ "type": "TABS", "content": "\t" }, { "type": "WORD", "content": "Foo" },
+	])
+
 	test("simple", [
 		{ "type": "WORD", "content": "AddEffect" }, { "type": "EXTRA", "content": " " }, { "type": "EQUALS", "content": "=" }, { "type": "EXTRA", "content": " " }, { "type": "WORD", "content": "MOPixel" },
 	])

@@ -108,7 +111,6 @@ def tokenizer_tests():
 		{ "type": "WORD", "content": "Foo" }, { "type": "EXTRA", "content": " " }, { "type": "EQUALS", "content": "=" }, { "type": "NEWLINES", "content": "\n" }, { "type": "WORD", "content": "Bar" },
 	])

-
 def test(filename, expected):
 	filepath = tests.get_test_path_from_filename(filename)
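
The diff cuts off inside test(); for context, a sketch of how the rest of the helper plausibly works, with ini_tokenizer.get_tokens as an assumed entry-point name. The expected lists above spell out only "type" and "content", while real tokens evidently carry extra bookkeeping (the old parser read token["index"] and token["filepath"]), so the comparison likely strips tokens down first:

# Sketch: only the first line of this helper is shown in the diff.
def test(filename, expected):
	filepath = tests.get_test_path_from_filename(filename)
	actual = ini_tokenizer.get_tokens(filepath)  # assumed entry point
	# Compare just the fields the expected lists spell out, ignoring
	# any bookkeeping fields the tokenizer attaches to each token.
	stripped = [{ k: tok[k] for k in ("type", "content") } for tok in actual]
	assert stripped == expected, filename + " tokenized unexpectedly"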
