
Commit 36af77d

Make the parser handle the DataModule property which never has an equals nor value
1 parent: f55596c

2 files changed, +6 -1 lines
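For context, the parser targets an INI-style format in which some properties stand alone on a line. A hypothetical snippet of the kind of input this commit accounts for (everything except the DataModule line is illustrative, not taken from the repository):

DataModule    // this property is followed directly by a newline, never by '= value'
    ModuleName = Example Mod

Before this change, a property name followed directly by a newline had no matching branch in the parser's state machine.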

Python/ini_converting/ini_parser.py

Lines changed: 4 additions & 0 deletions
@@ -37,6 +37,10 @@ def get_parsed_tokens(tokens, parsed=None, token_idx=None, depth=0):
         parsed[-1].append( { "type": "extra", "content": token["content"] } )
         state = "equals"
         token_idx[0] += 1
+    elif state == "property" and token["type"] == "NEWLINES":
+        parsed[-1].append( { "type": "extra", "content": token["content"] } )
+        state = "newline"
+        token_idx[0] += 1
     elif state == "equals" and token["type"] == "WORD":
         parsed[-1].append( { "type": "value", "content": token["content"] } )
         state = "value"
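
A simplified, self-contained sketch of the state machine around the new branch (the real get_parsed_tokens threads parsed, token_idx, and depth through recursion; the "start" state, the EQUALS token type, and the parse_line_tokens name here are assumptions made for illustration only):

def parse_line_tokens(tokens):
    parsed = []
    state = "start"
    for token in tokens:
        if state == "start" and token["type"] == "WORD":
            parsed.append({"type": "property", "content": token["content"]})
            state = "property"
        elif state == "property" and token["type"] == "EQUALS":
            parsed.append({"type": "extra", "content": token["content"]})
            state = "equals"
        elif state == "property" and token["type"] == "NEWLINES":
            # The branch this commit adds: a property such as DataModule is
            # followed directly by a newline instead of '= value'.
            parsed.append({"type": "extra", "content": token["content"]})
            state = "newline"
        elif state == "equals" and token["type"] == "WORD":
            parsed.append({"type": "value", "content": token["content"]})
            state = "value"
    return parsed

print(parse_line_tokens([
    {"type": "WORD", "content": "DataModule"},
    {"type": "NEWLINES", "content": "\n"},
]))
# -> [{'type': 'property', 'content': 'DataModule'}, {'type': 'extra', 'content': '\n'}]

Without the NEWLINES branch, the bare DataModule line would fall through every case and the parser would stay stuck in the "property" state.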

Python/ini_converting/ini_tokenizer.py

Lines changed: 2 additions & 1 deletion
@@ -110,7 +110,7 @@ def tokenize_newline(i, text_len, text, tokens, filepath):
         token += text[i]
         i += 1
 
-    tokens.append(get_token("NEWLINES", token, i, filepath)) # TODO: Maybe use "NEWLINE" instead of the plural version?
+    tokens.append(get_token("NEWLINES", token, i, filepath))
 
     return i
 
@@ -121,6 +121,7 @@ def tokenize_word(i, text_len, text, tokens, filepath):
     subtext = text[i:]
     token = re.match("(\S+([\t\f\v ]*\S+)*)", subtext).group(0)
 
+    # TODO: Become a regex wizard and do this in the above regex instead.
     token = token.split("//", maxsplit=1)[0]
     token = token.split("/*", maxsplit=1)[0]
     token = token.split("=", maxsplit=1)[0]
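
A standalone sketch of the word-matching step touched above, showing what the three split calls do to a value-less property line (the sample text is illustrative; the real tokenize_word also advances i and wraps the result with get_token; a raw string is used here to avoid escape-sequence warnings):

import re

subtext = "DataModule // a property with no equals sign and no value"
token = re.match(r"(\S+([\t\f\v ]*\S+)*)", subtext).group(0)  # rest of the line, up to the newline
token = token.split("//", maxsplit=1)[0]  # drop a trailing // comment -> "DataModule "
token = token.split("/*", maxsplit=1)[0]  # drop a trailing /* comment (no change here)
token = token.split("=", maxsplit=1)[0]   # cut at '=' so only the key is kept (no change here)
print(repr(token))  # 'DataModule '

The split calls trim trailing comments and cut the token at '='; the TODO added in this commit notes that this trimming could in principle be folded into the regex itself.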
