@@ -1,3 +1,4 @@
+import hashlib
 import textwrap
 from . import Classification  # noqa: I201
 from . import Token
@@ -143,34 +144,34 @@ def dedented_lines(description):
     return textwrap.dedent(description).split('\n')
 
 
-def hash_syntax_node(node):
+def digest_syntax_node(digest, node):
     # Hash into the syntax name and serialization code
-    result = hash((node.name, get_serialization_code(node.syntax_kind)))
+    digest.update(node.name)
+    digest.update(str(get_serialization_code(node.syntax_kind)))
     for child in node.children:
         # Hash into the expected child syntax
-        result = hash((result, child.syntax_kind))
+        digest.update(child.syntax_kind)
         # Hash into the child name
-        result = hash((result, child.name))
+        digest.update(child.name)
         # Hash into whether the child is optional
-        result = hash((result, child.is_optional))
-    return result
+        digest.update(str(child.is_optional))
 
 
-def hash_token_syntax(token):
+def digest_syntax_token(digest, token):
     # Hash into the token name and serialization code
-    return hash((token.name, token.serialization_code))
+    digest.update(token.name)
+    digest.update(str(token.serialization_code))
 
 
-def hash_trivia(trivia):
-    return hash((trivia.name, trivia.serialization_code, trivia.characters))
+def digest_trivia(digest, trivia):
+    digest.update(trivia.name)
+    digest.update(str(trivia.serialization_code))
+    digest.update(str(trivia.characters))
 
 
 def calculate_node_hash():
-    result = 0
-    for node in SYNTAX_NODES:
-        result = hash((result, hash_syntax_node(node)))
-    for token in SYNTAX_TOKENS:
-        result = hash((result, hash_token_syntax(token)))
-    for trivia in TRIVIAS:
-        result = hash((result, hash_trivia(trivia)))
-    return result
+    digest = hashlib.sha1()
+    map(lambda node: digest_syntax_node(digest, node), SYNTAX_NODES)
+    map(lambda token: digest_syntax_token(digest, token), SYNTAX_TOKENS)
+    map(lambda trivia: digest_trivia(digest, trivia), TRIVIAS)
+    return digest.hexdigest()
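Note that the new calculate_node_hash relies on Python 2 semantics: map is evaluated eagerly there, and hashlib digests accept str input. Under Python 3 the map calls would never run and update would reject unencoded strings. Below is a minimal sketch of an equivalent written for Python 3, assuming the same SYNTAX_NODES, SYNTAX_TOKENS, TRIVIAS, and get_serialization_code definitions from this module; the helper name calculate_node_hash_py3 is illustrative only, not part of the change above.

import hashlib


def calculate_node_hash_py3():
    # Sketch only: iterate eagerly instead of using map(), and encode to
    # UTF-8 because hashlib digests only accept bytes in Python 3.
    digest = hashlib.sha1()

    def update(value):
        digest.update(str(value).encode('utf-8'))

    for node in SYNTAX_NODES:
        update(node.name)
        update(get_serialization_code(node.syntax_kind))
        for child in node.children:
            update(child.syntax_kind)
            update(child.name)
            update(child.is_optional)
    for token in SYNTAX_TOKENS:
        update(token.name)
        update(token.serialization_code)
    for trivia in TRIVIAS:
        update(trivia.name)
        update(trivia.serialization_code)
        update(trivia.characters)
    return digest.hexdigest()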