@@ -88,9 +88,10 @@
     '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
     '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
 WHITESPACE = frozenset(' \t')
-SKIP_TOKENS = frozenset([tokenize.NL, tokenize.NEWLINE,
-                         tokenize.INDENT, tokenize.DEDENT])
-SKIP_TOKENS_C = SKIP_TOKENS.union([tokenize.COMMENT])
+NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
+SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
+# ERRORTOKEN is triggered by backticks in Python 3
+SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
 BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']
 
 INDENT_REGEX = re.compile(r'([ \t]*)')
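The new constants form a strict subset chain (NEWLINE within SKIP_TOKENS within SKIP_COMMENTS), so each check below can reuse the narrowest filter it needs. A minimal standalone sketch, not part of the patch, verifying how they nest:

```python
# Sketch only: mirrors the patch's constants to show the subset chain.
import tokenize

NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])

assert NEWLINE < SKIP_TOKENS < SKIP_COMMENTS    # strict subsets
assert tokenize.COMMENT in SKIP_COMMENTS
assert tokenize.COMMENT not in SKIP_TOKENS      # comments only in the widest set
```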
@@ -434,8 +435,7 @@ def continued_indentation(logical_line, tokens, indent_level, hang_closing,
         newline = row < start[0] - first_row
         if newline:
             row = start[0] - first_row
-            newline = (not last_token_multiline and
-                       token_type not in (tokenize.NL, tokenize.NEWLINE))
+            newline = not last_token_multiline and token_type not in NEWLINE
 
         if newline:
             # this is the beginning of a continuation line.
@@ -657,8 +657,7 @@ def missing_whitespace_around_operator(logical_line, tokens):
     prev_type = tokenize.OP
     prev_text = prev_end = None
     for token_type, text, start, end, line in tokens:
-        if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN):
-            # ERRORTOKEN is triggered by backticks in Python 3
+        if token_type in SKIP_COMMENTS:
             continue
         if text in ('(', 'lambda'):
             parens += 1
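Switching from the inline tuple to SKIP_COMMENTS also widens the filter: COMMENT, INDENT and DEDENT tokens are now skipped alongside NL, NEWLINE and ERRORTOKEN before any whitespace check runs. A standalone sketch of the filtered loop, using a hypothetical source string:

```python
# Sketch only: drop everything in SKIP_COMMENTS, as the checker's loop now does.
import io
import tokenize

SKIP_COMMENTS = frozenset([tokenize.NL, tokenize.NEWLINE, tokenize.INDENT,
                           tokenize.DEDENT, tokenize.COMMENT, tokenize.ERRORTOKEN])

src = "x = 1  # trailing comment\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    if tok.type in SKIP_COMMENTS:
        continue  # comments, newlines and indents never reach the checks
    print(tokenize.tok_name[tok.type], repr(tok.string))
# -> NAME 'x', OP '=', NUMBER '1', ENDMARKER ''
```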
@@ -698,14 +697,8 @@ def missing_whitespace_around_operator(logical_line, tokens):
             # Check if the operator is being used as a binary operator
             # Allow unary operators: -123, -x, +1.
             # Allow argument unpacking: foo(*args, **kwargs).
-            if prev_type == tokenize.OP:
-                binary_usage = (prev_text in '}])')
-            elif prev_type == tokenize.NAME:
-                binary_usage = (prev_text not in KEYWORDS)
-            else:
-                binary_usage = (prev_type not in SKIP_TOKENS_C)
-
-            if binary_usage:
+            if (prev_text in '}])' if prev_type == tokenize.OP
+                    else prev_text not in KEYWORDS):
                 need_space = None
             elif text in WS_OPTIONAL_OPERATORS:
                 need_space = None
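Collapsing the three-way test into a single conditional expression works because the loop now skips SKIP_COMMENTS tokens up front: the old `prev_type not in SKIP_TOKENS_C` fallback can no longer be reached, and for the remaining non-OP token types (NAME, NUMBER, STRING) `prev_text not in KEYWORDS` gives the same answer as before. A standalone sketch with hypothetical prev values and KEYWORDS stubbed down to a few entries:

```python
# Sketch only: the collapsed binary-vs-unary test, with a stubbed keyword set.
import tokenize

KEYWORDS = frozenset(['not', 'and', 'or', 'in', 'is'])  # stand-in for the real set

def is_binary_usage(prev_type, prev_text):
    # After an OP token only a closer means the next +/- is binary;
    # otherwise any non-keyword token (name, number, string) does.
    return (prev_text in '}])' if prev_type == tokenize.OP
            else prev_text not in KEYWORDS)

assert is_binary_usage(tokenize.OP, ')')          # foo() - 1  -> binary
assert not is_binary_usage(tokenize.OP, ',')      # foo(a, -1) -> unary
assert is_binary_usage(tokenize.NAME, 'x')        # x - 1      -> binary
assert not is_binary_usage(tokenize.NAME, 'not')  # not -1     -> unary
```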
@@ -1170,11 +1163,11 @@ def filename_match(filename, patterns, default=True):
 
 if COMMENT_WITH_NL:
     def _is_eol_token(token):
-        return (token[0] in (tokenize.NEWLINE, tokenize.NL) or
+        return (token[0] in NEWLINE or
                 (token[0] == tokenize.COMMENT and token[1] == token[4]))
 else:
     def _is_eol_token(token):
-        return token[0] in (tokenize.NEWLINE, tokenize.NL)
+        return token[0] in NEWLINE
 
 
 ##############################################################################
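The `token[1] == token[4]` test asks whether the comment's string is the entire physical line, which only happens on interpreters whose tokenizer folds the trailing newline into a full-line COMMENT token (the quirk COMMENT_WITH_NL detects). A standalone sketch of the first variant; on a tokenizer without the quirk the comment prints False and the NL token that follows it is the end-of-line marker instead:

```python
# Sketch only: EOL detection per the COMMENT_WITH_NL variant.
import io
import tokenize

NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])

def _is_eol_token(token):
    # token[1] is the token string, token[4] the whole physical line
    return (token[0] in NEWLINE or
            (token[0] == tokenize.COMMENT and token[1] == token[4]))

src = "x = 1\n# full-line comment\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.type], _is_eol_token(tok))
```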
@@ -1440,11 +1433,11 @@ def check_all(self, expected=None, line_offset=0):
                 elif text in '}])':
                     parens -= 1
                 elif not parens:
-                    if token_type == tokenize.NEWLINE:
-                        self.check_logical()
-                        self.blank_before = 0
-                    elif token_type == tokenize.NL:
-                        if len(self.tokens) == 1:
+                    if token_type in NEWLINE:
+                        if token_type == tokenize.NEWLINE:
+                            self.check_logical()
+                            self.blank_before = 0
+                        elif len(self.tokens) == 1:
                             # The physical line contains only this token.
                             self.blank_lines += 1
                             del self.tokens[0]
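Behaviour is unchanged here; the two branches are simply regrouped under a single `token_type in NEWLINE` guard. The distinction that still forces the inner dispatch, in a standalone sketch: tokenize emits NEWLINE to close a logical line (so check_logical must run) but NL for blank or otherwise non-logical lines (which are only counted).

```python
# Sketch only: NEWLINE ends a logical line, NL marks a non-logical one.
import io
import tokenize

src = "x = 1\n\ny = 2\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    if tok.type == tokenize.NEWLINE:
        print("logical line ended:", repr(tok.line))
    elif tok.type == tokenize.NL:
        print("blank/non-logical line:", repr(tok.line))
# -> logical line ended: 'x = 1\n'
#    blank/non-logical line: '\n'
#    logical line ended: 'y = 2\n'
```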