
Commit 015fbed

Create set of tokens for comparison; skip comments completely for E22 checks
1 parent: 1825dec

File tree

1 file changed: pep8.py (+15, -22 lines)

pep8.py

Lines changed: 15 additions & 22 deletions
@@ -88,9 +88,10 @@
     '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
     '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
 WHITESPACE = frozenset(' \t')
-SKIP_TOKENS = frozenset([tokenize.NL, tokenize.NEWLINE,
-                         tokenize.INDENT, tokenize.DEDENT])
-SKIP_TOKENS_C = SKIP_TOKENS.union([tokenize.COMMENT])
+NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
+SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
+# ERRORTOKEN is triggered by backticks in Python 3
+SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
 BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']

 INDENT_REGEX = re.compile(r'([ \t]*)')
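
The commit layers three frozensets so later checks can name exactly what they skip: NEWLINE for the two newline token types, SKIP_TOKENS adding the indentation tokens, and SKIP_COMMENTS adding COMMENT plus ERRORTOKEN. A minimal sketch of how the sets classify a token stream under Python 3's tokenize module (the sample source line is illustrative, not from the commit):

    import tokenize

    NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
    SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
    SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])

    readline = iter(["x = 1  # trailing comment\n"]).__next__
    for tok in tokenize.generate_tokens(readline):
        status = "skipped" if tok.type in SKIP_COMMENTS else "kept"
        print(tokenize.tok_name[tok.type], repr(tok.string), status)

The NAME, OP, NUMBER and ENDMARKER tokens come out "kept"; the COMMENT and NEWLINE tokens fall into SKIP_COMMENTS.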
@@ -434,8 +435,7 @@ def continued_indentation(logical_line, tokens, indent_level, hang_closing,
         newline = row < start[0] - first_row
         if newline:
             row = start[0] - first_row
-            newline = (not last_token_multiline and
-                       token_type not in (tokenize.NL, tokenize.NEWLINE))
+            newline = not last_token_multiline and token_type not in NEWLINE

         if newline:
             # this is the beginning of a continuation line.
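
Replacing the inline (tokenize.NL, tokenize.NEWLINE) tuple with the NEWLINE set is behavior-preserving here; the condition still asks whether a token that opens a new physical row should be treated as the start of a continuation line. A loose sketch of that test on example input (the function's full bookkeeping, such as last_token_multiline, is elided):

    import tokenize

    NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
    src = ['result = some_function(\n', '    "first",\n', '    "second")\n']
    last_row = 1
    for tok in tokenize.generate_tokens(iter(src).__next__):
        if tok.type == tokenize.ENDMARKER:
            break
        if tok.start[0] > last_row and tok.type not in NEWLINE:
            print("continuation row begins at", tok.start, repr(tok.string))
        last_row = tok.start[0]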
@@ -657,8 +657,7 @@ def missing_whitespace_around_operator(logical_line, tokens):
     prev_type = tokenize.OP
     prev_text = prev_end = None
     for token_type, text, start, end, line in tokens:
-        if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN):
-            # ERRORTOKEN is triggered by backticks in Python 3
+        if token_type in SKIP_COMMENTS:
             continue
         if text in ('(', 'lambda'):
             parens += 1
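
Because SKIP_COMMENTS now filters the loop up front, a COMMENT (or stray ERRORTOKEN) can never be stored as prev_type/prev_text, so comments are invisible to the E22x whitespace checks rather than merely tolerated. A hedged sketch of the filtering; e22_tokens is a hypothetical helper, not pep8's API:

    import tokenize

    SKIP_COMMENTS = frozenset([tokenize.NL, tokenize.NEWLINE,
                               tokenize.INDENT, tokenize.DEDENT,
                               tokenize.COMMENT, tokenize.ERRORTOKEN])

    def e22_tokens(lines):
        # Hypothetical helper: yields only the tokens the E22x loop inspects.
        for tok in tokenize.generate_tokens(iter(lines).__next__):
            if tok.type not in SKIP_COMMENTS:
                yield tok

    for tok in e22_tokens(["i=i+1  # comment is never seen\n"]):
        print(tokenize.tok_name[tok.type], repr(tok.string))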
@@ -698,14 +697,8 @@ def missing_whitespace_around_operator(logical_line, tokens):
             # Check if the operator is being used as a binary operator
             # Allow unary operators: -123, -x, +1.
             # Allow argument unpacking: foo(*args, **kwargs).
-            if prev_type == tokenize.OP:
-                binary_usage = (prev_text in '}])')
-            elif prev_type == tokenize.NAME:
-                binary_usage = (prev_text not in KEYWORDS)
-            else:
-                binary_usage = (prev_type not in SKIP_TOKENS_C)
-
-            if binary_usage:
+            if (prev_text in '}])' if prev_type == tokenize.OP
+                    else prev_text not in KEYWORDS):
                 need_space = None
             elif text in WS_OPTIONAL_OPERATORS:
                 need_space = None
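
The eight-line binary_usage block collapses into one conditional expression because its else branch is no longer needed: with SKIP_COMMENTS applied at the top of the loop, prev_type can only be OP, NAME, NUMBER or STRING here, and for NUMBER/STRING tokens prev_text (e.g. '123') is never a keyword, so "prev_text not in KEYWORDS" gives the same answer the old "prev_type not in SKIP_TOKENS_C" branch did. A sketch of the decision, with KEYWORDS approximated by keyword.kwlist (pep8's actual constant differs slightly):

    import keyword
    import tokenize

    KEYWORDS = frozenset(keyword.kwlist)  # assumption: stand-in for pep8's KEYWORDS

    cases = [(tokenize.OP, ')'), (tokenize.NAME, 'return'),
             (tokenize.NAME, 'x'), (tokenize.NUMBER, '123')]
    for prev_type, prev_text in cases:
        binary = (prev_text in '}])' if prev_type == tokenize.OP
                  else prev_text not in KEYWORDS)
        print(prev_text, 'binary' if binary else 'unary')

So a following '-' after ')' or 'x' is treated as a binary operator (needs surrounding space), while after 'return' it is a unary minus.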
@@ -1170,11 +1163,11 @@ def filename_match(filename, patterns, default=True):

 if COMMENT_WITH_NL:
     def _is_eol_token(token):
-        return (token[0] in (tokenize.NEWLINE, tokenize.NL) or
+        return (token[0] in NEWLINE or
                 (token[0] == tokenize.COMMENT and token[1] == token[4]))
 else:
     def _is_eol_token(token):
-        return token[0] in (tokenize.NEWLINE, tokenize.NL)
+        return token[0] in NEWLINE


 ##############################################################################
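
Both _is_eol_token variants now share the NEWLINE set; the COMMENT_WITH_NL variant additionally treats a comment that makes up its entire physical line (token[1] == token[4], i.e. the token text equals the line text) as an end-of-line token. A sketch of the simpler variant on bracketed input, where tokenize emits NL inside the parentheses and NEWLINE at the end of the statement:

    import tokenize

    NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])

    def _is_eol_token(token):
        # token is the usual 5-tuple: (type, string, start, end, line)
        return token[0] in NEWLINE

    src = ["x = (1,\n", "     2)\n"]
    for tok in tokenize.generate_tokens(iter(src).__next__):
        print(tokenize.tok_name[tok[0]], _is_eol_token(tok))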
@@ -1440,11 +1433,11 @@ def check_all(self, expected=None, line_offset=0):
             elif text in '}])':
                 parens -= 1
             elif not parens:
-                if token_type == tokenize.NEWLINE:
-                    self.check_logical()
-                    self.blank_before = 0
-                elif token_type == tokenize.NL:
-                    if len(self.tokens) == 1:
+                if token_type in NEWLINE:
+                    if token_type == tokenize.NEWLINE:
+                        self.check_logical()
+                        self.blank_before = 0
+                    elif len(self.tokens) == 1:
                         # The physical line contains only this token.
                         self.blank_lines += 1
                         del self.tokens[0]
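
The restructured dispatch tests membership in NEWLINE once, then distinguishes the two cases: a NEWLINE token ends a logical line, so check_logical() runs and blank_before resets, while an NL that is the only token on its physical line marks a blank line. The distinction comes straight from the tokenizer, as this small demonstration on example input shows:

    import tokenize

    src = ['x = 1\n', '\n', 'y = (2,\n', '     3)\n']
    for tok in tokenize.generate_tokens(iter(src).__next__):
        if tok.type in (tokenize.NL, tokenize.NEWLINE):
            print('line', tok.start[0], tokenize.tok_name[tok.type])

This prints NEWLINE for lines 1 and 4 (ends of logical lines) and NL for lines 2 and 3 (a blank line and a break inside brackets); only the blank-line NL is the sole token of its physical line.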
