@@ -272,7 +272,7 @@ def new_line(self, tokens: TokenWrapper, line_end: int, line_start: int) -> None
     def process_module(self, node: nodes.Module) -> None:
         pass

-    # pylint: disable-next = too-many-return-statements, too-many-branches
+    # pylint: disable-next = too-many-return-statements
     def _check_keyword_parentheses(
         self, tokens: list[tokenize.TokenInfo], start: int
     ) -> None:
@@ -351,30 +351,31 @@ def _check_keyword_parentheses(
                         )
                     return
             elif depth == 1:
-                # This is a tuple, which is always acceptable.
-                if token[1] == ",":
-                    return
-                # 'and' and 'or' are the only boolean operators with lower precedence
-                # than 'not', so parens are only required when they are found.
-                if token[1] in {"and", "or"}:
-                    found_and_or = True
-                # A yield inside an expression must always be in parentheses,
-                # quit early without error.
-                elif token[1] == "yield":
-                    return
-                # A generator expression always has a 'for' token in it, and
-                # the 'for' token is only legal inside parens when it is in a
-                # generator expression. The parens are necessary here, so bail
-                # without an error.
-                elif token[1] == "for":
-                    return
-                # A generator expression can have an 'else' token in it.
-                # We check the rest of the tokens to see if any problems occur after
-                # the 'else'.
-                elif token[1] == "else":
-                    if "(" in (i.string for i in tokens[i:]):
-                        self._check_keyword_parentheses(tokens[i:], 0)
-                    return
+                match token[1]:
+                    case ",":
+                        # This is a tuple, which is always acceptable.
+                        return
+                    case "and" | "or":
+                        # 'and' and 'or' are the only boolean operators with lower precedence
+                        # than 'not', so parens are only required when they are found.
+                        found_and_or = True
+                    case "yield":
+                        # A yield inside an expression must always be in parentheses,
+                        # quit early without error.
+                        return
+                    case "for":
+                        # A generator expression always has a 'for' token in it, and
+                        # the 'for' token is only legal inside parens when it is in a
+                        # generator expression. The parens are necessary here, so bail
+                        # without an error.
+                        return
+                    case "else":
+                        # A generator expression can have an 'else' token in it.
+                        # We check the rest of the tokens to see if any problems occur after
+                        # the 'else'.
+                        if "(" in (i.string for i in tokens[i:]):
+                            self._check_keyword_parentheses(tokens[i:], 0)
+                        return

     def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
         """Process tokens and search for:
@@ -401,39 +402,42 @@ def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
             else:
                 self.new_line(TokenWrapper(tokens), idx - 1, idx)

-            if tok_type == tokenize.NEWLINE:
-                # a program statement, or ENDMARKER, will eventually follow,
-                # after some (possibly empty) run of tokens of the form
-                # (NL | COMMENT)* (INDENT | DEDENT+)?
-                # If an INDENT appears, setting check_equal is wrong, and will
-                # be undone when we see the INDENT.
-                check_equal = True
-                self._check_line_ending(string, line_num)
-            elif tok_type == tokenize.INDENT:
-                check_equal = False
-                self.check_indent_level(string, indents[-1] + 1, line_num)
-                indents.append(indents[-1] + 1)
-            elif tok_type == tokenize.DEDENT:
-                # there's nothing we need to check here! what's important is
-                # that when the run of DEDENTs ends, the indentation of the
-                # program statement (or ENDMARKER) that triggered the run is
-                # equal to what's left at the top of the indents stack
-                check_equal = True
-                if len(indents) > 1:
-                    del indents[-1]
-            elif tok_type == tokenize.NL:
-                if not line.strip("\r\n"):
-                    last_blank_line_num = line_num
-            elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
-                # This is the first concrete token following a NEWLINE, so it
-                # must be the first token of the next program statement, or an
-                # ENDMARKER; the "line" argument exposes the leading white-space
-                # for this statement; in the case of ENDMARKER, line is an empty
-                # string, so will properly match the empty string with which the
-                # "indents" stack was seeded
-                if check_equal:
+            match tok_type:
+                case tokenize.NEWLINE:
+                    # a program statement, or ENDMARKER, will eventually follow,
+                    # after some (possibly empty) run of tokens of the form
+                    # (NL | COMMENT)* (INDENT | DEDENT+)?
+                    # If an INDENT appears, setting check_equal is wrong, and will
+                    # be undone when we see the INDENT.
+                    check_equal = True
+                    self._check_line_ending(string, line_num)
+                case tokenize.INDENT:
                     check_equal = False
-                    self.check_indent_level(line, indents[-1], line_num)
+                    self.check_indent_level(string, indents[-1] + 1, line_num)
+                    indents.append(indents[-1] + 1)
+                case tokenize.DEDENT:
+                    # there's nothing we need to check here! what's important is
+                    # that when the run of DEDENTs ends, the indentation of the
+                    # program statement (or ENDMARKER) that triggered the run is
+                    # equal to what's left at the top of the indents stack
+                    check_equal = True
+                    if len(indents) > 1:
+                        del indents[-1]
+                case tokenize.NL:
+                    if not line.strip("\r\n"):
+                        last_blank_line_num = line_num
+                case tokenize.COMMENT | tokenize.ENCODING:
+                    pass
+                case _:
+                    # This is the first concrete token following a NEWLINE, so it
+                    # must be the first token of the next program statement, or an
+                    # ENDMARKER; the "line" argument exposes the leading white-space
+                    # for this statement; in the case of ENDMARKER, line is an empty
+                    # string, so will properly match the empty string with which the
+                    # "indents" stack was seeded
+                    if check_equal:
+                        check_equal = False
+                        self.check_indent_level(line, indents[-1], line_num)

             if tok_type == tokenize.NUMBER and string.endswith("l"):
                 self.add_message("lowercase-l-suffix", line=line_num)
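One detail worth noting about the `case tokenize.NEWLINE:` branches above: a dotted name in a `case` is a value pattern, compared against the subject with `==`, whereas a bare name in the same position would be a capture pattern that matches anything. A small standalone sketch, assuming Python 3.10+ (the `describe` function is illustrative and not part of pylint):

    import tokenize

    def describe(tok_type: int) -> str:
        match tok_type:
            case tokenize.NEWLINE:  # value pattern: tok_type == tokenize.NEWLINE
                return "end of a logical line"
            case tokenize.NL | tokenize.COMMENT:  # or-pattern over two constants
                return "non-logical line"
            case _:  # wildcard: everything else
                return "other token"

    assert describe(tokenize.NL) == "non-logical line"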
@@ -550,30 +554,26 @@ def _infer_else_finally_line_number(

     def _check_multi_statement_line(self, node: nodes.NodeNG, line: int) -> None:
         """Check for lines containing multiple statements."""
-        if isinstance(node, nodes.With):
-            # Do not warn about multiple nested context managers in with statements.
-            return
-        if (
-            isinstance(node.parent, nodes.If)
-            and not node.parent.orelse
-            and self.linter.config.single_line_if_stmt
-        ):
-            return
-        if (
-            isinstance(node.parent, nodes.ClassDef)
-            and len(node.parent.body) == 1
-            and self.linter.config.single_line_class_stmt
-        ):
-            return
-
-        # Functions stubs and class with ``Ellipsis`` as body are exempted.
-        if (
-            isinstance(node, nodes.Expr)
-            and isinstance(node.parent, (nodes.FunctionDef, nodes.ClassDef))
-            and isinstance(node.value, nodes.Const)
-            and node.value.value is Ellipsis
-        ):
-            return
+        match node:
+            case nodes.With():
+                # Do not warn about multiple nested context managers in with statements.
+                return
+            case nodes.NodeNG(
+                parent=nodes.If(orelse=[])
+            ) if self.linter.config.single_line_if_stmt:
+                return
+            case nodes.NodeNG(
+                parent=nodes.ClassDef(body=[_])
+            ) if self.linter.config.single_line_class_stmt:
+                return
+            case nodes.Expr(
+                parent=nodes.FunctionDef() | nodes.ClassDef(),
+                value=nodes.Const(value=value),
+            ) if (
+                value is Ellipsis
+            ):
+                # Functions stubs and class with ``Ellipsis`` as body are exempted.
+                return

         self.add_message("multiple-statements", node=node, confidence=HIGH)
         self._visited_lines[line] = 2
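For readers new to the class patterns used in this hunk: `nodes.With()` matches by `isinstance`, keyword sub-patterns such as `orelse=[]` and `body=[_]` additionally match the named attributes (an empty list and a one-element sequence, respectively), `value=nodes.Const(value=value)` both constrains and captures, and the trailing `if ...` is a guard evaluated only after the pattern matches. A self-contained sketch of the same mechanics with stand-in dataclasses, assuming Python 3.10+ (`Node` and `If` here are illustrative stand-ins, not astroid classes):

    from dataclasses import dataclass, field

    @dataclass
    class Node:
        body: list = field(default_factory=list)
        orelse: list = field(default_factory=list)

    @dataclass
    class If(Node):
        pass

    def exempt(node: Node, single_line_if_stmt: bool) -> bool:
        match node:
            case If(orelse=[]) if single_line_if_stmt:  # isinstance + attribute + guard
                return True
            case Node(body=[_]):  # any Node whose body holds exactly one item
                return True
            case _:
                return False

    assert exempt(If(), single_line_if_stmt=True)
    assert exempt(Node(body=["stmt"]), single_line_if_stmt=False)
    assert not exempt(Node(), single_line_if_stmt=False)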