Skip to content
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,10 @@ repos:
name: Run Ruff (lint) on Argument Clinic
args: [--exit-non-zero-on-fix, --config=Tools/clinic/.ruff.toml]
files: ^Tools/clinic/|Lib/test/test_clinic.py
- id: ruff
name: Run Ruff (lint) on Tools/peg_generator/
args: [--exit-non-zero-on-fix, --config=Tools/peg_generator/.ruff.toml]
files: ^Tools/peg_generator/
- id: ruff-format
name: Run Ruff (format) on Doc/
args: [--check]
Expand Down
26 changes: 26 additions & 0 deletions Tools/peg_generator/.ruff.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
extend = "../../.ruff.toml" # Inherit the project-wide settings
extend-exclude = [
# Generated files:
"Tools/peg_generator/pegen/grammar_parser.py",
]

[lint]
select = [
"F", # pyflakes
"I", # isort
"UP", # pyupgrade
"RUF100", # Ban unused `# noqa` comments
"PGH004", # Ban blanket `# noqa` comments (only ignore specific error codes)
]
ignore = [
# UP038 suggests using PEP-604 unions rather than tuples for isinstance() checks,
# but that makes the code slower and more verbose. https://github.com/astral-sh/ruff/issues/7871.
"UP038",
]
unfixable = [
# The autofixes sometimes do the wrong thing for these;
# it's better to manually inspect the code and see how it needs fixing
"F841", # Detects unused variables
"F601", # Detects dictionaries that have duplicate keys
"F602", # Also detects dictionaries that have duplicate keys
]
7 changes: 3 additions & 4 deletions Tools/peg_generator/pegen/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
import time
import token
import traceback
from typing import Tuple

from pegen.grammar import Grammar
from pegen.parser import Parser
Expand All @@ -21,7 +20,7 @@

def generate_c_code(
args: argparse.Namespace,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
) -> tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
from pegen.build import build_c_parser_and_generator

verbose = args.verbose
Expand Down Expand Up @@ -50,7 +49,7 @@ def generate_c_code(

def generate_python_code(
args: argparse.Namespace,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
) -> tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
from pegen.build import build_python_parser_and_generator

verbose = args.verbose
Expand Down Expand Up @@ -188,7 +187,7 @@ def main() -> None:


if __name__ == "__main__":
if sys.version_info < (3, 8):
if sys.version_info < (3, 8): # noqa: UP036
print("ERROR: using pegen requires at least Python 3.8!", file=sys.stderr)
sys.exit(1)
main()
18 changes: 9 additions & 9 deletions Tools/peg_generator/pegen/ast_dump.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,17 +6,17 @@
TODO: Remove the above-described hack.
"""

from typing import Any, Optional, Tuple
from typing import Any


def ast_dump(
node: Any,
annotate_fields: bool = True,
include_attributes: bool = False,
*,
indent: Optional[str] = None,
indent: str | None = None,
) -> str:
def _format(node: Any, level: int = 0) -> Tuple[str, bool]:
def _format(node: Any, level: int = 0) -> tuple[str, bool]:
if indent is not None:
level += 1
prefix = "\n" + indent * level
Expand All @@ -41,7 +41,7 @@ def _format(node: Any, level: int = 0) -> Tuple[str, bool]:
value, simple = _format(value, level)
allsimple = allsimple and simple
if keywords:
args.append("%s=%s" % (name, value))
args.append(f"{name}={value}")
else:
args.append(value)
if include_attributes and node._attributes:
Expand All @@ -54,16 +54,16 @@ def _format(node: Any, level: int = 0) -> Tuple[str, bool]:
continue
value, simple = _format(value, level)
allsimple = allsimple and simple
args.append("%s=%s" % (name, value))
args.append(f"{name}={value}")
if allsimple and len(args) <= 3:
return "%s(%s)" % (node.__class__.__name__, ", ".join(args)), not args
return "%s(%s%s)" % (node.__class__.__name__, prefix, sep.join(args)), False
return "{}({})".format(node.__class__.__name__, ", ".join(args)), not args
return f"{node.__class__.__name__}({prefix}{sep.join(args)})", False
elif isinstance(node, list):
if not node:
return "[]", True
return "[%s%s]" % (prefix, sep.join(_format(x, level)[0] for x in node)), False
return f"[{prefix}{sep.join(_format(x, level)[0] for x in node)}]", False
return repr(node), True

if all(cls.__name__ != "AST" for cls in node.__class__.__mro__):
raise TypeError("expected AST, got %r" % node.__class__.__name__)
raise TypeError(f"expected AST, got {node.__class__.__name__!r}")
return _format(node)[0]
23 changes: 11 additions & 12 deletions Tools/peg_generator/pegen/build.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import sysconfig
import tempfile
import tokenize
from typing import IO, Any, Dict, List, Optional, Set, Tuple
from typing import IO, Any

from pegen.c_generator import CParserGenerator
from pegen.grammar import Grammar
Expand All @@ -18,11 +18,11 @@

MOD_DIR = pathlib.Path(__file__).resolve().parent

TokenDefinitions = Tuple[Dict[int, str], Dict[str, int], Set[str]]
TokenDefinitions = tuple[dict[int, str], dict[str, int], set[str]]
Incomplete = Any # TODO: install `types-setuptools` and remove this alias


def get_extra_flags(compiler_flags: str, compiler_py_flags_nodist: str) -> List[str]:
def get_extra_flags(compiler_flags: str, compiler_py_flags_nodist: str) -> list[str]:
flags = sysconfig.get_config_var(compiler_flags)
py_flags_nodist = sysconfig.get_config_var(compiler_py_flags_nodist)
if flags is None or py_flags_nodist is None:
Expand Down Expand Up @@ -71,11 +71,11 @@ def fixup_build_ext(cmd: Incomplete) -> None:

def compile_c_extension(
generated_source_path: str,
build_dir: Optional[str] = None,
build_dir: str | None = None,
verbose: bool = False,
keep_asserts: bool = True,
disable_optimization: bool = False,
library_dir: Optional[str] = None,
library_dir: str | None = None,
) -> pathlib.Path:
"""Compile the generated source for a parser generator into an extension module.

Expand All @@ -93,11 +93,10 @@ def compile_c_extension(
"""
import setuptools.command.build_ext
import setuptools.logging

from setuptools import Extension, Distribution
from setuptools.modified import newer_group
from setuptools import Distribution, Extension
from setuptools._distutils.ccompiler import new_compiler
from setuptools._distutils.sysconfig import customize_compiler
from setuptools.modified import newer_group

if verbose:
setuptools.logging.set_threshold(logging.DEBUG)
Expand Down Expand Up @@ -241,7 +240,7 @@ def compile_c_extension(

def build_parser(
grammar_file: str, verbose_tokenizer: bool = False, verbose_parser: bool = False
) -> Tuple[Grammar, Parser, Tokenizer]:
) -> tuple[Grammar, Parser, Tokenizer]:
with open(grammar_file) as file:
tokenizer = Tokenizer(tokenize.generate_tokens(file.readline), verbose=verbose_tokenizer)
parser = GrammarParser(tokenizer, verbose=verbose_parser)
Expand Down Expand Up @@ -292,7 +291,7 @@ def build_c_generator(
keep_asserts_in_extension: bool = True,
skip_actions: bool = False,
) -> ParserGenerator:
with open(tokens_file, "r") as tok_file:
with open(tokens_file) as tok_file:
all_tokens, exact_tok, non_exact_tok = generate_token_definitions(tok_file)
with open(output_file, "w") as file:
gen: ParserGenerator = CParserGenerator(
Expand Down Expand Up @@ -333,7 +332,7 @@ def build_c_parser_and_generator(
verbose_c_extension: bool = False,
keep_asserts_in_extension: bool = True,
skip_actions: bool = False,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
) -> tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
"""Generate rules, C parser, tokenizer, parser generator for a given grammar

Args:
Expand Down Expand Up @@ -373,7 +372,7 @@ def build_python_parser_and_generator(
verbose_tokenizer: bool = False,
verbose_parser: bool = False,
skip_actions: bool = False,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
) -> tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
"""Generate rules, python parser, tokenizer, parser generator for a given grammar

Args:
Expand Down
Loading
Loading