|
22 | 22 | from sphinx.util.parsing import nested_parse_to_nodes |
23 | 23 |
|
24 | 24 | if TYPE_CHECKING: |
25 | | - from collections.abc import Callable, Iterable, Iterator, Set |
| 25 | + from collections.abc import Callable, Iterable, Iterator, Sequence, Set |
26 | 26 | from typing import Any, ClassVar, Final |
27 | 27 |
|
28 | 28 | from docutils.nodes import Element, Node, system_message |
@@ -597,41 +597,91 @@ class ProductionList(SphinxDirective): |
597 | 597 | option_spec: ClassVar[OptionSpec] = {} |
598 | 598 |
|
599 | 599 | def run(self) -> list[Node]: |
600 | | - domain = self.env.domains.standard_domain |
601 | | - node: Element = addnodes.productionlist() |
| 600 | + node = addnodes.productionlist() |
602 | 601 | self.set_source_info(node) |
603 | 602 | # The backslash handling is from ObjectDescription.get_signatures |
604 | 603 | nl_escape_re = re.compile(r'\\\n') |
605 | 604 | lines = nl_escape_re.sub('', self.arguments[0]).split('\n') |
| 605 | + production_group = self.production_group(lines, self.options) |
| 606 | + production_lines = list(self.production_definitions(lines)) |
| 607 | + max_len = max((len(name) for _, name, _ in production_lines), default=0) |
| 608 | + node_location = self.get_location() |
| 609 | + node += [ |
| 610 | + self.make_production( |
| 611 | + rawsource=rule, |
| 612 | + name=name, |
| 613 | + tokens=tokens, |
| 614 | + production_group=production_group, |
| 615 | + max_len=max_len, |
| 616 | + location=node_location, |
| 617 | + ) |
| 618 | + for rule, name, tokens in production_lines |
| 619 | + ] |
| 620 | + return [node] |
606 | 621 |
|
607 | | - production_group = '' |
608 | | - first_rule_seen = False |
609 | | - for rule in lines: |
610 | | - if not first_rule_seen and ':' not in rule: |
611 | | - production_group = rule.strip() |
612 | | - continue |
613 | | - first_rule_seen = True |
614 | | - try: |
615 | | - name, tokens = rule.split(':', 1) |
616 | | - except ValueError: |
| 622 | + @staticmethod |
| 623 | + def production_group(lines: Sequence[str], options: dict[str, Any]) -> str: # NoQA: ARG004 |
| 624 | + """Return the optional group name, consuming it from *lines* in place.""" |
| 625 | + if not lines or ':' in lines[0]: |
| 626 | + return '' |
| 627 | + production_group = lines[0].strip() |
| 628 | + lines[:] = lines[1:] |
| 629 | + return production_group |
| 630 | + |
| 631 | + @staticmethod |
| 632 | + def production_definitions(lines: Iterable[str]) -> Iterator[tuple[str, str, str]]: |
| 633 | + """Yield triples of rawsource, name, definition.""" |
| 634 | + for line in lines: |
| 635 | + if ':' not in line: |
617 | 636 | break |
618 | | - subnode = addnodes.production(rule) |
619 | | - name = name.strip() |
620 | | - subnode['tokenname'] = name |
621 | | - if subnode['tokenname']: |
622 | | - prefix = 'grammar-token-%s' % production_group |
623 | | - node_id = make_id(self.env, self.state.document, prefix, name) |
624 | | - subnode['ids'].append(node_id) |
625 | | - self.state.document.note_implicit_target(subnode, subnode) |
626 | | - |
627 | | - if len(production_group) != 0: |
628 | | - obj_name = f'{production_group}:{name}' |
629 | | - else: |
630 | | - obj_name = name |
631 | | - domain.note_object('token', obj_name, node_id, location=node) |
632 | | - subnode.extend(token_xrefs(tokens, production_group=production_group)) |
633 | | - node.append(subnode) |
634 | | - return [node] |
| 637 | + name, _, tokens = line.partition(':') |
| 638 | + yield line, name.strip(), tokens.strip() |
| 639 | + |
| 640 | + def make_production( |
| 641 | + self, |
| 642 | + rawsource: str, |
| 643 | + name: str, |
| 644 | + tokens: str, |
| 645 | + production_group: str, |
| 646 | + max_len: int, |
| 647 | + location: str, |
| 648 | + ) -> addnodes.production: |
| 649 | + production_node = addnodes.production(rawsource, tokenname=name) |
| 650 | + if name: |
| 651 | + production_node += self.make_target(name, production_group, location) |
| 652 | + else: |
| 653 | + production_node += self.continuation_padding(max_len) |
| 654 | + production_node.append(self.production_separator(name, max_len)) |
| 655 | + production_node += token_xrefs(tokens, production_group=production_group) |
| 656 | + production_node.append(nodes.Text('\n')) |
| 657 | + return production_node |
| 658 | + |
| 659 | + def make_target( |
| 660 | + self, |
| 661 | + name: str, |
| 662 | + production_group: str, |
| 663 | + location: str, |
| 664 | + ) -> addnodes.literal_strong: |
| 665 | + """Make a link target for the given production.""" |
| 666 | + name_node = addnodes.literal_strong(name, name) |
| 667 | + prefix = f'grammar-token-{production_group}' |
| 668 | + node_id = make_id(self.env, self.state.document, prefix, name) |
| 669 | + name_node['ids'].append(node_id) |
| 670 | + self.state.document.note_implicit_target(name_node, name_node) |
| 671 | + obj_name = f'{production_group}:{name}' if production_group else name |
| 672 | + std = self.env.domains.standard_domain |
| 673 | + std.note_object('token', obj_name, node_id, location=location) |
| 674 | + return name_node |
| 675 | + |
| 676 | + @staticmethod |
| 677 | + def continuation_padding(max_len: int) -> nodes.Text: |
| 678 | + return nodes.Text(' ' * max_len) |
| 679 | + |
| 680 | + @staticmethod |
| 681 | + def production_separator(name: str, max_len: int) -> nodes.Text: |
| 682 | + if name: |
| 683 | + return nodes.Text(' ::= '.rjust(max_len - len(name) + 5)) |
| 684 | + return nodes.Text(' ') |
635 | 685 |
|
636 | 686 |
|
637 | 687 | class TokenXRefRole(XRefRole): |
|
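As a quick reference, here is a minimal standalone sketch of the line-splitting
behaviour that the new production_group and production_definitions helpers
implement. The helper names (split_production_group, iter_definitions) and the
sample grammar are illustrative only, not part of the change; the logic mirrors
the added methods.

    def split_production_group(lines: list[str]) -> str:
        # A first line without ':' names the production group and is consumed.
        if not lines or ':' in lines[0]:
            return ''
        group = lines[0].strip()
        del lines[0]
        return group

    def iter_definitions(lines: list[str]):
        # Yield (rawsource, name, tokens); stop at the first line lacking ':'.
        for line in lines:
            if ':' not in line:
                break
            name, _, tokens = line.partition(':')
            yield line, name.strip(), tokens.strip()

    # Continuation lines leave the name empty before the ':'; make_production
    # renders those with padding instead of a link target.
    sample = [
        'python',
        'try_stmt: try1_stmt | try2_stmt',
        'try1_stmt: "try" ":" suite',
        '         : ("except" ":" suite)+',
    ]
    print(split_production_group(sample))  # -> 'python'
    for _raw, name, tokens in iter_definitions(sample):
        print(repr(name), '->', tokens)
    # 'try_stmt' -> try1_stmt | try2_stmt
    # 'try1_stmt' -> "try" ":" suite
    # '' -> ("except" ":" suite)+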