55"""
66from __future__ import annotations
77
8- from typing import Any , Callable , Dict , List , Tuple , Union , cast
8+ from typing import Callable , Dict , List , Union , cast
99
10- from pydoctor .epydoc .markup import DocstringLinker , ParseError , ParsedDocstring , get_parser_by_name
11- from pydoctor .node2stan import node2stan
10+ from pydoctor .epydoc .markup import ParseError , ParsedDocstring , get_parser_by_name
11+ from pydoctor .epydoc . markup . _pyval_repr import PyvalColorizer
1212from pydoctor .napoleon .docstring import TokenType , TypeDocstring
13+ from pydoctor .epydoc .docutils import new_document , set_node_attributes
1314
1415from docutils import nodes
15- from twisted .web .template import Tag , tags
1616
+# TODO: This class should use composition instead of multiple inheritance...
 class ParsedTypeDocstring(TypeDocstring, ParsedDocstring):
     """
     Add L{ParsedDocstring} interface on top of L{TypeDocstring} and
@@ -38,25 +39,15 @@ def __init__(self, annotation: Union[nodes.document, str],
         else:
             TypeDocstring.__init__(self, annotation, warns_on_unknown_tokens)
 
-
-        # We need to store the line number because we need to pass it to DocstringLinker.link_xref
         self._lineno = lineno
+        self._document = self._parse_tokens()
 
     @property
     def has_body(self) -> bool:
         return len(self._tokens) > 0
 
     def to_node(self) -> nodes.document:
-        """
-        Not implemented.
-        """
-        raise NotImplementedError()
-
-    def to_stan(self, docstring_linker: DocstringLinker) -> Tag:
-        """
-        Present the type as a stan tree.
-        """
-        return self._convert_type_spec_to_stan(docstring_linker)
+        return self._document
 
     def _tokenize_node_type_spec(self, spec: nodes.document) -> List[Union[str, nodes.Node]]:
         def _warn_not_supported(n:nodes.Node) -> None:
@@ -84,97 +75,42 @@ def _warn_not_supported(n:nodes.Node) -> None:
 
         return tokens
 
-    def _convert_obj_tokens_to_stan(self, tokens: List[Tuple[Any, TokenType]],
-                                    docstring_linker: DocstringLinker) -> list[tuple[Any, TokenType]]:
-        """
-        Convert L{TokenType.OBJ} and PEP 484-like L{TokenType.DELIMITER} tokens to stan and merge them together. Leave the rest untouched.
-
-        Example:
-
-        >>> tokens = [("list", TokenType.OBJ), ("(", TokenType.DELIMITER), ("int", TokenType.OBJ), (")", TokenType.DELIMITER)]
-        >>> ann._convert_obj_tokens_to_stan(tokens, NotFoundLinker())
-        ... [(Tag('code', children=['list', '(', 'int', ')']), TokenType.OBJ)]
-
-        @param tokens: List of tuples: C{(token, type)}
+    def _parse_tokens(self) -> nodes.document:
         """
-
-        combined_tokens: list[tuple[Any, TokenType]] = []
-
-        open_parenthesis = 0
-        open_square_braces = 0
-
-        for _token, _type in tokens:
-            # The actual type of _token is str | Tag | Node.
-
-            if (_type is TokenType.DELIMITER and _token in ('[', '(', ')', ']')) \
-               or _type is TokenType.OBJ:
-                if _token == "[": open_square_braces += 1
-                elif _token == "(": open_parenthesis += 1
-
-                if _type is TokenType.OBJ:
-                    _token = docstring_linker.link_xref(
-                        _token, _token, self._lineno)
-
-                if open_square_braces + open_parenthesis > 0:
-                    try: last_processed_token = combined_tokens[-1]
-                    except IndexError:
-                        combined_tokens.append((_token, _type))
-                    else:
-                        if last_processed_token[1] is TokenType.OBJ \
-                           and isinstance(last_processed_token[0], Tag):
-                            # Merge with last Tag
-                            if _type is TokenType.OBJ:
-                                assert isinstance(_token, Tag)
-                                last_processed_token[0](*_token.children)
-                            else:
-                                last_processed_token[0](_token)
-                        else:
-                            combined_tokens.append((_token, _type))
-                else:
-                    combined_tokens.append((_token, _type))
-
-                if _token == "]": open_square_braces -= 1
-                elif _token == ")": open_parenthesis -= 1
-
-            else:
-                # The token will be processed in the _convert_type_spec_to_stan() method.
-                combined_tokens.append((_token, _type))
-
-        return combined_tokens
-
-    def _convert_type_spec_to_stan(self, docstring_linker: DocstringLinker) -> Tag:
-        """
-        Convert type to L{Tag} object.
+        Convert the type spec to a docutils document object.
         """
 
-        tokens = self._convert_obj_tokens_to_stan(self._tokens, docstring_linker)
-
+        document = new_document('code')
         warnings: List[ParseError] = []
 
-        converters: Dict[TokenType, Callable[[Union[str, Tag]], Union[str, Tag]]] = {
-            TokenType.LITERAL: lambda _token: tags.span(_token, class_="literal"),
-            TokenType.CONTROL: lambda _token: tags.em(_token),
-            # We don't use safe_to_stan() here; if these converter functions raise an exception,
-            # the whole type docstring will be rendered as plaintext.
-            # It does not crash on invalid XML entities.
-            TokenType.REFERENCE: lambda _token: get_parser_by_name('restructuredtext')(_token, warnings).to_stan(docstring_linker) if isinstance(_token, str) else _token,
-            TokenType.UNKNOWN: lambda _token: get_parser_by_name('restructuredtext')(_token, warnings).to_stan(docstring_linker) if isinstance(_token, str) else _token,
-            TokenType.OBJ: lambda _token: _token,  # These conversions (OBJ and DELIMITER) are done in _convert_obj_tokens_to_stan().
-            TokenType.DELIMITER: lambda _token: _token,
+        converters: Dict[TokenType, Callable[[str | nodes.Node], str | nodes.Node | list[nodes.Node]]] = {
+            # We re-use the variable-string CSS class for the whole literal token; it's the
+            # best approximation we have for now.
+            TokenType.LITERAL: lambda _token: nodes.inline(_token, _token, classes=[PyvalColorizer.STRING_TAG]),
+            TokenType.CONTROL: lambda _token: nodes.emphasis(_token, _token),
+            TokenType.REFERENCE: lambda _token: get_parser_by_name('restructuredtext')(_token, warnings).to_node().children if isinstance(_token, str) else _token,
+            TokenType.UNKNOWN: lambda _token: get_parser_by_name('restructuredtext')(_token, warnings).to_node().children if isinstance(_token, str) else _token,
+            TokenType.OBJ: lambda _token: nodes.title_reference(_token, _token, line=self._lineno),
+            TokenType.DELIMITER: lambda _token: nodes.Text(_token),
             TokenType.ANY: lambda _token: _token,
         }
 
         for w in warnings:
             self.warnings.append(w.descr())
 
-        converted = Tag('')
+        elements = []
 
-        for token, type_ in tokens:
+        for token, type_ in self._tokens:
             assert token is not None
-            if isinstance(token, nodes.Node):
-                token = node2stan(token, docstring_linker)
-            assert isinstance(token, (str, Tag))
             converted_token = converters[type_](token)
-            converted(converted_token)
+            if isinstance(converted_token, list):
+                elements.extend(converted_token)
+            elif isinstance(converted_token, str) and not isinstance(converted_token, nodes.Text):
+                elements.append(nodes.Text(converted_token))
+            else:
+                elements.append(converted_token)
 
-        return converted
+        return set_node_attributes(document, children=[
+            set_node_attributes(nodes.inline('', '',
+                                classes=['literal']),
+                                children=elements)])
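
For context, here is a minimal usage sketch of the class after this change. It assumes the module path `pydoctor.epydoc.markup._types` and the `lineno` keyword implied by `self._lineno = lineno` above; the sample type string and the `pformat()` dump are illustrative only, not taken from this patch.

```python
from pydoctor.epydoc.markup._types import ParsedTypeDocstring

# Parse a numpydoc/Google-style type spec. The docutils document is now
# built eagerly in __init__ via _parse_tokens(), so to_node() is a plain
# accessor instead of raising NotImplementedError.
parsed = ParsedTypeDocstring("list(int) or str, optional", lineno=42)

if parsed.has_body:
    document = parsed.to_node()
    # OBJ tokens become title_reference nodes carrying the line number for
    # later cross-reference resolution; the whole type spec is wrapped in a
    # single inline node with the 'literal' CSS class.
    print(document.pformat())
```

Because the document is built once in the constructor, `to_node()` can be called any number of times without re-parsing, and the stan conversion no longer needs a `DocstringLinker` at this stage.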