forked from NVIDIA/TensorRT-LLM
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconf.py
More file actions
219 lines (178 loc) · 6.75 KB
/
conf.py
File metadata and controls
219 lines (178 loc) · 6.75 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import datetime
import importlib.util
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import subprocess
import sys
import pygit2
from docutils import nodes
# Make files next to this conf.py (e.g. helper.py) importable during the build.
sys.path.insert(0, os.path.abspath('.'))

project = 'TensorRT-LLM'
copyright = '2025, NVidia'
author = 'NVidia'

html_show_sphinx = False

# Open the git repository once and derive both the branch name (used for
# markdown "source:" links) and the short commit hash (shown in the footer).
repo = pygit2.Repository('.')
branch_name = repo.head.shorthand
commit_hash = str(repo.head.target)[:7]  # first 7 characters of the HEAD commit

# Build date rendered in the page footer (UTC), e.g. "January 01, 2025".
last_updated = datetime.datetime.now(
    datetime.timezone.utc).strftime("%B %d, %Y")

# Load __version__ straight from tensorrt_llm/version.py by path, so the docs
# build does not require the full tensorrt_llm package to be importable.
# NOTE(review): assumed rationale — confirm.
version_path = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "../../tensorrt_llm/version.py"))
spec = importlib.util.spec_from_file_location("version_module", version_path)
version_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(version_module)
version = version_module.__version__
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
templates_path = ['_templates']
exclude_patterns = ['performance/performance-tuning-guide/introduction.md']

# Sphinx extensions enabled for this build.
extensions = [
    'sphinx.ext.duration',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'myst_parser',  # for markdown support
    "breathe",  # bridges the doxygen XML for the C++ API (configured below)
    'sphinx.ext.todo',
    'sphinx.ext.autosectionlabel',
    'sphinxarg.ext',
    'sphinx_click',
    'sphinx_copybutton',
    'sphinxcontrib.autodoc_pydantic',
    'sphinx_togglebutton',
]

# autodoc / autodoc_pydantic rendering options.
autodoc_member_order = 'bysource'
autodoc_pydantic_model_show_json = True
autodoc_pydantic_model_show_config_summary = True
autodoc_pydantic_field_doc_policy = "description"
autodoc_pydantic_model_show_field_list = True  # display field list with descriptions
autodoc_pydantic_model_member_order = "groupwise"
autodoc_pydantic_model_hide_pydantic_methods = True
autodoc_pydantic_field_list_validators = False
autodoc_pydantic_settings_signature_prefix = ""  # remove any prefix
autodoc_pydantic_settings_hide_reused_validator = True  # hiding reused validators keeps pages cleaner

# MyST URL schemes; "source:" links resolve to the GitHub tree of the branch
# this build was made from.
myst_url_schemes = {
    "http": None,
    "https": None,
    "source":
    "https://github.com/NVIDIA/TensorRT-LLM/tree/" + branch_name + "/{{path}}",
}
myst_heading_anchors = 4  # auto-generate anchors for headings up to level 4
myst_enable_extensions = [
    "deflist",
    "substitution",
]

# Values substituted into markdown sources via {{name}}.
myst_substitutions = {
    "version": version,
    "version_quote": f"`{version}`",
    "container_tag_admonition": r"""
```{admonition} Container image tags
:class: dropdown note
In the example shell commands, `x.y.z` corresponds to the TensorRT-LLM container
version to use. If omitted, `IMAGE_TAG` will default to `tensorrt_llm.__version__`
(e.g., this documentation was generated from the {{version_quote}} source tree).
If this does not work, e.g., because a container for the version you are
currently working with has not been released yet, you can try using a
container published for a previous
[GitHub pre-release or release](https://github.com/NVIDIA/TensorRT-LLM/releases)
(see also [NGC Catalog](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/tensorrt-llm/containers/release/tags)).
```
""",
}

autosummary_generate = True

# sphinx-copybutton: strip prompts and skip line numbers / console output.
copybutton_exclude = '.linenos, .gp, .go'
copybutton_prompt_text = ">>> |$ |# "
copybutton_line_continuation_character = "\\"
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

# Map file extensions to Sphinx source parsers.
source_suffix = {
    '.rst': 'restructuredtext',
    '.txt': 'markdown',
    '.md': 'markdown',
    '.json': 'json',
}

html_theme = 'nvidia_sphinx_theme'
html_static_path = ['_static']
html_theme_options = {
    # Version switcher dropdown; switcher.json presumably lists the published
    # doc versions — verify against the deployed site.
    "switcher": {
        "json_url": "./_static/switcher.json",
        "version_match": version,
        "check_switcher": True,
    },
    # Footer shows the build date and the exact commit the docs were built from.
    "extra_footer": [
        f'<p>Last updated on {last_updated}.</p>',
        f'<p>This page is generated by TensorRT-LLM commit <a href="https://github.com/NVIDIA/TensorRT-LLM/tree/{commit_hash}">{commit_hash}</a>.</p>'
    ]
}

# ------------------------ C++ Doc related --------------------------
# Breathe configuration: points at the doxygen XML output for the C++ API.
breathe_default_project = "TensorRT-LLM"
breathe_projects = {"TensorRT-LLM": "../cpp_docs/xml"}

# Paths used by the generated C++ doc pages below.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CPP_INCLUDE_DIR = os.path.join(SCRIPT_DIR, '../../cpp/include/tensorrt_llm')
CPP_GEN_DIR = os.path.join(SCRIPT_DIR, '_cpp_gen')  # generated .rst output dir
print('CPP_INCLUDE_DIR', CPP_INCLUDE_DIR)
print('CPP_GEN_DIR', CPP_GEN_DIR)

html_css_files = [
    'custom.css',
]
def tag_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Docutils role rendering ``:tag:`X``` as a literal with tag CSS classes."""
    if options is None:
        options = {}
    if content is None:
        content = []
    css_class = text.lower()
    literal = nodes.literal(text, text, classes=['tag', css_class])
    return [literal], []
def setup(app):
    """Sphinx extension hook: register the :tag: role and generate doc pages."""
    # Imported here rather than at module top — presumably so conf.py can be
    # parsed even when these project-local modules are unavailable; confirm.
    from helper import generate_examples, generate_llmapi
    from tensorrt_llm.llmapi.utils import tag_llm_params
    # Tag LLM API params before the autodoc pages are generated.
    tag_llm_params()
    app.add_role('tag', tag_role)
    generate_examples()
    generate_llmapi()
def gen_cpp_doc(ofile_name: str, header_dir: str, summary: str) -> None:
    """Write an .rst page embedding doxygen docs for every header in a dir.

    Args:
        ofile_name: Path of the .rst file to (over)write.
        header_dir: Directory scanned (non-recursively) for ``.h`` files.
        summary: Page preamble written before the per-header sections.
    """
    # Sort for deterministic page ordering — os.listdir order is arbitrary,
    # which previously made doc builds non-reproducible.
    cpp_header_files = sorted(
        file for file in os.listdir(header_dir) if file.endswith('.h'))
    with open(ofile_name, 'w') as ofile:
        ofile.write(summary + "\n")
        for header in cpp_header_files:
            # Section title (file name, underlined) followed by the Breathe
            # doxygenfile directive that expands into the API documentation.
            ofile.write(f"{header}\n")
            ofile.write("_" * len(header) + "\n\n")
            ofile.write(f".. doxygenfile:: {header}\n")
            ofile.write(" :project: TensorRT-LLM\n\n")
# Preamble of the generated Runtime C++ API page (plain string — the previous
# f-string had no placeholders).
runtime_summary = """
Runtime
==========
.. Here are files in the cpp/include/runtime
.. We manually add subsection to enable detailed description in the future
.. It is also doable to automatically generate this file and list all the modules in the conf.py
""".strip()

# Ensure the output directory exists; os.makedirs is portable and avoids
# spawning a shell utility (replaces `subprocess.run(['mkdir', '-p', ...])`).
os.makedirs(CPP_GEN_DIR, exist_ok=True)
gen_cpp_doc(CPP_GEN_DIR + '/runtime.rst', CPP_INCLUDE_DIR + '/runtime',
            runtime_summary)
# Preamble of the generated Executor C++ API page (plain string — the previous
# f-string had no placeholders).
executor_summary = """
Executor
==========
.. Here are files in the cpp/include/executor
.. We manually add subsection to enable detailed description in the future
.. It is also doable to automatically generate this file and list all the modules in the conf.py
""".strip()

# Ensure the output directory exists; os.makedirs is portable and avoids
# spawning a shell utility (replaces `subprocess.run(['mkdir', '-p', ...])`).
os.makedirs(CPP_GEN_DIR, exist_ok=True)
gen_cpp_doc(CPP_GEN_DIR + '/executor.rst', CPP_INCLUDE_DIR + '/executor',
            executor_summary)