forked from ai-dynamo/dynamo
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpyproject.toml
More file actions
337 lines (302 loc) · 12.7 KB
/
pyproject.toml
File metadata and controls
337 lines (302 loc) · 12.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
# SPDX-FileCopyrightText: Copyright (c) 2024-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# ---------------------------------------------------------------------------
# Package metadata (PEP 621) for the `ai-dynamo` distribution.
# ---------------------------------------------------------------------------
[project]
name = "ai-dynamo"
version = "1.0.0"
description = "Distributed Inference Framework"
readme = "README.md"
authors = [
{ name = "NVIDIA Inc.", email = "sw-dl-dynamo@nvidia.com" },
]
license = { text = "Apache-2.0" }
license-files = ["LICENSE"]
requires-python = ">=3.10"
# Core runtime dependencies installed with `pip install ai-dynamo`.
dependencies = [
# Companion runtime package, pinned to the same release as this package.
"ai-dynamo-runtime==1.0.0",
"transformers>=4.56.0",
# NOTE(review): pytest and the types-* stub packages below are normally
# dev-only dependencies; confirm they are intentionally shipped at runtime.
"pytest>=8.3.4",
"types-aiofiles>=24.1.0",
"types-psutil>=7.0.0.20250218",
"types-requests>=2.32.4.20260107",
"kubernetes>=32.0.1,<33.0.0",
"fastapi>=0.115.0",
"distro",
# filelock: required by planner
"filelock",
"typer",
# click is capped below 8.2.0 — presumably for CLI-stack compatibility; TODO confirm.
"click<8.2.0",
"setuptools",
"prometheus_client>=0.23.1,<1.0",
]
# PyPI trove classifiers: beta quality, Python 3.10-3.12, Linux only.
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Operating System :: POSIX :: Linux",
]
keywords = ["llm", "genai", "inference", "nvidia", "distributed", "dynamo"]
[project.urls]
Repository = "https://github.com/ai-dynamo/dynamo.git"
# Backend-specific extras: `pip install ai-dynamo[trtllm|vllm|sglang]`.
# Exactly one backend extra is expected per installation.
[project.optional-dependencies]
# Fixed: "trtllm =[" → "trtllm = [" for consistent spacing with sibling keys.
trtllm = [
"uvloop",
"msgpack==1.1.2",
"tensorrt-llm==1.3.0rc5",
]
vllm = [
"uvloop",
"nixl[cu12]<=0.10.1",
"vllm[flashinfer,runai]==0.16.0",
# vllm-omni 0.16.0rc1 is not on PyPI; installed from source in container builds
# (see container/deps/vllm/install_vllm.sh). pip install ai-dynamo[vllm] will
# not include vllm-omni — install it separately from source if needed.
# "vllm-omni==0.16.0rc1",
"blake3>=1.0.0,<2.0.0",
]
sglang = [
"uvloop",
"sglang[diffusion]==0.5.9",
"nixl[cu12]<=0.10.1",
"cupy-cuda12x>=13.0.0",
]
# Register each backend's test conftest as a pytest plugin (entry-point group
# "pytest11") so pytest auto-loads it when this package is installed.
[project.entry-points.pytest11]
vllm_tests = "dynamo.vllm.tests.conftest"
trtllm_tests = "dynamo.trtllm.tests.conftest"
sglang_tests = "dynamo.sglang.tests.conftest"
# PEP 735 dependency groups — development-only, not installed with the package.
[dependency-groups]
# Everything needed to build the documentation site.
docs = [
# Core Sphinx
"sphinx>=8.1",
"nvidia-sphinx-theme>=0.0.8",
# Sphinx extensions
"ablog>=0.11",
"sphinx-copybutton>=0.5",
"sphinx-design>=0.6",
"sphinx-prompt>=1.9",
"sphinx-sitemap>=2.6",
"sphinx-tabs>=3.4",
"sphinx-book-theme>=1.1",
"sphinxcontrib-mermaid>=1.0",
"sphinxcontrib-bibtex>=2.6",
"sphinx-reredirects>=1.0.0",
# Markdown and notebook support
"myst-parser>=4.0",
"myst-nb>=1.2",
"nbsphinx>=0.9",
]
# Build backend: hatchling (PEP 517).
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
# Custom build hook — see hatch_build.py for what it does at wheel-build time.
[tool.hatch.build.hooks.custom]
path = "hatch_build.py"
# The wheel ships the `dynamo` package tree from components/src.
[tool.hatch.build.targets.wheel]
packages = [
"components/src/dynamo",
]
# Allow direct (URL/path) references in dependency specifiers.
[tool.hatch.metadata]
allow-direct-references = true
# codespell: spell checker, normally invoked through pre-commit.
[tool.codespell]
# note: pre-commit passes explicit lists of files here, which this skip file list doesn't override -
# this is only to allow you to run codespell interactively
# this also overrides the grpc_generated folder, since it is generated
# Ignore data files and auto-generated files
skip = "./.git,./.github,./lib/llm/tests/data,*.lock,*.sum"
# ignore allowed words used in code
ignore-words-list = "afterall,ser,ende"
# use the 'clear' dictionary for unambiguous spelling mistakes
builtin = "clear"
# use custom dictionary in addition to the built-in one
dictionary = "./codespell.txt"
# disable warnings about binary files and wrong encoding
quiet-level = 3
# isort: import sorting, configured for compatibility with Black formatting.
[tool.isort]
profile = "black"
use_parentheses = true
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
ensure_newline_before_comments = true
line_length = 88
balanced_wrapping = true
indent = " "
skip = ["build"]
known_first_party = ["dynamo", "deploy"]
# isort may confuse what is 1st or 3rd library. e.g.
# when dynamo/vllm/omni/xx.py import vllm, local isort may treat this `vllm` as first
# party heuristically. This causes local sort differs from GitHub sort and pre-commit
# failure. To mitigate 1) one can install 3rd party lib so that isort is aware of it,
# 2) hardcode 3rd party lib here, 3) add "# isort: skip_file" to problematic files
# as the last resort.
known_third_party = ["vllm", "tensorrt_llm", "sglang", "aiconfigurator"]
[tool.pytest.ini_options]
minversion = "8.0"
# Keep pytest's tmp_path directories only for failed tests.
tmp_path_retention_policy = "failed"
# NOTE
# We ignore model.py explicitly here to avoid mypy errors with duplicate modules
# pytest overrides the default mypy exclude configuration and so we exclude here as well
addopts = [
"-ra",
"--showlocals",
"--strict-markers",
"--strict-config",
# Run mypy checks as part of the pytest run — presumably via the pytest-mypy
# plugin; the --ignore-glob entries below exist to keep mypy off these paths.
"--mypy",
"--ignore-glob=*model.py",
"--ignore-glob=*vllm_integration*",
"--ignore-glob=*trtllm_integration*",
"--ignore-glob=*kvbm/python/kvbm*",
"--ignore-glob=*_inc.py",
"--ignore-glob=*/llm/tensorrtllm*",
"--ignore-glob=docs/*",
"--ignore-glob=components/src/dynamo/sglang/request_handlers/*",
"--ignore-glob=components/src/dynamo/sglang/multimodal_utils/*",
"--ignore-glob=components/src/dynamo/vllm/multimodal_utils/*",
"--ignore-glob=examples/backends/sglang/slurm_jobs/*",
# FIXME: Get relative/generic blob paths to work here
]
# xfail-marked tests that unexpectedly pass are reported as failures.
xfail_strict = true
log_cli_level = "INFO"
# Promote all warnings to errors, then allowlist known third-party noise.
# Entry format: "action:message-regex:category" (Python warnings filter syntax).
filterwarnings = [
"error",
# NOTE(review): ".*cuda*" means "cud" followed by zero-or-more "a" — it matches
# any message containing "cud"; ".*cuda.*" was probably intended. Confirm before
# tightening, since it works (loosely) as-is.
"ignore:.*cuda*:DeprecationWarning", # Need this to avoid deprecation warnings from CUDA in tensorrt_llm.
"ignore:.*pkg_resources.*:DeprecationWarning",
"ignore:.*pkg_resources.*:UserWarning",
"ignore:.*multipart.*:PendingDeprecationWarning",
"ignore:.*PyType_Spec.*custom tp_new.*:DeprecationWarning", # Ignore protobuf deprecation warning
"ignore:.*unclosed.*socket.*:ResourceWarning", # Ignore unclosed socket warnings
"ignore:.*unclosed event loop.*:ResourceWarning", # Ignore unclosed event loop warnings
"ignore:.*Exception ignored in.*:pytest.PytestUnraisableExceptionWarning", # Ignore unraisable exception warnings
"ignore:The pynvml package is deprecated.*:FutureWarning", # Ignore pynvml deprecation warning, temporary until upstream library updates to nvidia-ml-py
"ignore:The behavior of DataFrame concatenation with empty or all-NA entries is deprecated.*:FutureWarning", # pandas 2.x concat deprecation in AIC SDK TODO: fix in AIC
"ignore:Automatic KV events configuration is deprecated.*:FutureWarning", # Ignore Dynamo's own KV events deprecation warning in tests
# Pydantic V2 deprecation warnings from TRTLLM dependencies (raised at import time during collection)
"ignore:Support for class-based `config`.*:pydantic.warnings.PydanticDeprecatedSince20",
"ignore:Using extra keyword arguments on `Field`.*:pydantic.warnings.PydanticDeprecatedSince20",
"ignore:The `schema` method is deprecated.*:pydantic.warnings.PydanticDeprecatedSince20",
# Pydantic warning about field shadowing in tensorrt_llm.serve.openai_protocol.ResponseFormat
'ignore:Field name "schema" in "ResponseFormat" shadows an attribute in parent:UserWarning',
# pytest-benchmark automatically disables when xdist is active, ignore the warning
"ignore:.*Benchmarks are automatically disabled.*:pytest_benchmark.logger.PytestBenchmarkWarning",
################################################################################################
# vLLM
################################################################################################
# vLLM tokenizer deprecation warning (AnyTokenizer moved to vllm.tokenizers.TokenizerLike)
"ignore:.*vllm\\.transformers_utils\\.tokenizer\\.AnyTokenizer.*has been moved.*:DeprecationWarning",
################################################################################################
# TRT-LLM
################################################################################################
# torchao sometimes emits SyntaxWarning from docstrings (e.g. invalid escape sequences) at import
# time; our global `error` policy would otherwise fail test collection. Do not rely on module=
# matching here because these can be raised during compilation where the module field may not
# match as expected.
"ignore:.*invalid escape sequence.*:SyntaxWarning",
# torchao deprecation warnings for import path changes (see https://github.com/pytorch/ao/issues/2752)
"ignore:Importing.*torchao\\.dtypes.*:DeprecationWarning",
# nvidia-modelopt warning about transformers version incompatibility
"ignore:transformers version .* is incompatible with nvidia-modelopt.*:UserWarning",
]
# NOTE: Can also manually mark tests with @pytest.mark.asyncio
asyncio_mode = "auto"
# IMPORTANT: tests/conftest.py also registers a subset of these markers for
# environments where pyproject.toml is not available (e.g. some CI containers).
# Keep the marker definitions here and in tests/conftest.py synchronized.
# With --strict-markers (see addopts), any marker not declared here fails the run.
markers = [
# CI pipeline stage / cadence
"pre_merge: marks tests to run before merging",
"post_merge: marks tests to run after merge",
"parallel: marks tests that can run in parallel with pytest-xdist",
"nightly: marks tests to run nightly",
"weekly: marks tests to run weekly",
"release: marks tests to run on release pipelines",
# GPU-count requirements (gpu_0 = no GPU needed)
"gpu_0: marks tests that don't require GPU",
"gpu_1: marks tests to run on GPU",
"gpu_2: marks tests to run on 2GPUs",
"gpu_4: marks tests to run on 4GPUs",
"gpu_8: marks tests to run on 8GPUs",
# Test category
"e2e: marks tests as end-to-end tests",
"integration: marks tests as integration tests",
"unit: marks tests as unit tests",
"stress: marks tests as stress tests",
"performance: marks tests as performance tests",
"benchmark: marks tests as benchmark tests",
# Backend requirements
"vllm: marks tests as requiring vllm",
"trtllm: marks tests as requiring trtllm",
"sglang: marks tests as requiring sglang",
"multimodal: marks tests as multimodal (image/video) tests",
"slow: marks tests as known to be slow",
"h100: marks tests to run on H100",
"aiconfigurator: marks e2e tests that cover aiconfigurator functionality",
# Component-specific markers
"router: marks tests for router component",
"planner: marks tests for planner component",
"kvbm: marks tests for KV behavior and model determinism",
"kvbm_concurrency: marks concurrency stress tests for KVBM (runs separately)",
"model: model id used by a test or parameter",
"custom_build: marks tests that require custom builds or special setup (e.g., MoE models)",
"k8s: marks tests as requiring Kubernetes",
"fault_tolerance: marks tests as fault tolerance tests",
"deploy: marks tests as deployment tests",
# Built-in markers
"skip: skip this test",
"skipif: skip if condition is true",
"xfail: expected failure",
"usefixtures: use fixtures",
"parametrize: parameterized test",
"filterwarnings: filter warnings",
"asyncio: asyncio test marker",
# Third-party plugin markers
"timeout: test timeout in seconds (pytest-timeout plugin)",
]
# Linting/formatting
[tool.ruff]
# Same as Black.
line-length = 88
indent-width = 4
# Per-file lint exemptions: redefinition/unused-import noise in test fixtures,
# and undefined names in *_inc.py include-style fragments.
[tool.ruff.lint.extend-per-file-ignores]
"icp/tests/**/test_*.py" = ["F811", "F401"]
"*_inc.py" = ["F821"]
# Configuration for IDE Python language servers (e.g. Cursor's default one,
# basedpyright) so that developers can use "go-to-definition", "hover types"
# and other features.
[tool.basedpyright]
extraPaths = ["components/src", "lib/bindings/python/src"]
# This is for external dependencies.
venvPath = "."
venv = ".venv"
# mypy static type checking; in CI this runs through pytest's --mypy flag (see
# addopts above), so excludes live in addopts rather than here.
[tool.mypy]
# --disable-error-code: WAR large set of errors due to mypy not being run
# previously. We can slowly enable sets of errors to fix over time.
# disable_error_code = []
# --explicit-package-bases: WAR errors about duplicate module names used
# throughout the llm examples. For example, the common module in
# tensorrt_llm and vllm are both named common.
explicit_package_bases = true
# --ignore-missing-imports: WAR too many errors when developing outside
# of container environment with PYTHONPATH set and packages installed.
# NOTE: Can possibly move mypy from pre-commit to a github action run only in
# a container with the expected environment and PYTHONPATH setup.
ignore_missing_imports = true
check_untyped_defs = true
[[tool.mypy.overrides]]
# Skip mypy analysis on internal dependencies of vllm
module = ["vllm.*"]
follow_imports = "skip"
ignore_missing_imports = true
# Non-standard table — presumably read by the project's docs build to inject
# Adobe analytics <script> tags into every generated page; confirm against the
# Sphinx conf.py that consumes it.
[tool.sphinx]
# extra-content-head
extra_content_head = [
'''
<script src="https://assets.adobedtm.com/5d4962a43b79/c1061d2c5e7b/launch-191c2462b890.min.js" ></script>
''',
]
# extra-content-footer
extra_content_footer = [
'''
<script type="text/javascript">if (typeof _satellite !== "undefined") {_satellite.pageBottom();}</script>
''',
]