Commit f80b847 (2 parents: 0a0ad7a + 17439e6)

Merge branch 'main' into jingyux/block-quant-onnx

Signed-off-by: Jingyu Xin <[email protected]>

File tree: 119 files changed (+3906 additions, -2370 deletions)


.github/CODEOWNERS (0 additions, 1 deletion)

@@ -30,7 +30,6 @@ modelopt/torch/trace @NVIDIA/modelopt-torch-nas-prune-codeowners
 modelopt/torch/utils @NVIDIA/modelopt-torch-utils-codeowners
 
 # Examples
-/docker @NVIDIA/modelopt-docker-codeowners
 /README.md @NVIDIA/modelopt-examples-codeowners
 /examples @NVIDIA/modelopt-examples-codeowners
 /examples/chained_optimizations @NVIDIA/modelopt-torch-nas-prune-codeowners

.github/workflows/example_tests.yml (6 additions, 3 deletions)

@@ -68,15 +68,18 @@ jobs:
     container: &example_container
       image: nvcr.io/nvidia/tensorrt-llm/release:1.1.0rc2.post2
       env:
-        LD_LIBRARY_PATH: "/usr/lib/x86_64-linux-gnu:/usr/local/tensorrt/targets/x86_64-linux-gnu/lib:${LD_LIBRARY_PATH}"
-        # PATH: "/usr/local/tensorrt/targets/x86_64-linux-gnu/bin:${PATH}"
         PIP_CONSTRAINT: "" # Disable pip constraint for upgrading packages
+        HF_TOKEN: ${{ secrets.HF_TOKEN }}
     steps: &example_steps
       - uses: actions/checkout@v4
       - uses: nv-gha-runners/setup-proxy-cache@main
+      - name: Setup environment variables
+        run: |
+          echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/include:/usr/lib/x86_64-linux-gnu:/usr/local/tensorrt/targets/x86_64-linux-gnu/lib" >> $GITHUB_ENV
+          echo "PATH=${PATH}:/usr/local/tensorrt/targets/x86_64-linux-gnu/bin" >> $GITHUB_ENV
       - name: Run example tests
        run: |
-          pip install ".[all,dev-test]"
+          pip install ".[hf,dev-test]"
           find examples/${{ matrix.EXAMPLE }} -name "requirements.txt" | while read req_file; do pip install -r "$req_file" || exit 1; done
           pytest -s tests/examples/${{ matrix.EXAMPLE }}
   example-tests-non-pr:

.github/workflows/gpu_tests.yml (4 additions, 1 deletion)

@@ -66,11 +66,14 @@ jobs:
       image: nvcr.io/nvidia/pytorch:25.06-py3
       env:
         GIT_DEPTH: 1000 # For correct version for tests/gpu/torch/quantization/plugins/test_megatron.py
-        LD_LIBRARY_PATH: "/usr/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}" # Add libcudnn*.so and libnv*.so to path.
         PIP_CONSTRAINT: "" # Disable pip constraint for upgrading packages
     steps: &gpu_steps
       - uses: actions/checkout@v4
       - uses: nv-gha-runners/setup-proxy-cache@main
+      - name: Setup environment variables
+        run: |
+          echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/include:/usr/lib/x86_64-linux-gnu:/usr/local/tensorrt/targets/x86_64-linux-gnu/lib" >> $GITHUB_ENV
+          echo "PATH=${PATH}:/usr/local/tensorrt/targets/x86_64-linux-gnu/bin" >> $GITHUB_ENV
       - name: Run gpu tests
         run: pip install tox-current-env && tox -e py312-cuda12-gpu --current-env
   gpu-tests-non-pr:

.gitlab/tests.yml (51 additions, 26 deletions)

@@ -1,11 +1,12 @@
-# NOTE: Make sure this file is consistent with .github/workflows/{unit,gpu}_tests.yml
+# NOTE: Make sure this file is consistent with .github/workflows/{unit,gpu,example}_tests.yml
 .tests-default:
+  variables:
+    PIP_CONSTRAINT: "" # Disable pip constraint for upgrading packages
   stage: tests
   rules:
     - if: $CI_PIPELINE_SOURCE == "schedule"
-      when: always
-    - if: $CI_PIPELINE_SOURCE != "schedule"
-      when: manual
+    - if: $CI_COMMIT_TAG =~ /^\d+\.\d+\.\d+$/
+    - when: manual
 
 ##### Unit Tests #####
 unit:
@@ -24,50 +25,74 @@ unit:
     - tox -e py3$PYTHON-torch$TORCH-tf_$TRANSFORMERS-unit
 
 ##### GPU Tests #####
-gpu:
+.multi-gpu-tests-default:
   extends: .tests-default
-  timeout: 60m
+  timeout: 90m
   image: nvcr.io/nvidia/pytorch:25.06-py3
   variables:
     GIT_DEPTH: 1000 # For correct version for tests/gpu/torch/quantization/plugins/test_megatron.py
-    LD_LIBRARY_PATH: "/usr/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}" # Add libcudnn*.so and libnv*.so to path.
-    PIP_CONSTRAINT: "" # Disable pip constraint for upgrading packages
   tags: [docker, linux, 2-gpu]
+  before_script:
+    # Add libcudnn*.so and libnv*.so to path
+    - export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/include:/usr/lib/x86_64-linux-gnu:/usr/local/tensorrt/targets/x86_64-linux-gnu/lib"
+    # Add trtexec to path
+    - export PATH="${PATH}:/usr/local/tensorrt/targets/x86_64-linux-gnu/bin"
+    # Install git-lfs for Daring-Anteater dataset
+    - apt-get update && apt-get install -y git-lfs
+    - git lfs install --system
+
+multi-gpu:
+  extends: .multi-gpu-tests-default
   script:
     # Use pre-installed packages without a new venv with tox-current-env
     - pip install tox-current-env
     - tox -e py312-cuda12-gpu --current-env
 
 ##### Example Tests #####
-example:
-  extends: .tests-default
-  stage: tests
-  timeout: 45m
-  image: gitlab-master.nvidia.com:5005/omniml/modelopt/modelopt_examples:latest
-  variables:
-    TEST_TYPE: pytest
-  tags: [docker, linux, 2-gpu, sm<89]
+example-torch:
+  extends: .multi-gpu-tests-default
+  timeout: 30m
   parallel:
     matrix:
-      - EXAMPLE: [diffusers, llm_distill, llm_qat, llm_sparsity, onnx_ptq, speculative_decoding]
-  allow_failure: true # Allow to continue next stages even if job is canceled (e.g. during release)
-  before_script:
-    - pip install ".[all,dev-test]"
+      - EXAMPLE: [llm_distill, llm_sparsity, speculative_decoding]
   script:
-    # Uninstall apex since T5 Int8 (PixArt) + Apex is not supported as per https://github.com/huggingface/transformers/issues/21391
-    - if [ "$EXAMPLE" = "diffusers" ]; then pip uninstall -y apex; fi
+    - pip install ".[hf,dev-test]"
     - find examples/$EXAMPLE -name "requirements.txt" | while read req_file; do pip install -r "$req_file" || exit 1; done
-    - if [ "$TEST_TYPE" = "pytest" ]; then pytest -s tests/examples/$EXAMPLE; else bash tests/examples/test_$EXAMPLE.sh; fi
+    - pytest -s tests/examples/$EXAMPLE
+
+# TODO: Fix llm_qat test hang in GitLab CI
+example-failing:
+  extends: example-torch
+  allow_failure: true
+  parallel:
+    matrix:
+      - EXAMPLE: [llm_qat]
 
-example-ada:
-  extends: example
+example-trtllm:
+  extends: example-torch
   timeout: 60m
+  image: nvcr.io/nvidia/tensorrt-llm/release:1.1.0rc2.post2
+  tags: [docker, linux, 2-gpu, sm>=89]
+  parallel:
+    matrix:
+      - EXAMPLE: [llm_autodeploy, llm_eval, llm_ptq, vlm_ptq]
+
+example-onnx:
+  extends: example-torch
+  image: nvcr.io/nvidia/tensorrt:25.08-py3
   tags: [docker, linux, 2-gpu, sm>=89]
   parallel:
     matrix:
-      - EXAMPLE: [llm_eval, llm_ptq, vlm_ptq, llm_autodeploy]
+      - EXAMPLE: [diffusers, onnx_ptq]
+        TEST_TYPE: pytest
       - EXAMPLE: [onnx_ptq]
        TEST_TYPE: bash
+  script:
+    # Uninstall apex since T5 Int8 (PixArt) + Apex is not supported as per https://github.com/huggingface/transformers/issues/21391
+    - if [ "$EXAMPLE" = "diffusers" ]; then pip uninstall -y apex; fi
+    - pip install ".[all,dev-test]"
+    - find examples/$EXAMPLE -name "requirements.txt" | while read req_file; do pip install -r "$req_file" || exit 1; done
+    - if [ "$TEST_TYPE" = "pytest" ]; then pytest -s tests/examples/$EXAMPLE; else bash tests/examples/test_$EXAMPLE.sh; fi
 
 ##### Megatron / NeMo Integration Tests #####
 megatron-nemo-integration:

.vscode/settings.json (3 additions, 1 deletion)

@@ -42,5 +42,7 @@
   "evenBetterToml.schema.enabled": false, // disable toml/json schema since we have custom fields
   "python.analysis.extraPaths": [
     "./tests/" // add tests to python path just like pytest does in pyproject.toml
-  ]
+  ],
+  "git.alwaysSignOff": true,
+  "git.enableCommitSigning": true,
 }

CHANGELOG.rst (12 additions, 1 deletion)

@@ -1,13 +1,23 @@
 Model Optimizer Changelog (Linux)
 =================================
 
+0.39 (2025-10-xx)
+^^^^^^^^^^^^^^^^^
+
+**Deprecations**
+
+**New Features**
+
+- Add flag ``op_types_to_exclude_fp16`` in ONNX quantization to exclude ops from being converted to FP16/BF16. Alternatively, for custom TensorRT ops, this can also be done by indicating ``'fp32'`` precision in ``trt_plugins_precision``.
+
 0.37 (2025-09-xx)
 ^^^^^^^^^^^^^^^^^
 
 **Deprecations**
 
+- Deprecated ModelOpt's custom docker images. Please use the PyTorch, TensorRT-LLM or TensorRT docker image directly or refer to the `installation guide <https://nvidia.github.io/TensorRT-Model-Optimizer/getting_started/2_installation.html>`_ for more details.
 - Deprecated ``quantize_mode`` argument in ``examples/onnx_ptq/evaluate.py`` to support strongly typing. Use ``engine_precision`` instead.
-- Deprecated TRT-LLM's TRT backend in ``examples/llm_ptq`` and ``examples/vlm_ptq``. Tasks ``build`` and ``benchmark`` support are removed and replaced with ``quant``. For performance evaluation, please use ``trtllm-bench`` directly.
+- Deprecated TRT-LLM's TRT backend in ``examples/llm_ptq`` and ``examples/vlm_ptq``. Tasks ``build`` and ``benchmark`` support are removed and replaced with ``quant``. ``engine_dir`` is replaced with ``checkpoint_dir`` in ``examples/llm_ptq`` and ``examples/vlm_ptq``. For performance evaluation, please use ``trtllm-bench`` directly.
 - ``--export_fmt`` flag in ``examples/llm_ptq`` is removed. By default we export to the unified Hugging Face checkpoint format.
 - Deprecated ``examples/vlm_eval`` as it depends on the deprecated TRT-LLM's TRT backend.
 
@@ -16,6 +26,7 @@ Model Optimizer Changelog (Linux)
 - ``high_precision_dtype`` default to fp16 in ONNX quantization, i.e. quantized output model weights are now FP16 by default.
 - Upgrade TensorRT-LLM dependency to 1.1.0rc2.
 - Support Phi-4-multimodal and Qwen2.5-VL quantized HF checkpoint export in ``examples/vlm_ptq``.
+- Support storing and restoring Minitron pruning activations and scores for re-pruning without running the forward loop again.
 - Add Minitron pruning example for Megatron-LM framework. See ``examples/megatron-lm`` for more details.
 
 0.35 (2025-09-04)
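
As a rough illustration of the new changelog entry above, here is a minimal sketch of invoking the flag from the ONNX quantization CLI. Only ``op_types_to_exclude_fp16`` itself is confirmed by this commit; the surrounding flags follow the pre-existing ``modelopt.onnx.quantization`` command line, and the op names are placeholders:

```bash
# Sketch only: keep Resize (and a hypothetical custom TensorRT op) out of the
# FP16/BF16 conversion during ONNX PTQ. Flags other than
# --op_types_to_exclude_fp16 are assumptions based on the existing CLI and may
# differ in your installed version.
python -m modelopt.onnx.quantization \
    --onnx_path model.onnx \
    --quantize_mode int8 \
    --op_types_to_exclude_fp16 Resize MyCustomPlugin \
    --output_path model.quant.onnx
```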

CONTRIBUTING.md (1 addition, 1 deletion)

@@ -11,7 +11,7 @@ pip install -e ".[dev]"
 ```
 
 If you are working on features that require dependencies like TensorRT-LLM or Megatron-Core, consider using a docker container to simplify the setup process.
-See [docker README](./README.md#installation--docker) for more details.
+Visit our [installation docs](https://nvidia.github.io/TensorRT-Model-Optimizer/getting_started/2_installation.html) for more information.
 
 ## 🧹 Code linting and formatting
 

README.md (8 additions, 4 deletions)

@@ -15,7 +15,7 @@
 
 ______________________________________________________________________
 
-The **NVIDIA TensorRT Model Optimizer** (referred to as **Model Optimizer**, or **ModelOpt**) is a library comprising state-of-the-art model optimization [techniques](#techniques) including quantization, distillation, pruning, speculative decoding and sparsity to accelerate models.
+**NVIDIA TensorRT Model Optimizer** (referred to as **Model Optimizer**, or **ModelOpt**) is a library comprising state-of-the-art model optimization [techniques](#techniques) including quantization, distillation, pruning, speculative decoding and sparsity to accelerate models.
 
 **[Input]** Model Optimizer currently supports inputs of a [Hugging Face](https://huggingface.co/), [PyTorch](https://github.com/pytorch/pytorch) or [ONNX](https://github.com/onnx/onnx) model.
 
@@ -61,10 +61,10 @@ Model Optimizer is also integrated with [NVIDIA NeMo](https://github.com/NVIDIA-
 To install stable release packages for Model Optimizer with `pip` from [PyPI](https://pypi.org/project/nvidia-modelopt/):
 
 ```bash
-pip install nvidia-modelopt[all]
+pip install -U nvidia-modelopt[all]
 ```
 
-To install from source in editable mode with all development dependencies or to test the latest changes, run:
+To install from source in editable mode with all development dependencies or to use the latest features, run:
 
 ```bash
 # Clone the Model Optimizer repository
@@ -74,7 +74,11 @@ cd TensorRT-Model-Optimizer
 pip install -e .[dev]
 ```
 
-Visit our [installation guide](https://nvidia.github.io/TensorRT-Model-Optimizer/getting_started/2_installation.html) for more fine-grained control on installed dependencies or view our pre-made [dockerfiles](docker/README.md) for more information.
+You can also directly use the [TensorRT-LLM docker images](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/tensorrt-llm/containers/release/tags)
+(e.g., `nvcr.io/nvidia/tensorrt-llm/release:<version>`), which have Model Optimizer pre-installed.
+Make sure to upgrade Model Optimizer to the latest version using ``pip`` as described above.
+Visit our [installation guide](https://nvidia.github.io/TensorRT-Model-Optimizer/getting_started/2_installation.html) for
+more fine-grained control on installed dependencies or for alternative docker images and environment variables to setup.
 
 ## Techniques
 
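For the docker route the README now recommends, a minimal, non-authoritative sketch follows. The image tag is the one pinned by the CI configs in this commit; the `docker run` flags are generic assumptions rather than project-mandated options:

```bash
# Sketch only: start from the TensorRT-LLM release image (ModelOpt
# pre-installed), then upgrade ModelOpt to the latest PyPI release and print
# the installed version as a smoke test.
docker run --rm -it --gpus all \
    nvcr.io/nvidia/tensorrt-llm/release:1.1.0rc2.post2 \
    bash -c "pip install -U 'nvidia-modelopt[all]' && python -c 'import modelopt; print(modelopt.__version__)'"
```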

docker/Dockerfile (0 additions, 27 deletions)

This file was deleted.

docker/README.md (0 additions, 16 deletions)

This file was deleted.
