Commit ee32fa8

Add CI tests for partial installs and multi-versions + torch 2.8 fix
Signed-off-by: Keval Morabia <[email protected]>
1 parent f053d84 commit ee32fa8

File tree: 16 files changed, +135 −35 lines

.github/workflows/gpu_tests.yml

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ jobs:
     runs-on: linux-amd64-gpu-h100-latest-1
     timeout-minutes: 60
     container:
-      image: nvcr.io/nvidia/pytorch:25.04-py3
+      image: nvcr.io/nvidia/pytorch:25.06-py3
       env:
         GIT_DEPTH: 1000 # For correct version for tests/gpu/torch/quantization/plugins/test_megatron.py
         LD_LIBRARY_PATH: "/usr/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}" # Add libcudnn*.so and libnv*.so to path.

.github/workflows/multi_version_unit_tests.yml

Lines changed: 49 additions & 0 deletions

@@ -0,0 +1,49 @@
+# Run unit tests with older supported Python and Torch versions
+name: Multi version tests
+
+on:
+  pull_request:
+    branches: [main, release/*]
+    paths:
+      - ".github/workflows/multi_version_unit_tests.yml"
+      - "modelopt/**"
+      - "tests/**"
+      - "setup.py"
+      - "tox.ini"
+
+# Cancel previous runs if new commit is pushed to the same PR
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
+
+jobs:
+  multi-py-unit:
+    runs-on: ubuntu-latest
+    timeout-minutes: 30
+    strategy:
+      matrix:
+        py: [10, 11]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.${{ matrix.py }}"
+      - name: Install dependencies
+        run: pip install tox
+      - name: Run unit tests
+        run: tox -e py3${{ matrix.py }}-torch28-unit
+  multi-torch-unit:
+    runs-on: ubuntu-latest
+    timeout-minutes: 30
+    strategy:
+      matrix:
+        torch: [25, 26, 27]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.12"
+      - name: Install dependencies
+        run: pip install tox
+      - name: Run unit tests
+        run: tox -e py312-torch${{ matrix.torch }}-unit

.github/workflows/partial_unit_tests.yml

Lines changed: 33 additions & 0 deletions

@@ -0,0 +1,33 @@
+name: Partial install unit tests
+
+on:
+  pull_request:
+    branches: [main, release/*]
+    paths:
+      - ".github/workflows/partial_unit_tests.yml"
+      - "modelopt/**"
+      - "tests/**"
+      - "setup.py"
+      - "tox.ini"
+
+# Cancel previous runs if new commit is pushed to the same PR
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
+
+jobs:
+  partial-unit:
+    runs-on: ubuntu-latest
+    timeout-minutes: 30
+    strategy:
+      matrix:
+        test-env: [onnx, torch]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.12"
+      - name: Install dependencies
+        run: pip install tox
+      - name: Run unit tests
+        run: tox -e py312-partial-unit-${{ matrix.test-env }}
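
Note: the partial-install matrix runs the unit suite with only one optional dependency group (onnx or torch) installed. This diff does not show how the tests themselves guard missing backends; purely as a hedged illustration of the usual pattern (not necessarily this repo's approach), a test module can skip itself when its optional backend is absent:

# Hedged illustration only: skip the whole module when the optional "onnx"
# backend is not installed, so a torch-only partial install still passes.
import pytest

onnx = pytest.importorskip("onnx")  # skips this module if onnx is missing


def test_onnx_backend_available():
    # Runs only when onnx is importable, e.g. in the "onnx" partial-install job.
    assert hasattr(onnx, "__version__")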

.github/workflows/unit_tests.yml

Lines changed: 5 additions & 2 deletions
@@ -17,8 +17,11 @@ concurrency:
 
 jobs:
   unit-tests:
-    runs-on: ubuntu-latest
+    runs-on: ${{ matrix.os }}
     timeout-minutes: 30
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest]
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
@@ -27,4 +30,4 @@ jobs:
       - name: Install dependencies
         run: pip install tox
       - name: Run unit tests
-        run: tox -e py312-torch27-unit
+        run: tox -e py312-torch28-unit

CHANGELOG.rst

Lines changed: 2 additions & 0 deletions
@@ -8,6 +8,8 @@ Model Optimizer Changelog (Linux)
 
 **Deprecations**
 
+- Deprecate ``torch<2.5`` support.
+
 **New Features**
 
 - (Experimental) Add quantization support for custom TensorRT op in ONNX models.

modelopt/torch/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -22,9 +22,9 @@
 
 from . import distill, nas, opt, prune, quantization, sparsity, speculative, utils
 
-if _Version(_torch_version) < _Version("2.5"):
+if _Version(_torch_version) < _Version("2.6"):
     _warnings.warn(
-        "nvidia-modelopt will drop torch<2.5 support in a future release.", DeprecationWarning
+        "nvidia-modelopt will drop torch<2.6 support in a future release.", DeprecationWarning
     )
 
 # Since `hf` dependencies are optional and users have pre-installed transformers, we need to ensure
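
Note: this check runs once at import time and only warns; it does not block the import. A standalone sketch of the same pattern (plain names without the leading underscores are assumptions for readability; the module itself imports these under private aliases):

# Minimal sketch of the version-gated deprecation warning above.
import warnings

import torch
from packaging.version import Version

if Version(torch.__version__) < Version("2.6"):
    warnings.warn(
        "nvidia-modelopt will drop torch<2.6 support in a future release.", DeprecationWarning
    )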

modelopt/torch/_deploy/utils/torch_onnx.py

Lines changed: 4 additions & 1 deletion
@@ -411,15 +411,18 @@ def get_onnx_bytes_and_metadata(
     )
     with torch.inference_mode(), autocast, quantizer_context:
         if not dynamo_export or Version(torch.__version__) >= Version("2.6"):
+            additional_kwargs = {}
+            if not dynamo_export and Version(torch.__version__) >= Version("2.8"):
+                additional_kwargs["dynamic_axes"] = dynamic_axes
             torch.onnx.export(
                 model,
                 dummy_input,
                 onnx_save_path,
                 input_names=input_names,
                 output_names=output_names,
                 opset_version=onnx_opset,
-                dynamic_axes=dynamic_axes,
                 dynamo=dynamo_export,
+                **additional_kwargs,
             )
         else:  # torch < 2.6 with dynamo export
             export_options = torch.onnx.ExportOptions(dynamic_shapes=True)
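
Note: the torch 2.8 fix assembles the export keyword arguments conditionally so a single torch.onnx.export call site works across versions, passing dynamic_axes only on the non-dynamo path under torch 2.8+. A minimal, self-contained sketch of the same conditional-kwargs pattern (TinyModel, the axis names, and the output file name are hypothetical; assumes torch >= 2.6 so the dynamo keyword is accepted):

# Sketch of the conditional-kwargs export pattern used in the diff above.
from packaging.version import Version

import torch


class TinyModel(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x)


dynamo_export = False
additional_kwargs = {}
if not dynamo_export and Version(torch.__version__) >= Version("2.8"):
    # Only the non-dynamo path on torch>=2.8 receives dynamic_axes here.
    additional_kwargs["dynamic_axes"] = {"x": {0: "batch"}}

torch.onnx.export(
    TinyModel().eval(),
    torch.randn(2, 4),
    "tiny.onnx",
    input_names=["x"],
    output_names=["y"],
    dynamo=dynamo_export,
    **additional_kwargs,
)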

modelopt/torch/nas/modules/utils.py

Lines changed: 2 additions & 2 deletions
@@ -40,9 +40,9 @@ def get_sliced_tensor_by_slices(
     tensor_sliced = tensor
     for i, _ in enumerate(slices):
         if sum(not isinstance(s, slice) for s in slices) < 2:
-            tensor_sliced = tensor_sliced[slices]
+            tensor_sliced = tensor_sliced[tuple(slices)]
             break
-        tensor_sliced = tensor_sliced[slices[: i + 1]]
+        tensor_sliced = tensor_sliced[tuple(slices[: i + 1])]
         slices[i] = slice(None) # replace with a vanilla slice ("[:]") for next slicing iteration
 
     # return sliced, contiguous tensor
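
Note: the change wraps the slice list in tuple() before indexing. Newer torch versions warn on or reject list-based multidimensional indexing, so the tuple form is the unambiguous spelling. A small hedged illustration (exact behavior of the list form varies by torch version):

# Indexing with a tuple of per-dimension indices is the unambiguous form.
import torch

t = torch.arange(12).reshape(3, 4)
idx = [slice(None), 2]  # mixed list of a slice object and an int

# t[idx] may warn or error on newer torch; the tuple form below always means
# "all rows, column 2".
print(t[tuple(idx)])  # tensor([ 2,  6, 10])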

setup.py

Lines changed: 2 additions & 2 deletions
@@ -43,7 +43,7 @@
     "pulp",
     "regex",
     "safetensors",
-    "torch>=2.4",
+    "torch>=2.5",
     "torchprofile>=0.0.4",
     "torchvision",
 ]
@@ -58,7 +58,7 @@
     "onnxconverter-common",
     "onnxruntime~=1.22.0 ; platform_machine == 'aarch64' or platform_system == 'Darwin'",
     "onnxruntime-gpu~=1.22.0 ; platform_machine != 'aarch64' and platform_system != 'Darwin' and platform_system != 'Windows'", # noqa: E501
-    "onnxruntime-gpu==1.20.0; platform_system == 'Windows'",
+    "onnxruntime-directml==1.20.0; platform_system == 'Windows'",
     "onnxscript", # For test_onnx_dynamo_export unit test
     "onnxsim ; python_version < '3.12' and platform_machine != 'aarch64'",
     "polygraphy>=0.49.22",

tests/conftest.py

Lines changed: 8 additions & 0 deletions
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import platform
+
 import pytest
 
 
@@ -36,3 +38,9 @@ def pytest_collection_modifyitems(config, items):
     for item in items:
         if "manual" in item.keywords:
            item.add_marker(skipper)
+
+
+@pytest.fixture
+def skip_on_windows():
+    if platform.system() == "Windows":
+        pytest.skip("Skipping on Windows")
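
Note: the new skip_on_windows fixture pairs with the windows-latest entry added to the unit-test matrix; a test opts out of Windows simply by requesting the fixture. A hypothetical usage example (this test is not part of the commit):

# Requesting the fixture is enough; pytest.skip() inside the fixture skips the
# test on Windows before the body runs.
def test_linux_only_behavior(skip_on_windows):
    assert True  # executes only on non-Windows platforms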
