Commit 30aff3a

chore: Updates for 2.3 (#2788)

Parent: 0499493

11 files changed, +18 −33 lines

.github/scripts/install-torch-tensorrt.sh

Lines changed: 1 addition & 6 deletions
@@ -6,12 +6,7 @@ ${CONDA_RUN} ${PIP_INSTALL_TORCH} torchvision
 ${CONDA_RUN} python -m pip install pyyaml mpmath==1.3.0
 export TRT_VERSION=$(${CONDA_RUN} python -c "import versions; versions.tensorrt_version()")
 
-# Install TensorRT manually
-wget -q -P /opt/torch-tensorrt-builds/ https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.0.1/tars/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz
-tar -xzf /opt/torch-tensorrt-builds/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz -C /opt/torch-tensorrt-builds/
-python -m pip install /opt/torch-tensorrt-builds/TensorRT-10.0.1.6/python/tensorrt-10.0.1-cp${PYTHON_VERSION//./}-none-linux_x86_64.whl
-
 # Install Torch-TensorRT
-${CONDA_RUN} python -m pip install /opt/torch-tensorrt-builds/torch_tensorrt*+${CU_VERSION}*.whl
+${CONDA_RUN} python -m pip install /opt/torch-tensorrt-builds/torch_tensorrt*+${CU_VERSION}*.whl tensorrt~=${TRT_VERSION} --extra-index-url=https://pypi.ngc.nvidia.com
 
 echo -e "Running test script";
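
The step above now resolves the TensorRT Python package from the NGC PyPI index instead of unpacking a release tarball. A minimal sketch of the same install outside CI, assuming a plain shell without the `${CONDA_RUN}`/`${CU_VERSION}` wrappers and an illustrative `TRT_VERSION` value:

```
# Sketch only: TRT_VERSION is normally resolved from dev_dep_versions.yml via versions.py;
# the literal value here is an assumption for illustration.
export TRT_VERSION=10.0.1
python -m pip install "tensorrt~=${TRT_VERSION}" --extra-index-url=https://pypi.ngc.nvidia.com
```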

.github/workflows/build-test.yml

Lines changed: 2 additions & 2 deletions
@@ -15,7 +15,7 @@ on:
 
 jobs:
   generate-matrix:
-    uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@release/2.3
+    uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main
     with:
       package-type: wheel
       os: linux
@@ -41,7 +41,7 @@ jobs:
             smoke-test-script: packaging/smoke_test_script.sh
             package-name: torch_tensorrt
     name: Build torch-tensorrt whl package
-    uses: pytorch/test-infra/.github/workflows/build_wheels_linux.yml@release/2.3
+    uses: pytorch/test-infra/.github/workflows/build_wheels_linux.yml@main
     with:
       repository: ${{ matrix.repository }}
       ref: ""

dev_dep_versions.yml

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 __version__: "2.3.0"
 __cuda_version__: "12.1"
 __cudnn_version__: "8.9"
-__tensorrt_version__: "10.0.1.6"
+__tensorrt_version__: "10.0.1"

docker/Dockerfile

Lines changed: 2 additions & 12 deletions
@@ -8,9 +8,6 @@ ENV BASE_IMG=nvidia/cuda:12.1.1-devel-ubuntu22.04
 ARG TENSORRT_VERSION
 ENV TENSORRT_VERSION=${TENSORRT_VERSION}
 RUN test -n "$TENSORRT_VERSION" || (echo "No tensorrt version specified, please use --build-arg TENSORRT_VERSION=x.y to specify a version." && exit 1)
-ARG CUDNN_VERSION
-ENV CUDNN_VERSION=${CUDNN_VERSION}
-RUN test -n "$CUDNN_VERSION" || (echo "No cudnn version specified, please use --build-arg CUDNN_VERSION=x.y to specify a version." && exit 1)
 
 ARG PYTHON_VERSION=3.10
 ENV PYTHON_VERSION=${PYTHON_VERSION}
@@ -35,19 +32,12 @@ RUN wget -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-instal
 RUN pyenv install -v ${PYTHON_VERSION}
 RUN pyenv global ${PYTHON_VERSION}
 
-# Install CUDNN + TensorRT + dependencies
-RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin
-RUN mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600
-RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/7fa2af80.pub
-RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
-RUN apt-get update
-RUN apt-get install -y libcudnn8=${CUDNN_VERSION}* libcudnn8-dev=${CUDNN_VERSION}*
-
+# Install TensorRT + dependencies
 RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
 RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
 RUN apt-get update
 
-RUN apt-get install -y libnvinfer8=${TENSORRT_VERSION}.* libnvinfer-plugin8=${TENSORRT_VERSION}.* libnvinfer-dev=${TENSORRT_VERSION}.* libnvinfer-plugin-dev=${TENSORRT_VERSION}.* libnvonnxparsers8=${TENSORRT_VERSION}.* libnvonnxparsers-dev=${TENSORRT_VERSION}.* libnvparsers8=${TENSORRT_VERSION}.* libnvparsers-dev=${TENSORRT_VERSION}.* libnvinfer-headers-dev=${TENSORRT_VERSION}.* libnvinfer-headers-plugin-dev=${TENSORRT_VERSION}.*
+RUN apt-get install -y libnvinfer10=${TENSORRT_VERSION}.* libnvinfer-plugin10=${TENSORRT_VERSION}.* libnvinfer-dev=${TENSORRT_VERSION}.* libnvinfer-plugin-dev=${TENSORRT_VERSION}.* libnvonnxparsers10=${TENSORRT_VERSION}.* libnvonnxparsers-dev=${TENSORRT_VERSION}.*
 
 # Setup Bazel via Bazelisk
 RUN wget -q https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 -O /usr/bin/bazel &&\

docker/README.md

Lines changed: 3 additions & 3 deletions
@@ -3,7 +3,7 @@
 * Use `Dockerfile` to build a container which provides the exact development environment that our master branch is usually tested against.
 
 * The `Dockerfile` currently uses <a href="https://github.com/bazelbuild/bazelisk">Bazelisk</a> to select the Bazel version, and uses the exact library versions of Torch and CUDA listed in <a href="https://github.com/pytorch/TensorRT#dependencies">dependencies</a>.
-* The desired versions of CUDNN and TensorRT must be specified as build-args, with major and minor versions as in: `--build-arg TENSORRT_VERSION=a.b --build-arg CUDNN_VERSION=x.y`
+* The desired versions of TensorRT must be specified as build-args, with major and minor versions as in: `--build-arg TENSORRT_VERSION=a.b`
 * [**Optional**] The desired base image be changed by explicitly setting a base image, as in `--build-arg BASE_IMG=nvidia/cuda:11.8.0-devel-ubuntu22.04`, though this is optional
 * [**Optional**] Additionally, the desired Python version can be changed by explicitly setting a version, as in `--build-arg PYTHON_VERSION=3.10`, though this is optional as well.
 
@@ -17,14 +17,14 @@ Note: By default the container uses the `pre-cxx11-abi` version of Torch + Torch
 
 ### Instructions
 
-- The example below uses CUDNN 8.9 and TensorRT 8.6
+- The example below uses TensorRT 10.0.1.6
 - See <a href="https://github.com/pytorch/TensorRT#dependencies">dependencies</a> for a list of current default dependencies.
 
 > From root of Torch-TensorRT repo
 
 Build:
 ```
-DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=8.6 --build-arg CUDNN_VERSION=8.9 -f docker/Dockerfile -t torch_tensorrt:latest .
+DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=10.0.1 -f docker/Dockerfile -t torch_tensorrt:latest .
 ```
 
 Run:
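
Putting the updated instructions into practice: the build command below is taken from the README change above (only `TENSORRT_VERSION` remains as a required build-arg), while the `docker run` line is an assumed local invocation, not part of this diff, and requires the NVIDIA Container Toolkit:

```
# Build with the single remaining build-arg (CUDNN_VERSION is no longer accepted by the Dockerfile).
DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=10.0.1 -f docker/Dockerfile -t torch_tensorrt:latest .

# Assumed run command for interactive local use.
docker run --gpus all -it --rm torch_tensorrt:latest
```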

packaging/pre_build_script.sh

Lines changed: 1 addition & 3 deletions
@@ -4,9 +4,7 @@
 python3 -m pip install pyyaml
 yum install -y ninja-build gettext
 TRT_VERSION=$(python3 -c "import versions; versions.tensorrt_version()")
-wget -q -P /opt/torch-tensorrt-builds/ https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.0.1/tars/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz
-tar -xzf /opt/torch-tensorrt-builds/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz -C /opt/torch-tensorrt-builds/
-export LD_LIBRARY_PATH=/opt/torch-tensorrt-builds/TensorRT-10.0.1.6/lib:$LD_LIBRARY_PATH
+
 wget https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 \
     && mv bazelisk-linux-amd64 /usr/bin/bazel \
     && chmod +x /usr/bin/bazel

py/requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -6,4 +6,4 @@ torch==2.3.0
 torchvision==0.18.0
 --extra-index-url https://pypi.ngc.nvidia.com
 pyyaml
-tensorrt==10.0.1.6
+tensorrt==10.0.1

tests/py/dynamo/runtime/hw_compat.ts

Binary file added (108 KB); contents not shown.

tests/py/dynamo/runtime/test_hw_compat.py

Lines changed: 2 additions & 3 deletions
@@ -2,9 +2,8 @@
 import unittest
 
 import torch
-from torch.testing._internal.common_utils import TestCase, run_tests
-
 import torch_tensorrt
+from torch.testing._internal.common_utils import TestCase, run_tests
 
 
 class TestHardwareCompatibility(TestCase):
@@ -81,7 +80,7 @@ def test_hw_compat_3080_build(self):
 
         cwd = os.getcwd()
         os.chdir(os.path.dirname(os.path.realpath(__file__)))
-        model = torch.jit.load("../../ts/models/hw_compat.ts").cuda()
+        model = torch.jit.load("./hw_compat.ts").cuda()
         out = model(*inputs)
         self.assertTrue(
             len(out) == 1 and isinstance(out, torch.Tensor),
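
The test now loads `hw_compat.ts` from its own directory rather than from `tests/py/ts/models/`, matching the added and removed binary files in this commit. An assumed local invocation (any unittest-compatible runner works; requires a CUDA-capable GPU and an installed torch_tensorrt build):

```
# Assumed command for running just the relocated hardware-compatibility test.
python -m pytest tests/py/dynamo/runtime/test_hw_compat.py -k test_hw_compat_3080_build
```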

tests/py/ts/models/hw_compat.ts

Binary file removed (108 KB); contents not shown.
