Skip to content

Commit 6d6a347

Browse files
committed
Merge branch 'slice' of https://github.com/inocsin/Torch-TensorRT into slice
2 parents 979d9d1 + 4a3fdee commit 6d6a347

File tree

343 files changed

+31867
-916
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

343 files changed

+31867
-916
lines changed

.circleci/config.yml

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,96 @@
1+
# Use the latest 2.1 version of CircleCI pipeline process engine.
2+
# See: https://circleci.com/docs/2.0/configuration-reference
3+
version: 2.1
4+
5+
# Define a job to be invoked later in a workflow.
6+
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
7+
jobs:
8+
build:
9+
machine:
10+
# Primary container image where all steps run.
11+
# image: nvcr.io/nvidia/tensorrt:22.01-py3 # does not work with customized image
12+
# https://circleci.com/docs/2.0/configuration-reference#available-linux-gpu-images
13+
image: ubuntu-2004-cuda-11.4:202110-01
14+
resource_class: gpu.nvidia.large
15+
steps:
16+
- checkout
17+
- run:
18+
name: install cudnn + tensorrt + bazel
19+
command: |
20+
cd ~
21+
OS=ubuntu2004
22+
CUDNN_VERSION=8.2.1.*-1+cuda11.3
23+
TRT_VERSION=8.2.4-1+cuda11.4
24+
BAZEL_VERSION=5.1.1
25+
26+
wget https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-${OS}.pin
27+
sudo mv cuda-${OS}.pin /etc/apt/preferences.d/cuda-repository-pin-600
28+
sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/7fa2af80.pub
29+
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 536F8F1DE80F6A35
30+
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC
31+
sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/ /"
32+
sudo apt-get update
33+
sudo apt-get install libcudnn8=${CUDNN_VERSION}
34+
sudo apt-get install libcudnn8-dev=${CUDNN_VERSION}
35+
36+
sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/3bf863cc.pub
37+
sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/ /"
38+
sudo apt-get update
39+
40+
sudo apt-get install libnvinfer8=${TRT_VERSION} libnvonnxparsers8=${TRT_VERSION} libnvparsers8=${TRT_VERSION} libnvinfer-plugin8=${TRT_VERSION} libnvinfer-dev=${TRT_VERSION} libnvonnxparsers-dev=${TRT_VERSION} libnvparsers-dev=${TRT_VERSION} libnvinfer-plugin-dev=${TRT_VERSION} python3-libnvinfer=${TRT_VERSION}
41+
# check available version, apt list libnvinfer8 -a
42+
sudo wget -q https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-linux-x86_64 -O /usr/bin/bazel
43+
sudo chmod a+x /usr/bin/bazel
44+
45+
- run:
46+
name: set up python environment
47+
command: |
48+
pip3 install nvidia-pyindex
49+
pip3 install nvidia-tensorrt==8.2.4.2
50+
pip3 install --pre torch==1.13.0.dev20220621 torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cu113
51+
pip3 install pytest parameterized expecttest
52+
# install torch_tensorrt
53+
mv WORKSPACE.ci WORKSPACE
54+
cd py
55+
python3 setup.py install
56+
57+
# install fx2trt
58+
# cd py/torch_tensorrt/fx/setup
59+
# python3 setup.py install
60+
- run:
61+
name: run fx2trt tests
62+
command: |
63+
# one fix pending to enable below
64+
# cd py/torch_tensorrt/fx/test
65+
# pytest $(find . -name '*.py' | grep -v test_dispatch* | grep -v test_setitem*)
66+
67+
cd py/torch_tensorrt/fx/test
68+
pushd converters/acc_op
69+
pytest
70+
popd
71+
pushd passes
72+
list_passes=$(ls | grep -v test_setitem*)
73+
pytest $list_passes
74+
popd
75+
pushd core
76+
pytest
77+
popd
78+
# pushd quant
79+
# pytest
80+
# popd
81+
pushd tools
82+
pytest
83+
popd
84+
pushd trt_lower
85+
pytest
86+
popd
87+
pushd tracer
88+
list_tracer=$(ls | grep -v test_dispatch_*)
89+
pytest $list_tracer
90+
popd
91+
# Invoke jobs via workflows
92+
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
93+
workflows:
94+
build_run:
95+
jobs:
96+
- build

.github/code-owners.yml

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -114,8 +114,13 @@
114114
- "andi4191"
115115
- "narendasan"
116116

117-
"component: ux":
117+
"ux":
118118
# Issues related to the user experience including debugging and installation
119119
- "narendasan"
120120
- "peri044"
121121

122+
"component: fx":
123+
- "frank-wei"
124+
- "yinghai"
125+
- "842974287"
126+
- "wushirong"

.github/pr-labels.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,9 @@
1818

1919
"component: evaluators":
2020
- core/conversion/evaluators/**/*
21+
22+
"component: fx":
23+
- py/torch_tensorrt/fx/**/*
2124

2225
"component: partitioning":
2326
- core/partitioning/**/*

.github/pull_request_template.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,4 +20,5 @@ Please delete options that are not relevant and/or add your own.
2020
- [ ] I have commented my code, particularly in hard-to-understand areas and hacks
2121
- [ ] I have made corresponding changes to the documentation
2222
- [ ] I have added tests to verify my fix or my feature
23-
- [ ] New and existing unit tests pass locally with my changes
23+
- [ ] New and existing unit tests pass locally with my changes
24+
- [ ] I have added the relevant labels to my PR in so that relevant reviewers are notified

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
### Developing Torch-TensorRT
44

5-
Do try to fill an issue with your feature or bug before filling a PR (op support is generally an exception as long as you provide tests to prove functionality). There is also a backlog (https://github.com/NVIDIA/Torch-TensorRT/issues) of issues which are tagged with the area of focus, a coarse priority level and whether the issue may be accessible to new contributors. Let us know if you are interested in working on a issue. We are happy to provide guidance and mentorship for new contributors. Though note, there is no claiming of issues, we prefer getting working code quickly vs. addressing concerns about "wasted work".
5+
Do try to file an issue with your feature or bug before filing a PR (op support is generally an exception as long as you provide tests to prove functionality). There is also a backlog (https://github.com/pytorch/TensorRT/issues) of issues which are tagged with the area of focus, a coarse priority level and whether the issue may be accessible to new contributors. Let us know if you are interested in working on an issue. We are happy to provide guidance and mentorship for new contributors. Though note, there is no claiming of issues, we prefer getting working code quickly vs. addressing concerns about "wasted work".
66

77
#### Communication
88

LICENSE

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
11
Copyright (c) 2020-present, NVIDIA CORPORATION. All rights reserved.
2+
Copyright (c) Meta Platforms, Inc. and affiliates.
3+
24

35
Redistribution and use in source and binary forms, with or without
46
modification, are permitted provided that the following conditions
@@ -11,9 +13,9 @@ are met:
1113
notice, this list of conditions and the following disclaimer in the
1214
documentation and/or other materials provided with the distribution.
1315

14-
3. Neither the name of NVIDIA CORPORATION nor the names of its
15-
contributors may be used to endorse or promote products derived
16-
from this software without specific prior written permission.
16+
3. Neither the name of the copyright holder nor the names of its contributors
17+
may be used to endorse or promote products derived from this software
18+
without specific prior written permission.
1719

1820
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
1921
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
@@ -25,4 +27,4 @@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2527
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
2628
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2729
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28-
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

README.md

Lines changed: 45 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ These are the following dependencies used to verify the testcases. Torch-TensorR
118118

119119
## Prebuilt Binaries and Wheel files
120120

121-
Releases: https://github.com/NVIDIA/Torch-TensorRT/releases
121+
Releases: https://github.com/pytorch/TensorRT/releases
122122

123123
## Compiling Torch-TensorRT
124124

@@ -212,6 +212,12 @@ new_local_repository(
212212
bazel build //:libtorchtrt --compilation_mode opt
213213
```
214214

215+
### FX path (Python only) installation
216+
If you plan to try the FX path (Python only) and would like to avoid the bazel build, please follow the steps below.
217+
``` shell
218+
cd py && python3 setup.py install --fx-only
219+
```
220+
215221
### Debug build
216222

217223
``` shell
@@ -250,11 +256,48 @@ docker run -it -v$(pwd)/..:/workspace/Torch-TensorRT build_torch_tensorrt_wheel
250256

251257
Python compilation expects using the tarball based compilation strategy from above.
252258

259+
260+
## Testing using Python backend
261+
262+
Torch-TensorRT supports testing in Python using [nox](https://nox.thea.codes/en/stable)
263+
264+
To install nox using pip:
265+
266+
```
267+
python3 -m pip install --upgrade nox
268+
```
269+
270+
To list supported nox sessions:
271+
272+
```
273+
nox --list
274+
```
275+
276+
Environment variables supported by nox
277+
278+
```
279+
PYT_PATH - To use different PYTHONPATH than system installed Python packages
280+
TOP_DIR - To set the root directory of the noxfile
281+
USE_CXX11 - To use cxx11_abi (Defaults to 0)
282+
USE_HOST_DEPS - To use host dependencies for tests (Defaults to 0)
283+
```
284+
285+
Usage example
286+
287+
```
288+
nox --session l0_api_tests
289+
```
290+
291+
Supported Python versions:
292+
```
293+
["3.7", "3.8", "3.9", "3.10"]
294+
```
295+
253296
## How do I add support for a new op...
254297

255298
### In Torch-TensorRT?
256299

257-
Thanks for wanting to contribute! There are two main ways to handle supporting a new op. Either you can write a converter for the op from scratch and register it in the NodeConverterRegistry or if you can map the op to a set of ops that already have converters you can write a graph rewrite pass which will replace your new op with an equivalent subgraph of supported ops. Its preferred to use graph rewriting because then we do not need to maintain a large library of op converters. Also do look at the various op support trackers in the [issues](https://github.com/NVIDIA/Torch-TensorRT/issues) for information on the support status of various operators.
300+
Thanks for wanting to contribute! There are two main ways to handle supporting a new op. Either you can write a converter for the op from scratch and register it in the NodeConverterRegistry or if you can map the op to a set of ops that already have converters you can write a graph rewrite pass which will replace your new op with an equivalent subgraph of supported ops. Its preferred to use graph rewriting because then we do not need to maintain a large library of op converters. Also do look at the various op support trackers in the [issues](https://github.com/pytorch/TensorRT/issues) for information on the support status of various operators.
258301

259302
### In my application?
260303

WORKSPACE.ci

Lines changed: 147 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,147 @@
1+
workspace(name = "Torch-TensorRT")
2+
3+
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
4+
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
5+
6+
http_archive(
7+
name = "rules_python",
8+
sha256 = "778197e26c5fbeb07ac2a2c5ae405b30f6cb7ad1f5510ea6fdac03bded96cc6f",
9+
url = "https://github.com/bazelbuild/rules_python/releases/download/0.2.0/rules_python-0.2.0.tar.gz",
10+
)
11+
12+
load("@rules_python//python:pip.bzl", "pip_install")
13+
14+
http_archive(
15+
name = "rules_pkg",
16+
sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d",
17+
urls = [
18+
"https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz",
19+
"https://github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz",
20+
],
21+
)
22+
23+
load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
24+
25+
rules_pkg_dependencies()
26+
27+
git_repository(
28+
name = "googletest",
29+
commit = "703bd9caab50b139428cea1aaff9974ebee5742e",
30+
remote = "https://github.com/google/googletest",
31+
shallow_since = "1570114335 -0400",
32+
)
33+
34+
# External dependency for torch_tensorrt if you already have precompiled binaries.
35+
local_repository(
36+
name = "torch_tensorrt",
37+
path = "/opt/conda/lib/python3.8/site-packages/torch_tensorrt"
38+
)
39+
40+
# CUDA should be installed on the system locally
41+
new_local_repository(
42+
name = "cuda",
43+
build_file = "@//third_party/cuda:BUILD",
44+
path = "/usr/local/cuda/",
45+
)
46+
47+
new_local_repository(
48+
name = "cublas",
49+
build_file = "@//third_party/cublas:BUILD",
50+
path = "/usr",
51+
)
52+
#############################################################################################################
53+
# Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
54+
#############################################################################################################
55+
56+
#http_archive(
57+
# name = "libtorch",
58+
# build_file = "@//third_party/libtorch:BUILD",
59+
# sha256 = "8d9e829ce9478db4f35bdb7943308cf02e8a2f58cf9bb10f742462c1d57bf287",
60+
# strip_prefix = "libtorch",
61+
# urls = ["https://download.pytorch.org/libtorch/cu113/libtorch-cxx11-abi-shared-with-deps-1.11.0%2Bcu113.zip"],
62+
#)
63+
#
64+
#http_archive(
65+
# name = "libtorch_pre_cxx11_abi",
66+
# build_file = "@//third_party/libtorch:BUILD",
67+
# sha256 = "90159ecce3ff451f3ef3f657493b6c7c96759c3b74bbd70c1695f2ea2f81e1ad",
68+
# strip_prefix = "libtorch",
69+
# urls = ["https://download.pytorch.org/libtorch/cu113/libtorch-shared-with-deps-1.11.0%2Bcu113.zip"],
70+
#)
71+
72+
# Download these tarballs manually from the NVIDIA website
73+
# Either place them in the distdir directory in third_party and use the --distdir flag
74+
# or modify the urls to "file:///<PATH TO TARBALL>/<TARBALL NAME>.tar.gz
75+
76+
#http_archive(
77+
# name = "cudnn",
78+
# build_file = "@//third_party/cudnn/archive:BUILD",
79+
# sha256 = "0e5d2df890b9967efa6619da421310d97323565a79f05a1a8cb9b7165baad0d7",
80+
# strip_prefix = "cuda",
81+
# urls = [
82+
# "https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.2.4/11.4_20210831/cudnn-11.4-linux-x64-v8.2.4.15.tgz",
83+
# ],
84+
#)
85+
#
86+
#http_archive(
87+
# name = "tensorrt",
88+
# build_file = "@//third_party/tensorrt/archive:BUILD",
89+
# sha256 = "826180eaaecdf9a7e76116855b9f1f3400ea9b06e66b06a3f6a0747ba6f863ad",
90+
# strip_prefix = "TensorRT-8.2.4.2",
91+
# urls = [
92+
# "https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/8.2.4/tars/tensorrt-8.2.4.2.linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz",
93+
# ],
94+
#)
95+
96+
####################################################################################
97+
# Locally installed dependencies (use in cases of custom dependencies or aarch64)
98+
####################################################################################
99+
100+
# NOTE: In the case you are using just the pre-cxx11-abi path or just the cxx11 abi path
101+
# with your local libtorch, just point deps at the same path to satisfy bazel.
102+
103+
# NOTE: NVIDIA's aarch64 PyTorch (python) wheel file uses the CXX11 ABI unlike PyTorch's standard
104+
# x86_64 python distribution. If using NVIDIA's version just point to the root of the package
105+
# for both versions here and do not use --config=pre-cxx11-abi
106+
107+
new_local_repository(
108+
name = "libtorch",
109+
path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch",
110+
build_file = "third_party/libtorch/BUILD"
111+
)
112+
113+
new_local_repository(
114+
name = "libtorch_pre_cxx11_abi",
115+
path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch",
116+
build_file = "third_party/libtorch/BUILD"
117+
)
118+
119+
new_local_repository(
120+
name = "cudnn",
121+
path = "/usr/",
122+
build_file = "@//third_party/cudnn/local:BUILD"
123+
)
124+
125+
new_local_repository(
126+
name = "tensorrt",
127+
path = "/usr/",
128+
build_file = "@//third_party/tensorrt/local:BUILD"
129+
)
130+
131+
# #########################################################################
132+
# # Testing Dependencies (optional - comment out on aarch64)
133+
# #########################################################################
134+
# pip_install(
135+
# name = "torch_tensorrt_py_deps",
136+
# requirements = "//py:requirements.txt",
137+
# )
138+
139+
# pip_install(
140+
# name = "py_test_deps",
141+
# requirements = "//tests/py:requirements.txt",
142+
# )
143+
144+
pip_install(
145+
name = "pylinter_deps",
146+
requirements = "//tools/linter:requirements.txt",
147+
)

0 commit comments

Comments
 (0)