Commit bfae556

chore: Fix merge conflicts

Signed-off-by: Dheeraj Peri <[email protected]>

2 parents: 5bf2f4a + 10325f1

190 files changed: +3618 −1146 lines

Some content is hidden: large commits have some files collapsed by default, so only a subset of the 190 changed files is rendered below.

.circleci/config.yml

Lines changed: 1 addition & 1 deletion
@@ -751,7 +751,7 @@ parameters:
   # Nightly platform config
   torch-nightly-build:
     type: string
-    default: "1.13.0.dev20220731+cu113"
+    default: "1.13.0.dev20220810+cu113"
   torch-nightly-build-index:
     type: string
     default: "https://download.pytorch.org/whl/nightly/cu113"

.github/actions/assigner/package-lock.json

Lines changed: 24 additions & 9 deletions
Some generated files are not rendered by default.

.github/actions/assigner/package.json

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@
   "license": "BSD-3-Clause",
   "repository": "https://www.github.com/pytorch/TensorRT",
   "dependencies": {
-    "@actions/core": "^1.8.2",
+    "@actions/core": "^1.9.1",
     "@actions/github": "^5.0.3",
     "fs": "^0.0.1-security",
     "js-yaml": "^4.1.0"

.github/workflows/docgen.yml

Lines changed: 58 additions & 4 deletions
@@ -10,9 +10,9 @@ on:
 
 jobs:
   build-docs:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
     container:
-      image: docker.pkg.github.com/pytorch/tensorrt/docgen:latest
+      image: ghcr.io/pytorch/tensorrt/docgen:latest
       credentials:
         username: ${{ github.actor }}
         password: ${{ secrets.GITHUB_TOKEN }}
@@ -22,23 +22,77 @@ jobs:
           rm -rf /usr/share/dotnet
           rm -rf /opt/ghc
           rm -rf "/usr/local/share/boost"
+          rm -rf /usr/local/cuda/cuda-*
+      - name: Install base deps
+        run: |
+          apt update
+          DEBIAN_FRONTEND=noninteractive apt install -y software-properties-common gcc git curl wget make zlib1g-dev bzip2 libbz2-dev lzma lzma-dev libreadline-dev libsqlite3-dev libssl-dev libffi-dev doxygen pandoc
+          git config --global --add safe.directory '*'
+      - name: Install Python
+        run: |
+          mkdir -p /opt/circleci
+          git clone https://github.com/pyenv/pyenv.git /opt/circleci/.pyenv
+          export PYENV_ROOT="/opt/circleci/.pyenv"
+          export PATH="$PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH"
+          pyenv install 3.9.4
+          pyenv global 3.9.4
+          python3 -m pip install --upgrade pip
+          python3 -m pip install wheel
       - uses: actions/checkout@v2
         with:
           ref: ${{github.head_ref}}
       - name: Get HEAD SHA
         id: vars
         run: echo "::set-output name=sha::$(git rev-parse --short HEAD)"
+      - name: Get Bazel version
+        id: bazel_info
+        run: echo "::set-output name=version::$(cat .bazelversion)"
+      - name: Install Bazel
+        run: |
+          wget -q https://github.com/bazelbuild/bazel/releases/download/${{ steps.bazel_info.outputs.version }}/bazel-${{ steps.bazel_info.outputs.version }}-linux-x86_64 -O /usr/bin/bazel
+          chmod a+x /usr/bin/bazel
+      - name: Install cudnn + tensorrt
+        run: |
+          apt-get update
+          wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin
+          mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600
+          apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/7fa2af80.pub
+          apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 536F8F1DE80F6A35
+          apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC
+          add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/ /"
+          apt-get update
+          apt-get install -y libcudnn8 libcudnn8-dev
+
+          apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub
+          add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/ /"
+          apt-get update
+
+          apt-get install -y libnvinfer8 libnvinfer-plugin8 libnvinfer-dev libnvinfer-plugin-dev
+      - name: Install Torch
+        run: |
+          export PYENV_ROOT="/opt/circleci/.pyenv"
+          export PATH="$PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH"
+          pyenv global 3.9.4
+          python3 -m pip install -r py/requirements.txt
       - name: Build Python Package
         run: |
-          cp docker/WORKSPACE.docker WORKSPACE
+          cp toolchains/ci_workspaces/WORKSPACE.x86_64 WORKSPACE
           cd py
+          export PYENV_ROOT="/opt/circleci/.pyenv"
+          export PATH="$PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH"
+          pyenv global 3.9.4
           python3 setup.py install
+          cd ..
       - name: Generate New Docs
         run: |
           cd docsrc
-          pip3 install -r requirements.txt
+          export PYENV_ROOT="/opt/circleci/.pyenv"
+          export PATH="$PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH"
+          pyenv global 3.9.4
+          python3 -m pip install -r requirements.txt
           python3 -c "import torch_tensorrt; print(torch_tensorrt.__version__)"
           make html
+          cd ..
       - uses: stefanzweifel/git-auto-commit-action@v4
         with:
           # Required

.gitmodules

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+[submodule "toolchains/nv_workspaces"]
+	path = toolchains/nv_workspaces
+	url = ssh://[email protected]:12051/torch-tensorrt/WORKSPACES.git
+	branch = master
+	ignore = dirty

.pre-commit-config.yaml

Lines changed: 10 additions & 1 deletion
@@ -27,4 +27,13 @@ repos:
     - id: buildifier
       args:
         - --warnings=all
-    - id: buildifier-lint
+    - id: buildifier-lint
+  - repo: local
+    hooks:
+      - id: dont-commit-upstream
+        name: NVIDIA-INTERNAL check
+        entry: "!NVIDIA-INTERNAL"
+        exclude: "^.pre-commit-config.yaml"
+        language: pygrep
+        types: [text]
+
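Note: the new `dont-commit-upstream` hook uses pre-commit's `pygrep` language, which fails the commit when any staged text file matches the `entry` regular expression (here the literal marker `!NVIDIA-INTERNAL`), with the config file itself excluded. As a rough, hypothetical illustration only (not part of this commit), the check behaves roughly like this Python sketch:

```python
# Hypothetical illustration of what the "dont-commit-upstream" pygrep hook does:
# fail if any file passed to it contains the marker "!NVIDIA-INTERNAL".
import re
import sys

MARKER = re.compile(r"!NVIDIA-INTERNAL")

def check_files(paths):
    failed = False
    for path in paths:
        try:
            text = open(path, encoding="utf-8", errors="ignore").read()
        except OSError:
            continue  # unreadable file; pre-commit would not pass binaries here anyway
        if MARKER.search(text):
            print(f"{path}: contains !NVIDIA-INTERNAL marker, refusing to commit")
            failed = True
    return 1 if failed else 0

if __name__ == "__main__":
    sys.exit(check_files(sys.argv[1:]))
```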

CONTRIBUTING.md

Lines changed: 6 additions & 14 deletions
@@ -18,20 +18,12 @@ We use the PyTorch Slack for communication about core development, integration w
 
 - We generally follow the coding guidelines used in PyTorch
 
-- Use the built in linting tools to ensure that your code matches the style guidelines
-  ```sh
-  # C++ Linting (After installing clang-format [Version 9.0.0])
-  # Print non-conforming sections of code
-  bazel run //tools/linter:cpplint_diff -- //...
-  # Modify code to conform with style guidelines
-  bazel run //tools/linter:cpplint -- //...
-
-  # Python Linting
-  # Print non-conforming sections of code
-  bazel run //tools/linter:pylint_diff -- //...
-  # Modify code to conform with style guidelines
-  bazel run //tools/linter:pylint -- //...
-  ```
+- Linting your code is essential to ensure code matches the style guidelines.
+  To begin with, please install the following dependencies
+  * `pip install -r requirements-dev.txt`
+  * Install Bazel buildifier https://github.com/bazelbuild/buildtools/blob/master/buildifier/README.md#setup
+
+  Once the above dependencies are installed, `git commit` command will perform linting before committing your code.
 
 - Avoid introducing unnecessary complexity into existing code so that maintainability and readability are preserved
 
README.md

Lines changed: 2 additions & 1 deletion
@@ -80,7 +80,8 @@ trt_ts_module = torch_tensorrt.compile(torch_script_module,
                                              # For static size shape=[1, 3, 224, 224]
             dtype=torch.half) # Datatype of input tensor. Allowed options torch.(float|half|int8|int32|bool)
     ],
-    enabled_precisions = {torch.half}, # Run with FP16)
+    enabled_precisions = {torch.half}, # Run with FP16
+)
 
 result = trt_ts_module(input_data) # run inference
 torch.jit.save(trt_ts_module, "trt_torchscript_module.ts") # save the TRT embedded Torchscript
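Note: this change moves a stray closing parenthesis that had been swallowed by the `# Run with FP16` comment, so the `torch_tensorrt.compile(...)` call in the README now actually closes. For context, a minimal sketch of how the corrected call reads; the module path, input tensor, and shapes below are illustrative placeholders, not part of the commit:

```python
# Sketch of the corrected README example; "model.ts" and input_data are
# illustrative stand-ins for the module/data defined earlier in the README.
import torch
import torch_tensorrt

torch_script_module = torch.jit.load("model.ts").eval().cuda()  # hypothetical TorchScript module
input_data = torch.randn((1, 3, 224, 224), dtype=torch.half, device="cuda")

trt_ts_module = torch_tensorrt.compile(
    torch_script_module,
    inputs=[
        torch_tensorrt.Input(
            shape=[1, 3, 224, 224],  # static size, as in the README comment
            dtype=torch.half,        # allowed: torch.(float|half|int8|int32|bool)
        ),
    ],
    enabled_precisions={torch.half},  # run with FP16
)  # the parenthesis freed from the comment closes the call here

result = trt_ts_module(input_data)                          # run inference
torch.jit.save(trt_ts_module, "trt_torchscript_module.ts")  # save the TRT-embedded TorchScript
```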

WORKSPACE

Lines changed: 2 additions & 2 deletions
@@ -76,8 +76,8 @@ http_archive(
 http_archive(
     name = "cudnn",
     build_file = "@//third_party/cudnn/archive:BUILD",
-    sha256 = "7f3fbe6201708de409532a32d647af6b4bdb10d7f045d557270549e286487289",
-    strip_prefix = "cudnn-linux-x86_64-8.4.1.114_cuda11.4-archive",
+    sha256 = "ec96d2376d81fca42bdd3d4c3d705a99b29a065bab57f920561c763e29c67d01",
+    strip_prefix = "cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive",
     urls = [
         "https://developer.nvidia.com/compute/cudnn/secure/8.4.1/local_installers/11.6/cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz",
     ],
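Note: Bazel verifies the cuDNN tarball against the `sha256` pinned above, so the digest has to be regenerated whenever the archive (here 8.4.1.50 for CUDA 11.6) is bumped. A minimal sketch of reproducing the digest locally, assuming the tarball has already been downloaded into the working directory:

```python
# Sketch: recompute the sha256 that WORKSPACE pins for the cuDNN archive.
# Assumes the tarball (basename of the pinned URL) is already in the current directory.
import hashlib

ARCHIVE = "cudnn-linux-x86_64-8.4.1.50_cuda11.6-archive.tar.xz"
EXPECTED = "ec96d2376d81fca42bdd3d4c3d705a99b29a065bab57f920561c763e29c67d01"  # value from WORKSPACE

h = hashlib.sha256()
with open(ARCHIVE, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

digest = h.hexdigest()
print(digest)
assert digest == EXPECTED, "archive does not match the sha256 pinned in WORKSPACE"
```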

core/compiler.cpp

Lines changed: 2 additions & 1 deletion
@@ -426,7 +426,8 @@ torch::jit::Module CompileGraph(const torch::jit::Module& mod, CompileSpec cfg)
   auto outputIsCollection = conversion::OutputIsCollection(g->block());
   if (cfg.partition_info.enabled &&
       (cfg.lower_info.forced_fallback_modules.size() == 0 &&
-       cfg.partition_info.forced_fallback_operators.size() == 0 && isBlockConvertible)) {
+       cfg.partition_info.forced_fallback_operators.size() == 0 && isBlockConvertible) &&
+      !outputIsCollection) {
     LOG_INFO("Skipping partitioning since model is fully supported");
   }
 
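Note: the merged condition now also requires `!outputIsCollection`, so the "skipping partitioning" fast path is taken only when partitioning is enabled, nothing is forced to fall back, the block is convertible, and the graph does not return a collection (e.g. tuple/list) output; graphs with collection outputs keep going through partitioning. A hypothetical Python mirror of the predicate, for illustration only (the real logic is the C++ shown above):

```python
# Illustrative mirror of the updated condition in core/compiler.cpp: partitioning is
# skipped only for a fully supported graph that also does not return a collection output.
def should_skip_partitioning(
    partitioning_enabled: bool,
    num_forced_fallback_modules: int,
    num_forced_fallback_operators: int,
    block_is_convertible: bool,
    output_is_collection: bool,
) -> bool:
    return (
        partitioning_enabled
        and num_forced_fallback_modules == 0
        and num_forced_fallback_operators == 0
        and block_is_convertible
        and not output_is_collection  # guard added by this merge
    )
```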
