Skip to content

Commit ae18ae7

Browse files
authored
Merge pull request #141 from NVIDIA/fix_notebook_container
Fix notebook container
2 parents cff4211 + 22ed5cf commit ae18ae7

File tree

8 files changed

+157
-89
lines changed

8 files changed

+157
-89
lines changed

WORKSPACE

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ http_archive(
5454
build_file = "@//third_party/libtorch:BUILD",
5555
strip_prefix = "libtorch",
5656
urls = ["https://download.pytorch.org/libtorch/cu102/libtorch-cxx11-abi-shared-with-deps-1.5.1.zip"],
57-
sha256 = "cf0691493d05062fe3239cf76773bae4c5124f4b039050dbdd291c652af3ab2a"
57+
sha256 = "0efdd4e709ab11088fa75f0501c19b0e294404231442bab1d1fb953924feb6b5"
5858
)
5959

6060
http_archive(
@@ -71,15 +71,15 @@ http_archive(
7171

7272
http_archive(
7373
name = "cudnn",
74-
urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.0.1.13/10.2_20200626/cudnn-10.2-linux-x64-v8.0.1.13.tgz"],
74+
urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.0.1.13/10.2_20200626/cudnn-10.2-linux-x64-v8.0.1.13.tgz",],
7575
build_file = "@//third_party/cudnn/archive:BUILD",
7676
sha256 = "0c106ec84f199a0fbcf1199010166986da732f9b0907768c9ac5ea5b120772db",
7777
strip_prefix = "cuda"
7878
)
7979

8080
http_archive(
8181
name = "tensorrt",
82-
urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.1/tars/TensorRT-7.1.3.4.Ubuntu-18.04.x86_64-gnu.cuda-10.2.cudnn8.0.tar.gz"],
82+
urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.1/tars/TensorRT-7.1.3.4.Ubuntu-18.04.x86_64-gnu.cuda-10.2.cudnn8.0.tar.gz",],
8383
build_file = "@//third_party/tensorrt/archive:BUILD",
8484
sha256 = "9205bed204e2ae7aafd2e01cce0f21309e281e18d5bfd7172ef8541771539d41",
8585
strip_prefix = "TensorRT-7.1.3.4"

core/conversion/converters/impl/conv_deconv.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,9 +45,10 @@ auto conv_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
4545

4646
deconv->setStrideNd(stride);
4747
deconv->setPaddingNd(padding);
48+
#if NV_TENSORRT_MAJOR > 7 || (NV_TENSORRT_MAJOR == 7 && NV_TENSORRT_MINOR == 1)
4849
deconv->setDilationNd(dilation);
4950
deconv->setNbGroups(groups);
50-
51+
#endif
5152
new_layer = deconv;
5253
} else {
5354
nvinfer1::IConvolutionLayer* conv;

core/conversion/converters/impl/interpolate.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ auto interpolate_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
153153
// align_corners not supported in TensorRT, create plugin and run layer through PyTorch
154154
create_plugin(ctx, n, in, "linear1d", in_shape, out_shape, out_size, std::string("linear"));
155155
} else {
156-
resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR. true);
156+
resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR, true);
157157
}
158158
#else
159159
resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR, align_corners);
@@ -185,7 +185,7 @@ auto interpolate_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
185185
// align_corners not supported in TensorRT, create plugin and run layer through PyTorch
186186
create_plugin(ctx, n, in, "bilinear2d", in_shape, out_shape, out_size, std::string("bilinear"));
187187
} else {
188-
resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR. true);
188+
resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR, true);
189189
}
190190
#else
191191
resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR, align_corners);
@@ -217,7 +217,7 @@ auto interpolate_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
217217
// align_corners not supported in TensorRT, create plugin and run layer through PyTorch
218218
create_plugin(ctx, n, in, "trilinear3d", in_shape, out_shape, out_size, std::string("trilinear"));
219219
} else {
220-
resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR. true);
220+
resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR, true);
221221
}
222222
#else
223223
resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR, align_corners);

core/conversion/converters/impl/plugins/interpolate_plugin.cpp

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -178,13 +178,13 @@ int InterpolatePlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, cons
178178

179179
cudaStreamWaitEvent(torch_stream.stream(), event, 0);
180180

181-
if (mode == "linear") {
181+
if (mode_ == "linear") {
182182
at::upsample_linear1d_out(output, input, {size_[0]}, align_corners_);
183-
} else if (mode == "bilinear") {
183+
} else if (mode_ == "bilinear") {
184184
at::upsample_bilinear2d_out(output, input, {size_[0], size_[1]}, align_corners_);
185-
} else if (mode == "trilinear") {
185+
} else if (mode_ == "trilinear") {
186186
at::upsample_trilinear3d_out(output, input, {size_[0], size_[1], size_[2]}, align_corners_);
187-
} else if (mode == "adaptive_pool2d") {
187+
} else if (mode_ == "adaptive_pool2d") {
188188
at::adaptive_avg_pool2d_out(output, input, {size_[0], size_[1]});
189189
}
190190

@@ -212,11 +212,6 @@ int InterpolatePlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, cons
212212
output = at::adaptive_avg_pool2d(input, {size_[0], size_[1]});
213213
}
214214

215-
output = output.contiguous();
216-
for (int i = 0; i < util::volume(outputDesc->dims); i++) {
217-
std::cout << ((float*)output.data_ptr())[i] << std::endl;
218-
}
219-
220215
cudaMemcpyAsync(outputs[0], output.data_ptr(), util::volume(outputDesc->dims) * sizeof(float), cudaMemcpyHostToDevice, stream);
221216
cudaStreamSynchronize(stream);
222217

notebooks/Dockerfile.notebook

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
FROM nvcr.io/nvidia/pytorch:20.03-py3
2+
3+
RUN apt update && apt install curl gnupg
4+
RUN curl https://bazel.build/bazel-release.pub.gpg | apt-key add -
5+
RUN echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list
6+
7+
RUN apt update && apt install bazel-3.3.1
8+
RUN ln -s /usr/bin/bazel-3.3.1 /usr/bin/bazel
9+
10+
RUN pip install pillow==4.3.0
11+
RUN pip install torch==1.5.1
12+
RUN pip install torchvision==0.6.1
13+
14+
COPY . /workspace/TRTorch
15+
RUN rm /workspace/TRTorch/WORKSPACE
16+
COPY ./notebooks/WORKSPACE.notebook /workspace/TRTorch/WORKSPACE
17+
18+
WORKDIR /workspace/TRTorch
19+
RUN bazel build //:libtrtorch --compilation_mode opt
20+
21+
WORKDIR /workspace/TRTorch/py
22+
RUN python3 setup.py install
23+
24+
WORKDIR /workspace/TRTorch/notebooks

notebooks/README.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,24 +3,24 @@ This folder contains demo notebooks for TRTorch.
33

44
## 1. Requirements
55

6-
The most convenient way to run these notebooks is via a docker container, which provides a self-contained, isolated and re-producible environment for all experiments.
6+
The most convenient way to run these notebooks is via a docker container, which provides a self-contained, isolated and re-producible environment for all experiments.
77

88
First, clone the repository:
99

1010
```
1111
git clone https://github.com/NVIDIA/TRTorch
1212
```
1313

14-
Next, build the NVIDIA TRTorch container:
14+
Next, build the NVIDIA TRTorch container (from repo root):
1515

1616
```
17-
docker build -t trtorch -f Dockerfile.notebook .
17+
docker build -t trtorch -f notebooks/Dockerfile.notebook .
1818
```
1919

2020
Then launch the container with:
2121

2222
```
23-
docker run --runtime=nvidia -it --rm --ipc=host --net=host trtorch
23+
docker run --runtime=nvidia -it --rm --ipc=host --net=host trtorch
2424
```
2525

2626
Within the docker interactive bash session, start Jupyter with
@@ -38,7 +38,7 @@ in, for example:
3838
```http://[host machine]:8888/?token=aae96ae9387cd28151868fee318c3b3581a2d794f3b25c6b```
3939

4040

41-
Within the container, this notebook itself is located at `/workspace/TRTorch/notebooks`.
41+
Within the container, the notebooks themselves are located at `/workspace/TRTorch/notebooks`.
4242

4343
## 2. Notebook list
4444

notebooks/WORKSPACE.notebook

Lines changed: 43 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -25,29 +25,65 @@ http_archive(
2525
load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
2626
rules_pkg_dependencies()
2727

28+
git_repository(
29+
name = "googletest",
30+
remote = "https://github.com/google/googletest",
31+
commit = "703bd9caab50b139428cea1aaff9974ebee5742e",
32+
shallow_since = "1570114335 -0400"
33+
)
34+
2835
# CUDA should be installed on the system locally
2936
new_local_repository(
3037
name = "cuda",
31-
path = "/usr/local/cuda-10.2/targets/x86_64-linux/",
38+
path = "/usr/local/cuda-10.2/",
3239
build_file = "@//third_party/cuda:BUILD",
3340
)
3441

42+
new_local_repository(
43+
name = "cublas",
44+
path = "/usr",
45+
build_file = "@//third_party/cublas:BUILD",
46+
)
47+
48+
#############################################################################################################
49+
# Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
50+
#############################################################################################################
51+
3552
http_archive(
36-
name = "libtorch_pre_cxx11_abi",
53+
name = "libtorch",
3754
build_file = "@//third_party/libtorch:BUILD",
3855
strip_prefix = "libtorch",
39-
sha256 = "ea8de17c5f70015583f3a7a43c7a5cdf91a1d4bd19a6a7bc11f074ef6cd69e27",
40-
urls = ["https://download.pytorch.org/libtorch/cu102/libtorch-shared-with-deps-1.5.0.zip"],
56+
urls = ["https://download.pytorch.org/libtorch/cu102/libtorch-cxx11-abi-shared-with-deps-1.5.1.zip"],
57+
sha256 = "cf0691493d05062fe3239cf76773bae4c5124f4b039050dbdd291c652af3ab2a"
4158
)
4259

4360
http_archive(
44-
name = "libtorch",
61+
name = "libtorch_pre_cxx11_abi",
4562
build_file = "@//third_party/libtorch:BUILD",
4663
strip_prefix = "libtorch",
47-
urls = ["https://download.pytorch.org/libtorch/cu102/libtorch-cxx11-abi-shared-with-deps-1.5.0.zip"],
48-
sha256 = "0efdd4e709ab11088fa75f0501c19b0e294404231442bab1d1fb953924feb6b5"
64+
sha256 = "818977576572eadaf62c80434a25afe44dbaa32ebda3a0919e389dcbe74f8656",
65+
urls = ["https://download.pytorch.org/libtorch/cu102/libtorch-shared-with-deps-1.5.1.zip"],
66+
)
67+
68+
####################################################################################
69+
# Locally installed dependencies (use in cases of custom dependencies or aarch64)
70+
####################################################################################
71+
72+
new_local_repository(
73+
name = "cudnn",
74+
path = "/usr/",
75+
build_file = "@//third_party/cudnn/local:BUILD"
76+
)
77+
78+
new_local_repository(
79+
name = "tensorrt",
80+
path = "/usr/",
81+
build_file = "@//third_party/tensorrt/local:BUILD"
4982
)
5083

84+
#########################################################################
85+
# Testing Dependencies (optional - comment out on aarch64)
86+
#########################################################################
5187
pip3_import(
5288
name = "trtorch_py_deps",
5389
requirements = "//py:requirements.txt"
@@ -64,39 +100,3 @@ pip3_import(
64100
load("@py_test_deps//:requirements.bzl", "pip_install")
65101
pip_install()
66102

67-
## Downloaded distributions to use with --distdir
68-
#http_archive(
69-
# name = "cudnn",
70-
# urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/7.6.5.32/Production/10.2_20191118/cudnn-10.2-linux-x64-v7.6.5.32.tgz"],
71-
# build_file = "@//third_party/cudnn/archive:BUILD",
72-
# sha256 = "600267f2caaed2fd58eb214ba669d8ea35f396a7d19b94822e6b36f9f7088c20",
73-
# strip_prefix = "cuda"
74-
#)
75-
76-
#http_archive(
77-
# name = "tensorrt",
78-
# urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.0/7.0.0.11/tars/TensorRT-7.0.0.11.Ubuntu-18.04.x86_64-gnu.cuda-10.2.cudnn7.6.tar.gz"],
79-
# build_file = "@//third_party/tensorrt/archive:BUILD",
80-
# sha256 = "c7d73b2585b18aae68b740249efa8c8ba5ae852abe9a023720595432a8eb4efd",
81-
# strip_prefix = "TensorRT-7.0.0.11"
82-
#)
83-
84-
# Locally installed dependencies
85-
new_local_repository(
86-
name = "cudnn",
87-
path = "/usr/",
88-
build_file = "@//third_party/cudnn/local:BUILD"
89-
)
90-
91-
new_local_repository(
92-
name = "tensorrt",
93-
path = "/usr/",
94-
build_file = "@//third_party/tensorrt/local:BUILD"
95-
)
96-
97-
git_repository(
98-
name = "googletest",
99-
remote = "https://github.com/google/googletest",
100-
commit = "703bd9caab50b139428cea1aaff9974ebee5742e",
101-
shallow_since = "1570114335 -0400"
102-
)

0 commit comments

Comments (0)