
Commit d872d02

refactor: Update example names
Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>

1 parent 493c19f

File tree: 34 files changed, +412 -260 lines

.bazelrc
Lines changed: 7 additions & 1 deletion

```diff
@@ -33,4 +33,10 @@ build:python --define=target_lang=python

 build:pre_cxx11_abi --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"
 build:pre_cxx11_abi --linkopt="-D_GLIBCXX_USE_CXX11_ABI=0"
-build:pre_cxx11_abi --define=abi=pre_cxx11_abi
+build:pre_cxx11_abi --define=abi=pre_cxx11_abi
+
+build:ci_testing --define=torchtrt_src=pre_built
+build:use_precompiled_torchtrt --define=torchtrt_src=pre_built
+
+test:ci_testing --define=torchtrt_src=pre_built
+test:use_precompiled_torchtrt --define=torchtrt_src=pre_built
```

docker/dist-test.sh
Lines changed: 2 additions & 1 deletion

```diff
@@ -5,4 +5,5 @@ pip3 install timm --trusted-host pypi.org --trusted-host pypi.python.org --trust
 cd tests/modules && python3 ./hub.py
 cd ../..

-bazel test //tests:tests //tests:python_api_tests --compilation_mode=opt --jobs=4
+bazel test //tests:tests //tests:python_api_tests --compilation_mode=opt --jobs=4 --define=torchtrt_src=prebuilt
+
```

examples/benchmark/BUILD
Lines changed: 1 addition & 1 deletion

```diff
@@ -7,7 +7,7 @@ cc_binary(
         "timer.h",
     ],
     deps = [
-        "//cpp:trtorch",
+        "//cpp:torch_tensorrt",
         "@libtorch",
         "@libtorch//:caffe2",
     ],
```

examples/benchmark/README.md
Lines changed: 3 additions & 3 deletions

````diff
@@ -1,12 +1,12 @@
 # Benchmarking

-This is a quick benchmarking application for TRTorch. It lets you run supported TorchScript modules both in JIT and TRT and returns the average runtime and throughput.
+This is a quick benchmarking application for Torch-TensorRT. It lets you run supported TorchScript modules both in JIT and TRT and returns the average runtime and throughput.

 ## Compilation / Usage

 Run with bazel:

-> Note: Make sure libtorch and TensorRT are in your LD_LIBRARY_PATH before running, if you need a location you can `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:[WORKSPACE ROOT]/bazel-TRTorch/external/libtorch/lib:[WORKSPACE ROOT]/bazel-TRTorch/external/tensorrt/lib`
+> Note: Make sure libtorch and TensorRT are in your LD_LIBRARY_PATH before running, if you need a location you can `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:[WORKSPACE ROOT]/bazel-Torch-TensorRT/external/libtorch/lib:[WORKSPACE ROOT]/bazel-Torch-TensorRT/external/tensorrt/lib`

 ``` sh
 bazel run //cpp/benchmark --cxxopt="-DNDEBUG" --cxxopt="-DJIT" --cxxopt="-DTRT" -- [PATH TO JIT MODULE FILE] [INPUT SIZE (explicit batch)]
@@ -20,7 +20,7 @@ bazel run //cpp/benchmark --cxxopt="-DNDEBUG" --cxxopt="-DJIT" --cxxopt="-DTRT"

 ### Options

-You can run a module with JIT or TRT via TRTorch in either FP32 or FP16. These options are controlled by preprocessor directives.
+You can run a module with JIT or TRT via Torch-TensorRT in either FP32 or FP16. These options are controlled by preprocessor directives.

 - To enable JIT profiling, add the argument `--cxxopt="-DJIT"`
````

examples/benchmark/main.cpp
Lines changed: 4 additions & 4 deletions

```diff
@@ -4,7 +4,7 @@
 #include "torch/script.h"

 #include "timer.h"
-#include "trtorch/trtorch.h"
+#include "torch_tensorrt/torch_tensorrt.h"

 #include <iostream>
 #include <memory>
@@ -120,18 +120,18 @@ int main(int argc, const char* argv[]) {
   at::globalContext().setBenchmarkCuDNN(true);

 #ifdef TRT
-  auto compile_spec = trtorch::CompileSpec(dims);
+  auto compile_spec = torch_tensorrt::ts::CompileSpec(dims);
   compile_spec.workspace_size = 1 << 20;

 #ifdef HALF
   compile_spec.enabled_precisions.insert(torch::kF16);
 #endif

-  auto trt_mod = trtorch::CompileGraph(mod, compile_spec);
+  auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);

 #ifdef SAVE_ENGINE
   std::cout << "Compiling graph to save as TRT engine (/tmp/engine_converted_from_jit.trt)" << std::endl;
-  auto engine = trtorch::ConvertGraphToTRTEngine(mod, "forward", compile_spec);
+  auto engine = torch_tensorrt::ts::ConvertMethodToTRTEngine(mod, "forward", compile_spec);
   std::ofstream out("/tmp/engine_converted_from_jit.trt");
   out << engine;
   out.close();
```
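For readers tracking the rename: the TorchScript frontend's Python surface follows the same naming scheme as the C++ calls in this hunk. Below is a minimal sketch of the Python-side equivalents, assuming a Torch-TensorRT 1.0-style install and a CUDA-capable machine; `TinyModel` and the shapes are illustrative placeholders, not part of this commit.

```python
import torch
import torch_tensorrt


class TinyModel(torch.nn.Module):  # illustrative placeholder module
    def forward(self, x):
        return torch.relu(x)


scripted = torch.jit.script(TinyModel().eval())
settings = {
    "inputs": [torch_tensorrt.Input(shape=[1, 3, 224, 224])],
    "enabled_precisions": {torch.half},  # analogous to the -DHALF path above
}

# trtorch::CompileGraph -> torch_tensorrt::ts::CompileModule corresponds to
# torch_tensorrt.ts.compile on the Python side:
trt_mod = torch_tensorrt.ts.compile(scripted, **settings)

# trtorch::ConvertGraphToTRTEngine -> torch_tensorrt::ts::ConvertMethodToTRTEngine
# corresponds to torch_tensorrt.ts.convert_method_to_trt_engine:
engine = torch_tensorrt.ts.convert_method_to_trt_engine(scripted, "forward", **settings)
```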

examples/custom_converters/README.md
Lines changed: 20 additions & 20 deletions

````diff
@@ -31,11 +31,11 @@ Once we are clear about these rules and writing patterns, we can create a sepera

 namespace my_custom_converters {

-auto actelu = trtorch::core::conversion::converters::RegisterNodeConversionPatterns().pattern(
+auto actelu = torch_tensorrt::core::conversion::converters::RegisterNodeConversionPatterns().pattern(
     {"aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> (Tensor)",
-     [](trtorch::core::conversion::ConversionCtx* ctx,
+     [](torch_tensorrt::core::conversion::ConversionCtx* ctx,
         const torch::jit::Node* n,
-        trtorch::core::conversion::converters::args& args) -> bool {
+        torch_tensorrt::core::conversion::converters::args& args) -> bool {
        auto in = args[0].ITensorOrFreeze(ctx);
        auto alpha = args[1].unwrapToDouble();

@@ -45,7 +45,7 @@ auto actelu = trtorch::core::conversion::converters::RegisterNodeConversionPatte
        }

        new_layer->setAlpha(alpha);
-       new_layer->setName(trtorch::core::util::node_info(n).c_str());
+       new_layer->setName(torch_tensorrt::core::util::node_info(n).c_str());
        ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));

        return true;
@@ -65,18 +65,18 @@ from setuptools import setup, Extension
 from torch.utils import cpp_extension


-# library_dirs should point to the libtrtorch.so, include_dirs should point to the dir that include the headers
+# library_dirs should point to the libtorch_tensorrt.so, include_dirs should point to the dir that include the headers
 # 1) download the latest package from https://github.com/NVIDIA/TRTorch/releases/
-# 2) Extract the file from downloaded package, we will get the "trtorch" directory
-# 3) Set trtorch_path to that directory
-trtorch_path = <PATH TO TRTORCH>
+# 2) Extract the file from downloaded package, we will get the "torch_tensorrt" directory
+# 3) Set torch_tensorrt_path to that directory
+torch_tensorrt_path = <PATH TO TRTORCH>


 ext_modules = [
     cpp_extension.CUDAExtension('elu_converter', ['./csrc/elu_converter.cpp'],
-                                library_dirs=[(trtorch_path + "/lib/")],
-                                libraries=["trtorch"],
-                                include_dirs=[trtorch_path + "/include/trtorch/"])
+                                library_dirs=[(torch_tensorrt_path + "/lib/")],
+                                libraries=["torch_tensorrt"],
+                                include_dirs=[torch_tensorrt_path + "/include/torch_tensorrt/"])
 ]

 setup(
@@ -88,7 +88,7 @@ setup(
 Make sure to include the path for header files in `include_dirs` and the path
 for dependent libraries in `library_dirs`. Generally speaking, you should download
 the latest package from [here](https://github.com/NVIDIA/TRTorch/releases), extract
-the files, and the set the `trtorch_path` to it. You could also add other compilation
+the files, and the set the `torch_tensorrt_path` to it. You could also add other compilation
 flags in cpp_extension if you need. Then, run above python scripts as:
 ```shell
 python3 setup.py install --user
@@ -104,7 +104,7 @@ We use `torch.ops.load_library` to load `.so`. For example, we could load the EL
 converter and use it in our application:
 ```python
 import torch
-import trtorch
+import torch_tensorrt

 # After "python3 setup install", you should find this .so file under generated "build" directory
 torch.ops.load_library('./elu_converter/build/lib.linux-x86_64-3.6/elu_converter.cpython-36m-x86_64-linux-gnu.so')
@@ -120,8 +120,8 @@ class Elu(torch.nn.Module):
         return self.elu(x)


-def cal_max_diff(pytorch_out, trtorch_out):
-    diff = torch.sub(pytorch_out, trtorch_out)
+def cal_max_diff(pytorch_out, torch_tensorrt_out):
+    diff = torch.sub(pytorch_out, torch_tensorrt_out)
     abs_diff = torch.abs(diff)
     max_diff = torch.max(abs_diff)
     print("Maximum differnce between TRTorch and PyTorch: \n", max_diff)
@@ -132,22 +132,22 @@ def main():

     scripted_model = torch.jit.script(model)
     compile_settings = {
-        "inputs": [trtorch.Input(
+        "inputs": [torch_tensorrt.Input(
             min_shape=[1024, 1, 32, 32],
             opt_shape=[1024, 1, 33, 33],
             max_shape=[1024, 1, 34, 34],
         )],
         "enabled_precisions": {torch.float, torch.half} # Run with FP16
     }
-    trt_ts_module = trtorch.compile(scripted_model, compile_settings)
+    trt_ts_module = torch_tensorrt.compile(scripted_model, compile_settings)
     input_data = torch.randn((1024, 1, 32, 32))
     input_data = input_data.half().to("cuda")
     pytorch_out = model.forward(input_data)

-    trtorch_out = trt_ts_module(input_data)
+    torch_tensorrt_out = trt_ts_module(input_data)
     print('PyTorch output: \n', pytorch_out[0, :, :, 0])
-    print('TRTorch output: \n', trtorch_out[0, :, :, 0])
-    cal_max_diff(pytorch_out, trtorch_out)
+    print('TRTorch output: \n', torch_tensorrt_out[0, :, :, 0])
+    cal_max_diff(pytorch_out, torch_tensorrt_out)


 if __name__ == "__main__":
````
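To make the README's load-and-use flow concrete, here is a hedged end-to-end sketch under the same assumptions as above: the `.so` path is the `setup.py` build artifact named earlier, a CUDA device is available, and the converter registers itself as a side effect of being loaded.

```python
import torch
import torch_tensorrt

# Loading the extension executes the converter's static registration,
# so aten::elu is handled by the custom TensorRT layer from here on.
# The .so path is the setup.py output from the README (illustrative).
torch.ops.load_library(
    "./elu_converter/build/lib.linux-x86_64-3.6/elu_converter.cpython-36m-x86_64-linux-gnu.so")


class Elu(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.elu = torch.nn.ELU()

    def forward(self, x):
        return self.elu(x)


scripted = torch.jit.script(Elu().eval())
trt_mod = torch_tensorrt.ts.compile(
    scripted,
    inputs=[torch_tensorrt.Input(min_shape=[1024, 1, 32, 32],
                                 opt_shape=[1024, 1, 33, 33],
                                 max_shape=[1024, 1, 34, 34])],
    enabled_precisions={torch.float, torch.half})  # allow FP16 kernels
print(trt_mod(torch.randn(1024, 1, 32, 32).half().cuda())[0, :, :, 0])
```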

examples/custom_converters/elu_converter/csrc/elu_converter.cpp
Lines changed: 4 additions & 4 deletions

```diff
@@ -3,11 +3,11 @@

 namespace my_custom_converters {

-auto actelu = trtorch::core::conversion::converters::RegisterNodeConversionPatterns().pattern(
+auto actelu = torch_tensorrt::core::conversion::converters::RegisterNodeConversionPatterns().pattern(
     {"aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> (Tensor)",
-     [](trtorch::core::conversion::ConversionCtx* ctx,
+     [](torch_tensorrt::core::conversion::ConversionCtx* ctx,
         const torch::jit::Node* n,
-        trtorch::core::conversion::converters::args& args) -> bool {
+        torch_tensorrt::core::conversion::converters::args& args) -> bool {
        auto in = args[0].ITensorOrFreeze(ctx);
        auto alpha = args[1].unwrapToDouble();

@@ -17,7 +17,7 @@ auto actelu = trtorch::core::conversion::converters::RegisterNodeConversionPatte
        }

        new_layer->setAlpha(alpha);
-       new_layer->setName(trtorch::core::util::node_info(n).c_str());
+       new_layer->setName(torch_tensorrt::core::util::node_info(n).c_str());
        ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));

        return true;
```

examples/custom_converters/elu_model.py
Lines changed: 9 additions & 9 deletions

```diff
@@ -1,5 +1,5 @@
 import torch
-import trtorch
+import torch_tensorrt

 # After "python3 setup install", you should find this .so file under generated "build" directory
 torch.ops.load_library('./elu_converter/build/lib.linux-x86_64-3.6/elu_converter.cpython-36m-x86_64-linux-gnu.so')
@@ -15,34 +15,34 @@ def forward(self, x):
         return self.elu(x)


-def cal_max_diff(pytorch_out, trtorch_out):
-    diff = torch.sub(pytorch_out, trtorch_out)
+def cal_max_diff(pytorch_out, torch_tensorrt_out):
+    diff = torch.sub(pytorch_out, torch_tensorrt_out)
     abs_diff = torch.abs(diff)
     max_diff = torch.max(abs_diff)
-    print("Maximum differnce between TRTorch and PyTorch: \n", max_diff)
+    print("Maximum differnce between Torch-TensorRT and PyTorch: \n", max_diff)


 def main():
     model = Elu().eval() #.cuda()

     scripted_model = torch.jit.script(model)
     compile_settings = {
-        "inputs": [trtorch.Input(
+        "inputs": [torch_tensorrt.Input(
             min_shape=[1024, 1, 32, 32],
             opt_shape=[1024, 1, 33, 33],
             max_shape=[1024, 1, 34, 34],
         )],
         "enabled_precisions": {torch.float, torch.half} # Run with FP16
     }
-    trt_ts_module = trtorch.compile(scripted_model, compile_settings)
+    trt_ts_module = torch_tensorrt.ts.compile(scripted_model, **compile_settings)
     input_data = torch.randn((1024, 1, 32, 32))
     input_data = input_data.half().to("cuda")
     pytorch_out = model.forward(input_data)

-    trtorch_out = trt_ts_module(input_data)
+    torch_tensorrt_out = trt_ts_module(input_data)
     print('PyTorch output: \n', pytorch_out[0, :, :, 0])
-    print('TRTorch output: \n', trtorch_out[0, :, :, 0])
-    cal_max_diff(pytorch_out, trtorch_out)
+    print('Torch-TensorRT output: \n', torch_tensorrt_out[0, :, :, 0])
+    cal_max_diff(pytorch_out, torch_tensorrt_out)


 if __name__ == "__main__":
```
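One call-site detail worth noting in the hunk above: `compile_settings` is a plain dict, so the renamed TorchScript frontend takes it via keyword expansion (`**compile_settings`), whereas the old `trtorch.compile` accepted the dict directly as its second argument. A minimal sketch of just that call, with a stand-in scripted module:

```python
import torch
import torch_tensorrt

scripted_model = torch.jit.script(torch.nn.ELU().eval())  # stand-in module

compile_settings = {
    "inputs": [torch_tensorrt.Input(min_shape=[1024, 1, 32, 32],
                                    opt_shape=[1024, 1, 33, 33],
                                    max_shape=[1024, 1, 34, 34])],
    "enabled_precisions": {torch.float, torch.half},  # run with FP16
}

# The dict is unpacked into keyword arguments of torch_tensorrt.ts.compile:
trt_ts_module = torch_tensorrt.ts.compile(scripted_model, **compile_settings)
```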

examples/int8/benchmark/BUILD
Lines changed: 1 addition & 1 deletion

```diff
@@ -10,7 +10,7 @@ cc_library(
         "benchmark.h",
     ],
     deps = [
-        "//cpp:trtorch",
+        "//cpp:torch_tensorrt",
         "@libtorch",
         "@libtorch//:caffe2",
     ],
```

examples/int8/benchmark/benchmark.cpp
Lines changed: 1 addition & 1 deletion

```diff
@@ -3,7 +3,7 @@
 #include "cuda_runtime_api.h"
 #include "torch/script.h"
 #include "torch/torch.h"
-#include "trtorch/trtorch.h"
+#include "torch_tensorrt/torch_tensorrt.h"

 #include "timer.h"

```