Skip to content

Commit 8b6d80c

Browse files
committed
Updated the code for requested changes
Signed-off-by: Bo Wang <[email protected]>
1 parent 2ae7cd1 commit 8b6d80c

File tree

3 files changed

+47
-30
lines changed

3 files changed

+47
-30
lines changed

examples/README.md

Lines changed: 25 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -66,14 +66,17 @@ from torch.utils import cpp_extension
6666
6767
dir_path = os.path.dirname(os.path.realpath(__file__))
6868
69+
# library_dirs should point to the libtrtorch.so, include_dirs should point to the directory that includes the headers
70+
# 1) download the latest package from https://github.com/NVIDIA/TRTorch/releases/
71+
# 2) Extract the files from the downloaded package, we will get the "trtorch" directory
72+
# 3) Set trtorch_path to that directory
73+
trtorch_path = os.path.abspath("trtorch")
74+
6975
ext_modules = [
7076
cpp_extension.CUDAExtension('elu_converter', ['elu_converter.cpp'],
71-
library_dirs=[(
72-
dir_path + "/../../bazel-bin/cpp/api/lib/"
73-
)],
77+
library_dirs=[(trtorch_path + "/lib/")],
7478
libraries=["trtorch"],
75-
include_dirs=[dir_path + "/../../"]
76-
)
79+
include_dirs=[trtorch_path + "/include/trtorch/"])
7780
]
7881
7982
setup(
@@ -83,7 +86,9 @@ setup(
8386
)
8487
```
8588
Make sure to include the path for header files in `include_dirs` and the path
86-
for dependent libraries in `library_dirs`. You could also add other compilation
89+
for dependent libraries in `library_dirs`. Generally speaking, you should download
90+
the latest package from [here](https://github.com/NVIDIA/TRTorch/releases), extract
91+
the files, and then set the `trtorch_path` to it. You could also add other compilation
8792
flags in cpp_extension if you need. Then, run above python scripts as:
8893
```shell
8994
python3 setup.py install --user
@@ -140,20 +145,20 @@ if __name__ == "__main__":
140145
Run this script, we can get the Tensor before and after ELU operator.
141146
### Example Output
142147
```bash
143-
graph(%self : __torch__.Elu,
144-
%x.1 : Tensor):
145-
%2 : __torch__.torch.nn.modules.activation.ELU = prim::GetAttr[name="elu"](%self)
146-
%4 : Tensor = prim::CallMethod[name="forward"](%2, %x.1) # elu_converter_test.py:13:15
147-
return (%4)
148-
149-
tensor([[ 1.3482, 1.9848, -1.0818, -1.3252, 0.2470, 0.7011, 0.3174, -1.8349,
150-
0.3024, -0.0453, -0.0681, -1.7377, 1.5909, 0.2549, -0.3029, 0.2583,
151-
0.0242, 2.0748, -0.5454, 0.7137, 1.6688, 0.7108, -0.8681, 0.2486,
152-
-1.3981, 1.0241, 1.2413, 0.2725, 1.4265, 0.9329, 0.4020, -2.6813]])
153-
tensor([[ 1.3486, 1.9844, -0.6611, -0.7344, 0.2471, 0.7012, 0.3174, -0.8403,
154-
0.3025, -0.0443, -0.0659, -0.8242, 1.5908, 0.2549, -0.2615, 0.2583,
155-
0.0242, 2.0742, -0.4204, 0.7139, 1.6689, 0.7109, -0.5801, 0.2485,
156-
-0.7529, 1.0244, 1.2412, 0.2725, 1.4268, 0.9331, 0.4021, -0.9316]],
148+
PyTorch output:
149+
tensor([[ 0.8804, 2.4355, -0.7920, -0.2070, -0.5352, 0.4775, 1.3604, -0.3350,
150+
-0.1802, -0.7563, -0.1758, 0.4067, 1.2510, -0.7100, -0.6221, -0.7207,
151+
-0.1118, 0.9966, 1.6396, -0.1367, -0.5742, 0.5859, 0.8511, 0.6572,
152+
-0.3481, 0.5933, -0.0488, -0.4287, -0.4102, -0.7402, 0.7515, -0.7710]],
153+
device='cuda:0', dtype=torch.float16)
154+
TRTorch output:
155+
tensor([[ 0.8804, 2.4355, -0.7920, -0.2070, -0.5356, 0.4775, 1.3604, -0.3347,
156+
-0.1802, -0.7563, -0.1758, 0.4067, 1.2510, -0.7100, -0.6221, -0.7207,
157+
-0.1117, 0.9966, 1.6396, -0.1368, -0.5747, 0.5859, 0.8511, 0.6572,
158+
-0.3484, 0.5933, -0.0486, -0.4285, -0.4102, -0.7402, 0.7515, -0.7710]],
157159
device='cuda:0', dtype=torch.float16)
160+
Maximum difference between TRTorch and PyTorch:
161+
tensor(0.0005, device='cuda:0', dtype=torch.float16)
162+
158163

159164
```

examples/elu_converter/elu_converter_test.py

Lines changed: 14 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import torch
22
import trtorch
33

4+
# After "python3 setup.py install", you should find this .so file under generated "build" directory
45
torch.ops.load_library('./build/lib.linux-x86_64-3.6/elu_converter.cpython-36m-x86_64-linux-gnu.so')
56

67

@@ -14,14 +15,17 @@ def forward(self, x):
1415
return self.elu(x)
1516

1617

18+
def MaxDiff(pytorch_out, trtorch_out):
19+
diff = torch.sub(pytorch_out, trtorch_out)
20+
abs_diff = torch.abs(diff)
21+
max_diff = torch.max(abs_diff)
22+
print("Maximum difference between TRTorch and PyTorch: \n", max_diff)
23+
24+
1725
def main():
18-
data = torch.randn((1, 1, 2, 2)).to("cuda")
1926
model = Elu().eval() #.cuda()
2027

21-
# traced_model = torch.jit.trace(model, [data])
2228
scripted_model = torch.jit.script(model)
23-
print(scripted_model.graph)
24-
# torch.jit.save(scripted_model, 'elu.jit')
2529
compile_settings = {
2630
"input_shapes": [{
2731
"min": [1024, 1, 32, 32],
@@ -33,11 +37,13 @@ def main():
3337
}
3438
trt_ts_module = trtorch.compile(scripted_model, compile_settings)
3539
input_data = torch.randn((1024, 1, 32, 32))
36-
print(input_data[0, :, :, 0])
3740
input_data = input_data.half().to("cuda")
38-
result = trt_ts_module(input_data)
39-
print(result[0, :, :, 0])
40-
# torch.jit.save(trt_ts_module, "trt_ts_module.ts")
41+
pytorch_out = model.forward(input_data)
42+
43+
trtorch_out = trt_ts_module(input_data)
44+
print('PyTorch output: \n', pytorch_out[0, :, :, 0])
45+
print('TRTorch output: \n', trtorch_out[0, :, :, 0])
46+
MaxDiff(pytorch_out, trtorch_out)
4147

4248

4349
if __name__ == "__main__":

examples/elu_converter/setup.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,11 +4,17 @@
44

55
dir_path = os.path.dirname(os.path.realpath(__file__))
66

7+
# library_dirs should point to the libtrtorch.so, include_dirs should point to the directory that includes the headers
8+
# 1) download the latest package from https://github.com/NVIDIA/TRTorch/releases/
9+
# 2) Extract the files from the downloaded package, we will get the "trtorch" directory
10+
# 3) Set trtorch_path to that directory
11+
trtorch_path = os.path.abspath("trtorch")
12+
713
ext_modules = [
814
cpp_extension.CUDAExtension('elu_converter', ['elu_converter.cpp'],
9-
library_dirs=[(dir_path + "/../../bazel-bin/cpp/api/lib/")],
15+
library_dirs=[(trtorch_path + "/lib/")],
1016
libraries=["trtorch"],
11-
include_dirs=[dir_path + "/../../"])
17+
include_dirs=[trtorch_path + "/include/trtorch/"])
1218
]
1319

1420
setup(

0 commit comments

Comments
 (0)