
Commit a146e8c

refactor the example directory and elu_converter.cpp
Signed-off-by: Bo Wang <[email protected]>
1 parent 1376536 commit a146e8c

File tree

5 files changed (+40, -73 lines)


examples/README.md renamed to examples/custom_converters/README.md

Lines changed: 13 additions & 39 deletions
@@ -22,36 +22,29 @@ Once we are clear about these rules and writing patterns, we can create a sepera
 #include "core/conversion/converters/converters.h"
 #include "core/util/prelude.h"
 
-namespace trtorch {
-namespace core {
-namespace conversion {
-namespace converters {
-namespace impl {
-namespace {
-
-auto acthardtanh TRTORCH_UNUSED = RegisterNodeConversionPatterns().pattern(
+namespace my_custom_converters {
+
+auto actelu = trtorch::core::conversion::converters::RegisterNodeConversionPatterns().pattern(
     {"aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> (Tensor)",
-     [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+     [](trtorch::core::conversion::ConversionCtx* ctx,
+        const torch::jit::Node* n,
+        trtorch::core::conversion::converters::args& args) -> bool {
       auto in = args[0].ITensorOrFreeze(ctx);
       auto alpha = args[1].unwrapToDouble();
 
       auto new_layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kELU);
-      TRTORCH_CHECK(new_layer, "Unable to create layer for aten::elu");
+      if (!(new_layer)) {
+        std::cerr << "Unable to create layer for aten::elu" << std::endl;
+      }
 
       new_layer->setAlpha(alpha);
-      new_layer->setName(util::node_info(n).c_str());
-      auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
+      new_layer->setName(trtorch::core::util::node_info(n).c_str());
+      ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
 
-      LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
       return true;
     }});
 
-} // namespace
-} // namespace impl
-} // namespace converters
-} // namespace conversion
-} // namespace core
-} // namespace trtorch
+
+} // namespace my_custom_converters
 ```
 
 ## Generate `.so` library
@@ -155,23 +148,4 @@ if __name__ == "__main__":
     main()
 
 ```
-Run this script, we can get the Tensor before and after ELU operator.
-### Example Output
-```bash
-PyTorch output:
-tensor([[ 0.8804,  2.4355, -0.7920, -0.2070, -0.5352,  0.4775,  1.3604, -0.3350,
-         -0.1802, -0.7563, -0.1758,  0.4067,  1.2510, -0.7100, -0.6221, -0.7207,
-         -0.1118,  0.9966,  1.6396, -0.1367, -0.5742,  0.5859,  0.8511,  0.6572,
-         -0.3481,  0.5933, -0.0488, -0.4287, -0.4102, -0.7402,  0.7515, -0.7710]],
-       device='cuda:0', dtype=torch.float16)
-TRTorch output:
-tensor([[ 0.8804,  2.4355, -0.7920, -0.2070, -0.5356,  0.4775,  1.3604, -0.3347,
-         -0.1802, -0.7563, -0.1758,  0.4067,  1.2510, -0.7100, -0.6221, -0.7207,
-         -0.1117,  0.9966,  1.6396, -0.1368, -0.5747,  0.5859,  0.8511,  0.6572,
-         -0.3484,  0.5933, -0.0486, -0.4285, -0.4102, -0.7402,  0.7515, -0.7710]],
-       device='cuda:0', dtype=torch.float16)
-Maximum differnce between TRTorch and PyTorch:
-tensor(0.0005, device='cuda:0', dtype=torch.float16)
-
-
-```
+Run this script, we can get the different outputs from PyTorch and TRTorch.
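For context, here is a minimal sketch of what such a comparison script might look like. The module definition, the `.so` path, and the compile-spec keys (`input_shapes`, `op_precision`) are assumptions based on TRTorch releases of this era, not taken from this commit; the authoritative script is the one shown in the README hunk above.

```python
# Hypothetical comparison script; paths and compile-spec keys are assumptions.
import torch
import trtorch

# Loading the shared library runs the static initializer in
# my_custom_converters, which registers the aten::elu converter with TRTorch.
torch.ops.load_library("./elu_converter.so")


class Elu(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.elu(x)


model = Elu().eval().cuda()
scripted = torch.jit.script(model)
x = torch.randn(1, 32).cuda().half()

# Compile the scripted module; aten::elu is handled by the custom converter.
trt_module = trtorch.compile(scripted, {
    "input_shapes": [list(x.shape)],
    "op_precision": torch.half,
})

print("PyTorch output:\n", model(x))
print("TRTorch output:\n", trt_module(x))
print("Maximum difference:\n", (model(x) - trt_module(x)).abs().max())
```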
examples/custom_converters/elu_converter/elu_converter.cpp

Lines changed: 26 additions & 0 deletions

@@ -0,0 +1,26 @@
+#include "core/conversion/converters/converters.h"
+#include "core/util/prelude.h"
+
+namespace my_custom_converters {
+
+auto actelu = trtorch::core::conversion::converters::RegisterNodeConversionPatterns().pattern(
+    {"aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> (Tensor)",
+     [](trtorch::core::conversion::ConversionCtx* ctx,
+        const torch::jit::Node* n,
+        trtorch::core::conversion::converters::args& args) -> bool {
+      auto in = args[0].ITensorOrFreeze(ctx);
+      auto alpha = args[1].unwrapToDouble();
+
+      auto new_layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kELU);
+      if (!(new_layer)) {
+        std::cerr << "Unable to create layer for aten::elu" << std::endl;
+      }
+
+      new_layer->setAlpha(alpha);
+      new_layer->setName(trtorch::core::util::node_info(n).c_str());
+      ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
+
+      return true;
+     }});
+
+} // namespace my_custom_converters
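Note the design choice in this standalone version: it drops the internal TRTORCH_CHECK and LOG_DEBUG macros in favor of plain std::cerr, so the example depends only on the public headers, and it fully qualifies the trtorch::core names instead of reopening the library's namespaces. Registration still happens at library load time, since `actelu` is a global whose initializer calls RegisterNodeConversionPatterns().pattern(...).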

examples/elu_converter/setup.py renamed to examples/custom_converters/elu_converter/setup.py

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@
 # 1) download the latest package from https://github.com/NVIDIA/TRTorch/releases/
 # 2) Extract the file from downloaded package, we will get the "trtorch" directory
 # 3) Set trtorch_path to that directory
-trtorch_path = os.path.abspath("trtorch")
+trtorch_path = os.path.abspath("/home/bowa/Downloads/trtorch")
 
 ext_modules = [
     cpp_extension.CUDAExtension('elu_converter', ['elu_converter.cpp'],
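For reference, the full setup.py around this hunk looks roughly like the sketch below. The CUDAExtension keyword arguments are assumptions based on the standard torch.utils.cpp_extension API, since the diff only shows the first positional arguments.

```python
# Rough sketch of the surrounding setup.py; kwargs beyond the sources list
# are assumptions, not part of this hunk.
import os
from setuptools import setup
from torch.utils import cpp_extension

# Point this at an extracted TRTorch release so the headers and
# libtrtorch.so can be found at build time.
trtorch_path = os.path.abspath("trtorch")

ext_modules = [
    cpp_extension.CUDAExtension(
        'elu_converter', ['elu_converter.cpp'],
        include_dirs=[os.path.join(trtorch_path, 'include')],
        library_dirs=[os.path.join(trtorch_path, 'lib')],
        libraries=['trtorch'],
    )
]

setup(
    name='elu_converter',
    ext_modules=ext_modules,
    cmdclass={'build_ext': cpp_extension.BuildExtension},
)
```

Running `python3 setup.py install` with this file then builds the shared library that the comparison script loads before calling trtorch.compile.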

examples/elu_converter/elu_converter.cpp

Lines changed: 0 additions & 33 deletions
This file was deleted.
