@@ -22,36 +22,29 @@ Once we are clear about these rules and writing patterns, we can create a sepera
 #include "core/conversion/converters/converters.h"
 #include "core/util/prelude.h"

-namespace trtorch {
-namespace core {
-namespace conversion {
-namespace converters {
-namespace impl {
-namespace {
-
-auto acthardtanh TRTORCH_UNUSED = RegisterNodeConversionPatterns().pattern(
+namespace my_custom_converters {
+
+auto actelu = trtorch::core::conversion::converters::RegisterNodeConversionPatterns().pattern(
     {"aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> (Tensor)",
-     [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+     [](trtorch::core::conversion::ConversionCtx* ctx,
+        const torch::jit::Node* n,
+        trtorch::core::conversion::converters::args& args) -> bool {
       auto in = args[0].ITensorOrFreeze(ctx);
       auto alpha = args[1].unwrapToDouble();

       auto new_layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kELU);
-      TRTORCH_CHECK(new_layer, "Unable to create layer for aten::elu");
+      if (!(new_layer)) {
+        std::cerr << "Unable to create layer for aten::elu" << std::endl;
+      }

       new_layer->setAlpha(alpha);
-      new_layer->setName(util::node_info(n).c_str());
-      auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
+      new_layer->setName(trtorch::core::util::node_info(n).c_str());
+      ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));

-      LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
       return true;
     }});

-} // namespace
-} // namespace impl
-} // namespace converters
-} // namespace conversion
-} // namespace core
-} // namespace trtorch
+} // namespace my_custom_converters
 ```

 ## Generate `.so` library
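The part of the tutorial between these two hunks (not shown in this diff) builds the converter source into a shared library. As a rough illustration only, a minimal `setup.py` sketch using PyTorch's `torch.utils.cpp_extension` helpers might look like the following; the source file name, extension name, and TRTorch install paths are placeholders and may differ from the tutorial's actual build recipe.

```python
# Hypothetical setup.py sketch: file names, paths, and the extension name are
# placeholders, not the tutorial's actual build configuration.
from setuptools import setup
from torch.utils import cpp_extension

setup(
    name="elu_converter",
    ext_modules=[
        cpp_extension.CppExtension(
            name="elu_converter",
            sources=["elu_converter.cpp"],              # the C++ converter shown above
            include_dirs=["/path/to/trtorch/include"],  # assumed TRTorch header location
            library_dirs=["/path/to/trtorch/lib"],      # assumed TRTorch library location
            libraries=["trtorch"],                      # link against libtrtorch.so
        )
    ],
    cmdclass={"build_ext": cpp_extension.BuildExtension},
)
```

Running `python setup.py install` with such a file would produce a shared object whose static initializer (the `actelu` registration above) registers the converter as soon as the library is loaded.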
@@ -155,23 +148,4 @@ if __name__ == "__main__":
     main()

 ```
-Run this script, we can get the Tensor before and after ELU operator.
-### Example Output
-```bash
-PyTorch output:
-tensor([[ 0.8804,  2.4355, -0.7920, -0.2070, -0.5352,  0.4775,  1.3604, -0.3350,
-         -0.1802, -0.7563, -0.1758,  0.4067,  1.2510, -0.7100, -0.6221, -0.7207,
-         -0.1118,  0.9966,  1.6396, -0.1367, -0.5742,  0.5859,  0.8511,  0.6572,
-         -0.3481,  0.5933, -0.0488, -0.4287, -0.4102, -0.7402,  0.7515, -0.7710]],
-       device='cuda:0', dtype=torch.float16)
-TRTorch output:
-tensor([[ 0.8804,  2.4355, -0.7920, -0.2070, -0.5356,  0.4775,  1.3604, -0.3347,
-         -0.1802, -0.7563, -0.1758,  0.4067,  1.2510, -0.7100, -0.6221, -0.7207,
-         -0.1117,  0.9966,  1.6396, -0.1368, -0.5747,  0.5859,  0.8511,  0.6572,
-         -0.3484,  0.5933, -0.0486, -0.4285, -0.4102, -0.7402,  0.7515, -0.7710]],
-       device='cuda:0', dtype=torch.float16)
-Maximum differnce between TRTorch and PyTorch:
-tensor(0.0005, device='cuda:0', dtype=torch.float16)
-
-
-```
+By running this script, we can compare the outputs from PyTorch and TRTorch.
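The removed example output above quoted the maximum difference between the two results. To reproduce such a number from the script's outputs, a minimal comparison sketch is shown below; the helper name `max_abs_diff` and the stand-in tensors are hypothetical, not part of the tutorial's script.

```python
# Hypothetical comparison helper: names and stand-in tensors are placeholders,
# not part of the tutorial's test script.
import torch

def max_abs_diff(pytorch_out: torch.Tensor, trtorch_out: torch.Tensor) -> torch.Tensor:
    """Largest element-wise deviation between the two backend outputs."""
    return torch.max(torch.abs(pytorch_out.float() - trtorch_out.float()))

# Stand-in tensors; the real script would pass the outputs it already computes.
a = torch.tensor([0.8804, -0.7920, 0.4775], dtype=torch.float16)
b = torch.tensor([0.8799, -0.7925, 0.4775], dtype=torch.float16)
print(max_abs_diff(a, b))
```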