@@ -42,24 +42,25 @@ convert(tanh, kTANH);

 auto acthardtanh TRTORCH_UNUSED =
     RegisterNodeConversionPatterns()
-        .pattern({"aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> (Tensor)",
-                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    auto in = args[0].ITensorOrFreeze(ctx);
-                    auto min = args[1].unwrapToDouble();
-                    auto max = args[2].unwrapToDouble();
+        .pattern(
+            {"aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> (Tensor)",
+             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+               auto in = args[0].ITensorOrFreeze(ctx);
+               auto min = args[1].unwrapToDouble();
+               auto max = args[2].unwrapToDouble();

-                    auto new_layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kCLIP);
-                    TRTORCH_CHECK(new_layer, "Unable to create layer for aten::hardtanh");
+               auto new_layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kCLIP);
+               TRTORCH_CHECK(new_layer, "Unable to create layer for aten::hardtanh");

-                    new_layer->setAlpha(min);
-                    new_layer->setBeta(max);
+               new_layer->setAlpha(min);
+               new_layer->setBeta(max);

-                    new_layer->setName(util::node_info(n).c_str());
-                    auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
+               new_layer->setName(util::node_info(n).c_str());
+               auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));

-                    LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
-                    return true;
-                  }})
+               LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
+               return true;
+             }})
         .pattern({// TODO: Remove after functionalization
                   "aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> (Tensor(a!))",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
@@ -125,34 +126,35 @@ auto acthardtanh TRTORCH_UNUSED =
                     LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
                     return true;
                   }})
-        .pattern({"aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> (Tensor)",
-                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    auto self = args[0].ITensorOrFreeze(ctx);
-                    auto negative_slopeScalar = args[1].unwrapToScalar().to<float>();
+        .pattern(
+            {"aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> (Tensor)",
+             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+               auto self = args[0].ITensorOrFreeze(ctx);
+               auto negative_slopeScalar = args[1].unwrapToScalar().to<float>();

-                    auto new_layer = ctx->net->addActivation(*self, nvinfer1::ActivationType::kLEAKY_RELU);
-                    new_layer->setAlpha(negative_slopeScalar);
+               auto new_layer = ctx->net->addActivation(*self, nvinfer1::ActivationType::kLEAKY_RELU);
+               new_layer->setAlpha(negative_slopeScalar);

-                    new_layer->setName(util::node_info(n).c_str());
-                    auto out_tensor = new_layer->getOutput(0);
-                    out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
-                    LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
-                    return true;
-                  }})
-        .pattern({"aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)",
-                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-                    auto self = args[0].ITensorOrFreeze(ctx);
-                    auto negative_slopeScalar = args[1].unwrapToScalar().to<float>();
-
-                    auto new_layer = ctx->net->addActivation(*self, nvinfer1::ActivationType::kLEAKY_RELU);
-                    new_layer->setAlpha(negative_slopeScalar);
+               new_layer->setName(util::node_info(n).c_str());
+               auto out_tensor = new_layer->getOutput(0);
+               out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
+               LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
+               return true;
+             }})
+        .pattern(
+            {"aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)",
+             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+               auto self = args[0].ITensorOrFreeze(ctx);
+               auto negative_slopeScalar = args[1].unwrapToScalar().to<float>();

-                    new_layer->setName(util::node_info(n).c_str());
-                    auto out_tensor = new_layer->getOutput(0);
-                    out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
-                    LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
-                    return true;
-                  }})
+               auto new_layer = ctx->net->addActivation(*self, nvinfer1::ActivationType::kLEAKY_RELU);
+               new_layer->setAlpha(negative_slopeScalar);
+               new_layer->setName(util::node_info(n).c_str());
+               auto out_tensor = new_layer->getOutput(0);
+               out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
+               LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
+               return true;
+             }})
         .pattern({"aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
                     auto in = args[0].ITensorOrFreeze(ctx);
@@ -167,7 +169,33 @@ auto acthardtanh TRTORCH_UNUSED =
                     auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
                     LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
                     return true;
-                  }});
+                  }})
+        .pattern(
+            {"aten::gelu(Tensor self) -> (Tensor)",
+             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+               auto in = args[0].ITensorOrFreeze(ctx);
+               nvinfer1::DataType type = in->getType();
+               TRTORCH_CHECK(
+                   type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF,
+                   "gelu only supports kFLOAT and kHALF");
+               std::string pluginName = "CustomGeluPluginDynamic";
+               nvinfer1::PluginFieldCollection fc;
+               std::vector<nvinfer1::PluginField> f;
+               int type_id = 0; // Integer encoding the DataType (0: FP32, 1: FP16)
+               if (type == nvinfer1::DataType::kHALF)
+                 type_id = 1;
+               f.emplace_back(nvinfer1::PluginField("type_id", &type_id, nvinfer1::PluginFieldType::kINT32, 1));
+               fc.nbFields = f.size();
+               fc.fields = f.data();
+               nvinfer1::IPluginV2* pluginV2 = ctx->mPluginRegistry.at(pluginName)->createPlugin("gelu", &fc);
+               TRTORCH_CHECK(pluginV2, "Unable to create gelu plugin from TensorRT plugin registry" << *n);
+               auto new_layer = ctx->net->addPluginV2(reinterpret_cast<nvinfer1::ITensor* const*>(&in), 1, *pluginV2);
+               new_layer->setName("gelu");
+               auto out_tensor = new_layer->getOutput(0);
+               out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
+               LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
+               return true;
+             }});

 } // namespace
 } // namespace impl
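
For anyone who wants to exercise the GELU path outside of TRTorch, below is a minimal sketch of the same plugin-creation flow, going through TensorRT's global plugin registry instead of `ctx->mPluginRegistry`. It is not part of this commit: it assumes TensorRT 7.x with `libnvinfer_plugin` available, assumes the `CustomGeluPluginDynamic` creator is registered under version "1", and the helper name `make_gelu_plugin` is purely illustrative.

```cpp
// Sketch only: builds the BERT GELU plugin the way the aten::gelu converter
// above does, but via the global TensorRT plugin registry.
#include <NvInfer.h>
#include <NvInferPlugin.h>
#include <vector>

nvinfer1::IPluginV2* make_gelu_plugin(nvinfer1::DataType type) {
  // The GELU plugin ships with libnvinfer_plugin and must be registered first.
  initLibNvInferPlugins(nullptr, "");

  // type_id encodes the precision the plugin should run in (0: FP32, 1: FP16),
  // mirroring the single kINT32 field the converter populates.
  int type_id = (type == nvinfer1::DataType::kHALF) ? 1 : 0;

  std::vector<nvinfer1::PluginField> fields;
  fields.emplace_back(nvinfer1::PluginField("type_id", &type_id, nvinfer1::PluginFieldType::kINT32, 1));

  nvinfer1::PluginFieldCollection fc;
  fc.nbFields = static_cast<int>(fields.size());
  fc.fields = fields.data();

  // Look the creator up by name/version (version "1" is an assumption here)
  // and build the plugin instance from the field collection.
  auto creator = getPluginRegistry()->getPluginCreator("CustomGeluPluginDynamic", "1");
  if (!creator) {
    return nullptr; // plugin library not loaded or creator not registered
  }
  return creator->createPlugin("gelu", &fc);
}
```

The converter itself reaches the creator through the `ConversionCtx`'s plugin registry map rather than the global registry, but the field collection it hands to `createPlugin` is the same, so a plugin built this way should behave identically.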