@@ -42,25 +42,24 @@ convert(tanh, kTANH);
 
 auto acthardtanh TRTORCH_UNUSED =
     RegisterNodeConversionPatterns()
-        .pattern(
-            {"aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> (Tensor)",
-             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-               auto in = args[0].ITensorOrFreeze(ctx);
-               auto min = args[1].unwrapToDouble();
-               auto max = args[2].unwrapToDouble();
+        .pattern({"aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> (Tensor)",
+                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+                    auto in = args[0].ITensorOrFreeze(ctx);
+                    auto min = args[1].unwrapToDouble();
+                    auto max = args[2].unwrapToDouble();
 
-               auto new_layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kCLIP);
-               TRTORCH_CHECK(new_layer, "Unable to create layer for aten::hardtanh");
+                    auto new_layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kCLIP);
+                    TRTORCH_CHECK(new_layer, "Unable to create layer for aten::hardtanh");
 
-               new_layer->setAlpha(min);
-               new_layer->setBeta(max);
+                    new_layer->setAlpha(min);
+                    new_layer->setBeta(max);
 
-               new_layer->setName(util::node_info(n).c_str());
-               auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
+                    new_layer->setName(util::node_info(n).c_str());
+                    auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
 
-               LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
-               return true;
-             }})
+                    LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
+                    return true;
+                  }})
         .pattern({// TODO: Remove after functionalization
             "aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> (Tensor(a!))",
             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
@@ -126,35 +125,33 @@ auto acthardtanh TRTORCH_UNUSED =
                     LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
                     return true;
                   }})
-        .pattern(
-            {"aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> (Tensor)",
-             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-               auto self = args[0].ITensorOrFreeze(ctx);
-               auto negative_slopeScalar = args[1].unwrapToScalar().to<float>();
+        .pattern({"aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> (Tensor)",
+                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+                    auto self = args[0].ITensorOrFreeze(ctx);
+                    auto negative_slopeScalar = args[1].unwrapToScalar().to<float>();
 
-               auto new_layer = ctx->net->addActivation(*self, nvinfer1::ActivationType::kLEAKY_RELU);
-               new_layer->setAlpha(negative_slopeScalar);
+                    auto new_layer = ctx->net->addActivation(*self, nvinfer1::ActivationType::kLEAKY_RELU);
+                    new_layer->setAlpha(negative_slopeScalar);
 
-               new_layer->setName(util::node_info(n).c_str());
-               auto out_tensor = new_layer->getOutput(0);
-               out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
-               LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
-               return true;
-             }})
-        .pattern(
-            {"aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)",
-             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-               auto self = args[0].ITensorOrFreeze(ctx);
-               auto negative_slopeScalar = args[1].unwrapToScalar().to<float>();
+                    new_layer->setName(util::node_info(n).c_str());
+                    auto out_tensor = new_layer->getOutput(0);
+                    out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
+                    LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
+                    return true;
+                  }})
+        .pattern({"aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)",
+                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+                    auto self = args[0].ITensorOrFreeze(ctx);
+                    auto negative_slopeScalar = args[1].unwrapToScalar().to<float>();
 
-               auto new_layer = ctx->net->addActivation(*self, nvinfer1::ActivationType::kLEAKY_RELU);
-               new_layer->setAlpha(negative_slopeScalar);
-               new_layer->setName(util::node_info(n).c_str());
-               auto out_tensor = new_layer->getOutput(0);
-               out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
-               LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
-               return true;
-             }})
+                    auto new_layer = ctx->net->addActivation(*self, nvinfer1::ActivationType::kLEAKY_RELU);
+                    new_layer->setAlpha(negative_slopeScalar);
+                    new_layer->setName(util::node_info(n).c_str());
+                    auto out_tensor = new_layer->getOutput(0);
+                    out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
+                    LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
+                    return true;
+                  }})
         .pattern({"aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> (Tensor)",
                   [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
                     auto in = args[0].ITensorOrFreeze(ctx);
@@ -169,33 +166,36 @@ auto acthardtanh TRTORCH_UNUSED =
                     auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
                     LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
                     return true;
-             }})
-        .pattern(
-            {"aten::gelu(Tensor self) -> (Tensor)",
-             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-               auto in = args[0].ITensorOrFreeze(ctx);
-               nvinfer1::DataType type = in->getType();
-               TRTORCH_CHECK(
-                   type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF,
-                   "gelu only supports kFLOAT and kHALF");
-               std::string pluginName = "CustomGeluPluginDynamic";
-               nvinfer1::PluginFieldCollection fc;
-               std::vector<nvinfer1::PluginField> f;
-               int type_id = 0; // Integer encoding the DataType (0: FP32, 1: FP16)
-               if (type == nvinfer1::DataType::kHALF)
-                 type_id = 1;
-               f.emplace_back(nvinfer1::PluginField("type_id", &type_id, nvinfer1::PluginFieldType::kINT32, 1));
-               fc.nbFields = f.size();
-               fc.fields = f.data();
-               nvinfer1::IPluginV2* pluginV2 = ctx->mPluginRegistry.at(pluginName)->createPlugin("gelu", &fc);
-               TRTORCH_CHECK(pluginV2, "Unable to create gelu plugin from TensorRT plugin registry" << *n);
-               auto new_layer = ctx->net->addPluginV2(reinterpret_cast<nvinfer1::ITensor* const*>(&in), 1, *pluginV2);
-               new_layer->setName("gelu");
-               auto out_tensor = new_layer->getOutput(0);
-               out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
-               LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
-               return true;
-             }});
+                  }})
+        .pattern({"aten::gelu(Tensor self) -> (Tensor)",
+                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+                    auto in = args[0].ITensorOrFreeze(ctx);
+                    nvinfer1::DataType type = in->getType();
+                    TRTORCH_CHECK(
+                        type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF,
+                        "gelu only supports kFLOAT and kHALF");
+                    std::string pluginName = "CustomGeluPluginDynamic";
+                    nvinfer1::PluginFieldCollection fc;
+                    std::vector<nvinfer1::PluginField> f;
+                    int type_id = ctx->settings.op_precision == nvinfer1::DataType::kFLOAT
+                        ? 0
+                        : 1; // Integer encoding the DataType (0: FP32, 1: FP16)
+                    f.emplace_back(nvinfer1::PluginField("type_id", &type_id, nvinfer1::PluginFieldType::kINT32, 1));
+                    fc.nbFields = f.size();
+                    fc.fields = f.data();
+
+                    auto creator = getPluginRegistry()->getPluginCreator("CustomGeluPluginDynamic", "1", "");
+                    auto gelu_plugin = creator->createPlugin("gelu", &fc);
+
+                    TRTORCH_CHECK(gelu_plugin, "Unable to create gelu plugin from TensorRT plugin registry" << *n);
+                    auto new_layer =
+                        ctx->net->addPluginV2(reinterpret_cast<nvinfer1::ITensor* const*>(&in), 1, *gelu_plugin);
+                    new_layer->setName("gelu");
+                    auto out_tensor = new_layer->getOutput(0);
+                    out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
+                    LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
+                    return true;
+                  }});
 
 } // namespace
 } // namespace impl
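
For context, the registration API touched in this diff follows a single shape: a schema string plus a converter lambda passed to .pattern({...}). Below is a minimal sketch of a converter written in the reformatted style; aten::relu is used purely as an illustration and is not part of this commit, and it assumes the same ConversionCtx, args, util::node_info, and TRTORCH_CHECK helpers that appear above.

auto actrelu_example TRTORCH_UNUSED =
    RegisterNodeConversionPatterns()
        .pattern({"aten::relu(Tensor self) -> (Tensor)",
                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
                    // Resolve the first argument to an ITensor (freezing constants if needed)
                    auto in = args[0].ITensorOrFreeze(ctx);

                    // A single TensorRT activation layer implements the op
                    auto new_layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kRELU);
                    TRTORCH_CHECK(new_layer, "Unable to create layer for aten::relu");

                    // Name the layer after the JIT node and bind its output back to the graph value
                    new_layer->setName(util::node_info(n).c_str());
                    auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));

                    LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
                    return true;
                  }});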