Commit b3be1a3 (1 parent: f04e3be)

Fix layer_norm.cpp linter error.

Signed-off-by: Yu-Te Cheng <[email protected]>

1 file changed: core/conversion/converters/impl/layer_norm.cpp (+47 -35)
@@ -24,7 +24,8 @@ nvinfer1::ILayer* add_elementwise(
   auto selfDim = util::toVec(self->getDimensions());
   auto otherDim = util::toVec(other->getDimensions());
   if (selfDim.size() != otherDim.size()) {
-    // other is with dynamic shape, need to expand its dimension now and get its shape at runtime
+    // other is with dynamic shape, need to expand its dimension now and get its
+    // shape at runtime
     if (otherDim.end() != std::find(otherDim.begin(), otherDim.end(), -1)) {
       auto thOtherStaticShapeMask = torch::ones(selfDim.size(), torch::kInt32);
       auto thOtherDynamicShapeMask = torch::zeros(selfDim.size(), torch::kInt32);
@@ -39,7 +40,8 @@ nvinfer1::ILayer* add_elementwise(
       auto otherStaticShapeMask = tensor_to_const(ctx, thOtherStaticShapeMask);
       auto otherDynamicShapeMask = tensor_to_const(ctx, thOtherDynamicShapeMask);
       auto selfShape = ctx->net->addShape(*self)->getOutput(0);
-      // size of dynamic dimension of other need to the same as that of corresponding dimension of self
+      // size of dynamic dimension of other need to the same as that of
+      // corresponding dimension of self
       auto otherDynamicShape =
           ctx->net->addElementWise(*selfShape, *otherDynamicShapeMask, nvinfer1::ElementWiseOperation::kPROD)
               ->getOutput(0);
@@ -52,7 +54,8 @@ nvinfer1::ILayer* add_elementwise(
       otherShuffle->setInput(1, *targetOtherShape);
       other = otherShuffle->getOutput(0);
     } else {
-      // other is with static shape, expand dimension to make tow tensor have the same number of dimension
+      // other is with static shape, expand dimension to make tow tensor have
+      // the same number of dimension
       auto otherShuffle = ctx->net->addShuffle(*other);
       otherShuffle->setReshapeDimensions(util::toDimsPad(otherDim, selfDim.size()));
       other = otherShuffle->getOutput(0);
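
Note on the dynamic-shape branch above: the two masks let the converter resolve each -1 ("dynamic") dimension of other from self's runtime shape while keeping other's static sizes, using only elementwise shape-tensor math. A minimal standalone sketch of that arithmetic, assuming other has already been padded to self's rank (the helper name and host-side types are illustrative, not converter code):

  #include <cstdint>
  #include <vector>

  // target[i] = self[i] * dynamicMask[i] + other[i] * staticMask[i]:
  // dimensions where other reports -1 take self's runtime size,
  // all remaining dimensions keep other's static size.
  std::vector<int64_t> broadcast_target_shape(
      const std::vector<int64_t>& self,
      const std::vector<int64_t>& other /* padded to self.size(); -1 = dynamic */) {
    std::vector<int64_t> target(self.size());
    for (size_t i = 0; i < self.size(); i++) {
      const int64_t dynamicMask = (other[i] == -1) ? 1 : 0;
      const int64_t staticMask = 1 - dynamicMask;
      target[i] = self[i] * dynamicMask + other[i] * staticMask;
    }
    return target;
  }

  // e.g. self = {8, 3, 224, 224}, other = {1, -1, 224, 224}
  //      -> target = {1, 3, 224, 224}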
@@ -68,28 +71,26 @@ nvinfer1::ILayer* add_elementwise(
   return ele;
 }
 
-
 auto layer_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().pattern({
     R"SIG(aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? gamma, Tensor? beta,
                           float eps, bool cudnn_enabled) -> (Tensor))SIG",
     [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-
       auto input = args[0].ITensor(); // assumes non-static input Tensor
       auto orig_shape = input->getDimensions();
       auto shape = util::toVec(orig_shape);
 
       /* Layer_Norm normalizes over last N dimensions.
          normalizaed_shape could be (C,H,W), (H,W), or (W). */
-
+
       auto normalized_shape = args[1].unwrapToIntList();
       auto normalized_shape_vec = util::toVec(util::toDims(normalized_shape));
 
-
       torch::Tensor gamma, beta;
       gamma = args[2].unwrapToTensor();
       beta = args[3].unwrapToTensor();
 
-      // Remove batch dimension from input shape for expand_size, which will be used to create weights for addScaleNd later.
+      // Remove batch dimension from input shape for expand_size, which will
+      // be used to create weights for addScaleNd later.
       auto expand_size = shape;
       expand_size.erase(expand_size.begin(), expand_size.begin() + 1);
       auto gamma_expand = gamma.expand(expand_size);
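
For orientation before the hunks that build the math: the converter decomposes aten::layer_norm into TensorRT primitives following the standard LayerNorm definition over the trailing normalized_shape dimensions,

  \[ y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} \cdot \gamma + \beta \]

which is exactly the sequence the comments below trace: X-E[x], variance, add eps, sqrt, divide, then the gamma/beta scale.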
@@ -119,15 +120,15 @@ auto layer_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().
         repeats_expected.push_back(repeat);
       }
 
-      int repeats_expected_rank = repeats_expected.size(); // 4
-      auto mean_layer_expected_out_dims = mean_layer_expected_out->getDimensions(); // 1
-      auto num_expand_dims_expected = repeats_expected_rank - mean_layer_expected_out_dims.nbDims; // 3
-
+      int repeats_expected_rank = repeats_expected.size();
+      auto mean_layer_expected_out_dims = mean_layer_expected_out->getDimensions();
+      auto num_expand_dims_expected = repeats_expected_rank - mean_layer_expected_out_dims.nbDims;
+
       if (num_expand_dims_expected > 0) {
         nvinfer1::Dims reshape_expected_dims;
         reshape_expected_dims.nbDims = repeats_expected.size();
         for (int i = 0; i < num_expand_dims_expected; i++) {
-          reshape_expected_dims.d[repeats_expected.size() - 1 - i ] = 1;
+          reshape_expected_dims.d[repeats_expected.size() - 1 - i] = 1;
         }
         for (int i = 0; i < mean_layer_expected_out_dims.nbDims; i++) {
           reshape_expected_dims.d[i] = mean_layer_expected_out_dims.d[i];
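
The three inline values deleted above (// 4, // 1, // 3) were a worked example of this rank bookkeeping. Spelled out under an assumed 4-D input of shape (8, 3, 224, 224) normalized over its last three dimensions (the concrete numbers are illustrative, not from the diff):

  #include <cstdio>

  int main() {
    int repeats_expected_rank = 4;  // the deleted "// 4": rank of the input
    int mean_out_rank = 1;          // the deleted "// 1": mean after reducing C, H, W with keepDims=false
    int num_expand_dims = repeats_expected_rank - mean_out_rank;  // the deleted "// 3"

    // Mirror of the reshape loop above: pad 1s at the tail,
    // then keep the mean's surviving dimensions at the front.
    int d[4] = {0, 0, 0, 0};
    for (int i = 0; i < num_expand_dims; i++) {
      d[repeats_expected_rank - 1 - i] = 1;
    }
    d[0] = 8;  // the mean's surviving batch dimension
    std::printf("mean reshaped to (%d, %d, %d, %d)\n", d[0], d[1], d[2], d[3]);  // (8, 1, 1, 1)
    return 0;
  }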
@@ -147,44 +148,49 @@ auto layer_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().
         concat_layer->setAxis(i);
         mean_layer_expected_out = concat_layer->getOutput(0);
       }
-
 
       // X-E[x]
-      auto sub = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, input, mean_layer_expected_out, (util::node_info(n) + "_sub").c_str());
+      auto sub = add_elementwise(
+          ctx,
+          nvinfer1::ElementWiseOperation::kSUB,
+          input,
+          mean_layer_expected_out,
+          (util::node_info(n) + "_sub").c_str());
       TRTORCH_CHECK(sub, "Unable to create Add layer from node: " << *n);
       sub->setName((util::node_info(n) + "_sub").c_str());
       auto xsubmean = sub->getOutput(0);
 
       // Variance
       float pow_scalar = 2;
       auto exponent = tensor_to_const(ctx, torch::tensor({pow_scalar}));
-      auto pow = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kPOW, xsubmean, exponent, (util::node_info(n) +"_pow").c_str());
+      auto pow = add_elementwise(
+          ctx, nvinfer1::ElementWiseOperation::kPOW, xsubmean, exponent, (util::node_info(n) + "_pow").c_str());
       TRTORCH_CHECK(pow, "Unable to create Power layer from node: " << *n);
-      pow->setName((util::node_info(n) +"_pow").c_str());
+      pow->setName((util::node_info(n) + "_pow").c_str());
       auto pow_out = pow->getOutput(0);
 
       auto mean_layer_var = ctx->net->addReduce(*pow_out, nvinfer1::ReduceOperation::kAVG, axis_mask, false);
       TRTORCH_CHECK(mean_layer_var, "Unable to create mean_layer_var from node: " << *n);
       mean_layer_var->setName((util::node_info(n) + "_mean_var").c_str());
       auto mean_layer_var_out = mean_layer_var->getOutput(0);
-
-      // Expand output of mean_layer_var to the same shape as original input.
+
+      // Expand output of mean_layer_var to the same shape as original
+      // input.
       c10::List<int64_t> repeats_var;
       for (size_t i = 0; i < shape.size(); i++) {
-        auto repeat = i > (shape.size()-normalized_shape_vec.size()-1) ? shape[i] : 1;
+        auto repeat = i > (shape.size() - normalized_shape_vec.size() - 1) ? shape[i] : 1;
         repeats_var.push_back(repeat);
       }
 
       int repeats_var_rank = repeats_var.size();
-      auto mean_layer_var_out_dims = mean_layer_var_out->getDimensions(); 
+      auto mean_layer_var_out_dims = mean_layer_var_out->getDimensions();
       auto num_expand_dims_var = repeats_var_rank - mean_layer_var_out_dims.nbDims;
-
 
       if (num_expand_dims_var > 0) {
         nvinfer1::Dims reshape_dims_var;
         reshape_dims_var.nbDims = repeats_var.size();
         for (int i = 0; i < num_expand_dims_var; i++) {
-          reshape_dims_var.d[repeats_var.size() - 1 - i ] = 1;
+          reshape_dims_var.d[repeats_var.size() - 1 - i] = 1;
         }
         for (int i = 0; i < mean_layer_var_out_dims.nbDims; i++) {
           reshape_dims_var.d[i] = mean_layer_var_out_dims.d[i];
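
The "Variance" block reformatted above encodes the identity behind the kPOW/kAVG pair: with the constant exponent of 2,

  \[ \mathrm{Var}[x] = \mathrm{E}\!\left[(x - \mathrm{E}[x])^2\right] \]

i.e. square xsubmean elementwise, then average with the same axis_mask used for the mean.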
@@ -195,7 +201,6 @@ auto layer_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().
         reshape_layer_var->setReshapeDimensions(reshape_dims_var);
         mean_layer_var_out = reshape_layer_var->getOutput(0);
       }
-
 
       for (int i = repeats_var.size() - 1; i >= 0; --i) {
         std::vector<nvinfer1::ITensor*> tensors_vec;
@@ -207,34 +212,41 @@ auto layer_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().
         mean_layer_var_out = concat_layer->getOutput(0);
       }
 
-       // add eps
+      // add eps
       auto eps_tensor = tensor_to_const(ctx, torch::tensor({eps}));
-      auto add = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUM, mean_layer_var_out, eps_tensor, (util::node_info(n)+"_add").c_str());
+      auto add = add_elementwise(
+          ctx,
+          nvinfer1::ElementWiseOperation::kSUM,
+          mean_layer_var_out,
+          eps_tensor,
+          (util::node_info(n) + "_add").c_str());
       TRTORCH_CHECK(add, "Unable to create Add layer from node: " << *n);
-      add->setName((util::node_info(n)+"_add").c_str());
+      add->setName((util::node_info(n) + "_add").c_str());
       auto add_out = add->getOutput(0);
 
       // add Unary layer for sqrt((var + eps))
       auto unary = ctx->net->addUnary(*add_out, nvinfer1::UnaryOperation::kSQRT);
-      TRTORCH_CHECK(unary, "Unable to create unary layer from node: " << *n); 
-      unary->setName((util::node_info(n)+"_unary_sqrt").c_str());
+      TRTORCH_CHECK(unary, "Unable to create unary layer from node: " << *n);
+      unary->setName((util::node_info(n) + "_unary_sqrt").c_str());
       auto unary_out = unary->getOutput(0);
 
-
       // (x - E[x]) / sqrt((var + eps))
-      auto div= add_elementwise(ctx, nvinfer1::ElementWiseOperation::kDIV, xsubmean, unary_out, (util::node_info(n)+"_div").c_str());
+      auto div = add_elementwise(
+          ctx, nvinfer1::ElementWiseOperation::kDIV, xsubmean, unary_out, (util::node_info(n) + "_div").c_str());
       TRTORCH_CHECK(div, "Unable to create div layer from node: " << *n);
-      div->setName((util::node_info(n)+"_div").c_str());
+      div->setName((util::node_info(n) + "_div").c_str());
       auto div_out = div->getOutput(0);
 
-      // Set up gamma_weights and beta_weights from gamma_expand and beta_expand
+      // Set up gamma_weights and beta_weights from gamma_expand and
+      // beta_expand
       auto gamma_weights = Weights(ctx, gamma_expand);
       auto beta_weights = Weights(ctx, beta_expand);
 
       auto power = Weights(ctx, at::ones_like(gamma_expand));
-      auto scale_nd = ctx->net->addScaleNd(*div_out, nvinfer1::ScaleMode::kELEMENTWISE, beta_weights.data, gamma_weights.data, power.data, 1);
+      auto scale_nd = ctx->net->addScaleNd(
+          *div_out, nvinfer1::ScaleMode::kELEMENTWISE, beta_weights.data, gamma_weights.data, power.data, 1);
 
-      scale_nd->setName((util::node_info(n)+"_scale_nd").c_str());
+      scale_nd->setName((util::node_info(n) + "_scale_nd").c_str());
       auto scale_nd_out = scale_nd->getOutput(0);
 
       ctx->AssociateValueAndTensor(n->outputs()[0], scale_nd_out);
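
A closing note on the final hunk: TensorRT's IScaleLayer computes output = (input * scale + shift)^power per element, so passing beta_weights as shift, gamma_weights as scale, and an all-ones power tensor makes addScaleNd apply the affine tail gamma * x_norm + beta of the formula above; the trailing 1 is the channel-axis argument. The batch dimension was dropped from expand_size earlier precisely so these weights line up with the elementwise scale mode.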
