Skip to content

Commit baaac38

Browse files
committed
refactor: Applying linting
Signed-off-by: Naren Dasan <[email protected]> Signed-off-by: Naren Dasan <[email protected]>
1 parent c189747 commit baaac38

File tree

7 files changed

+122
-72
lines changed

7 files changed

+122
-72
lines changed

core/conversion/converters/converter_util.cpp

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,13 @@ namespace core {
77
namespace conversion {
88
namespace converters {
99

10-
nvinfer1::ITensor* addPadding(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing, bool use_zeros) {
10+
nvinfer1::ITensor* addPadding(
11+
ConversionCtx* ctx,
12+
const torch::jit::Node* n,
13+
nvinfer1::ITensor* tensor,
14+
int nDim,
15+
bool trailing,
16+
bool use_zeros) {
1117
const auto dims = tensor->getDimensions();
1218

1319
if (dims.nbDims < nDim) {
@@ -28,7 +34,13 @@ nvinfer1::ITensor* addPadding(ConversionCtx* ctx, const torch::jit::Node* n, nvi
2834
}
2935
}
3036

31-
nvinfer1::ITensor* addUnpadding(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing, bool use_zeros) {
37+
nvinfer1::ITensor* addUnpadding(
38+
ConversionCtx* ctx,
39+
const torch::jit::Node* n,
40+
nvinfer1::ITensor* tensor,
41+
int nDim,
42+
bool trailing,
43+
bool use_zeros) {
3244
const auto dims = tensor->getDimensions();
3345
if (dims.nbDims > nDim) {
3446
auto newDims = dims;

core/conversion/converters/converter_util.h

Lines changed: 23 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -12,17 +12,29 @@ namespace trtorch {
1212
namespace core {
1313
namespace conversion {
1414
namespace converters {
15-
16-
// If nDim < tensor size, adds shuffle layer to pad tensor with 1s (at the end if trailing) and returns (nDim-dimensional) shuffle layer's output.
17-
// Otherwise, does nothing and passes tensor through.
18-
// use _zeros controls whether we should be using 0 instead of -1 on the shape.
19-
nvinfer1::ITensor* addPadding(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing=true, bool use_zeros=true);
20-
21-
// If nDim < tensor size, adds shuffle layer to un-pad tensor (at the end if trailing) and returns (nDim-dimensional) shuffle layer's output
22-
// Otherwise, does nothing and passes tensor through.
23-
// use _zeros controls whether we should be using 0 instead of -1 on the shape.
24-
nvinfer1::ITensor* addUnpadding(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing=true, bool use_zeros=true);
25-
15+
16+
// If nDim < tensor size, adds shuffle layer to pad tensor with 1s (at the end if trailing) and returns
17+
// (nDim-dimensional) shuffle layer's output. Otherwise, does nothing and passes tensor through. use_zeros controls
18+
// whether we should be using 0 instead of -1 on the shape.
19+
nvinfer1::ITensor* addPadding(
20+
ConversionCtx* ctx,
21+
const torch::jit::Node* n,
22+
nvinfer1::ITensor* tensor,
23+
int nDim,
24+
bool trailing = true,
25+
bool use_zeros = true);
26+
27+
// If nDim < tensor size, adds shuffle layer to un-pad tensor (at the end if trailing) and returns (nDim-dimensional)
28+
// shuffle layer's output. Otherwise, does nothing and passes tensor through. use_zeros controls whether we should be
29+
// using 0 instead of -1 on the shape.
30+
nvinfer1::ITensor* addUnpadding(
31+
ConversionCtx* ctx,
32+
const torch::jit::Node* n,
33+
nvinfer1::ITensor* tensor,
34+
int nDim,
35+
bool trailing = true,
36+
bool use_zeros = true);
37+
2638
} // namespace converters
2739
} // namespace conversion
2840
} // namespace core

core/conversion/converters/impl/batch_norm.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ auto batch_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().
4343

4444
// Expand spatial dims from 1D to 2D if needed
4545
bool expandDims = (orig_shape.nbDims < 4);
46-
46+
4747
if (expandDims) {
4848
input = addPadding(ctx, n, input, 4);
4949
}

core/conversion/converters/impl/conv_deconv.cpp

Lines changed: 20 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -22,13 +22,20 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
2222

2323
auto dims = in->getDimensions();
2424
auto orig_dims = dims;
25-
LOG_DEBUG("Original input dims: " << orig_dims);
25+
LOG_DEBUG("Input dims: " << orig_dims);
26+
LOG_DEBUG("Weights: " << w);
27+
LOG_DEBUG("stride: " << stride);
28+
LOG_DEBUG("padding: " << padding);
29+
LOG_DEBUG("dilation: " << dilation);
30+
LOG_DEBUG("out_padding: " << out_padding);
31+
LOG_DEBUG("groups: " << groups);
2632

2733
// Expand spatial dims from 1D to 2D if needed
2834
bool expandDims = (orig_dims.nbDims < 4);
2935
if (expandDims) {
3036
in = addPadding(ctx, n, in, 4);
3137
dims = in->getDimensions();
38+
LOG_DEBUG("Reshaped Input dims: " << dims);
3239
}
3340
if (w.shape.nbDims < 4) {
3441
for (int i = w.shape.nbDims; i < 4; ++i) {
@@ -37,27 +44,24 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
3744
w.shape.nbDims = 4;
3845
w.kernel_shape.nbDims = 2;
3946
w.kernel_shape.d[1] = 1;
47+
LOG_DEBUG("Reshaped Weights: " << w);
4048
}
41-
if (stride.nbDims==1) {
49+
if (stride.nbDims == 1) {
4250
stride = util::unsqueezeDims(stride, 1, 1);
51+
LOG_DEBUG("Reshaped stride: " << stride);
4352
}
44-
if (dilation.nbDims==1) {
53+
if (dilation.nbDims == 1) {
4554
dilation = util::unsqueezeDims(dilation, 1, 1);
55+
LOG_DEBUG("Reshaped dilation: " << dilation);
4656
}
47-
if (padding.nbDims==1) {
57+
if (padding.nbDims == 1) {
4858
padding = util::unsqueezeDims(padding, 1, 0);
59+
LOG_DEBUG("Reshaped padding: " << padding);
4960
}
50-
if (out_padding.nbDims==1) {
61+
if (out_padding.nbDims == 1) {
5162
out_padding = util::unsqueezeDims(out_padding, 1, 0);
63+
LOG_DEBUG("Reshaped out_padding: " << out_padding);
5264
}
53-
54-
LOG_DEBUG("Input dims: " << dims);
55-
LOG_DEBUG("Weights: " << w);
56-
LOG_DEBUG("stride: " << stride);
57-
LOG_DEBUG("padding: " << padding);
58-
LOG_DEBUG("dilation: " << dilation);
59-
LOG_DEBUG("out_padding: " << out_padding);
60-
LOG_DEBUG("groups: " << groups);
6165

6266
nvinfer1::ILayer* new_layer;
6367
if (transposed) {
@@ -104,12 +108,12 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
104108
conv->setNbGroups(groups);
105109
new_layer = conv;
106110
}
107-
111+
108112
new_layer->setName(util::node_info(n).c_str());
109-
113+
110114
// Un-expand spatial dims back to 1D if needed
111115
auto out = addUnpadding(ctx, n, new_layer->getOutput(0), orig_dims.nbDims);
112-
116+
113117
ctx->AssociateValueAndTensor(n->outputs()[0], out);
114118

115119
LOG_DEBUG("Output tensor shape: " << out->getDimensions());

core/conversion/converters/impl/pooling.cpp

Lines changed: 47 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -10,13 +10,20 @@ namespace converters {
1010
namespace impl {
1111
namespace {
1212

13-
bool GlobalPoolingConverter(ConversionCtx* ctx, const torch::jit::Node* n, args& args, nvinfer1::PoolingType pool_type)
14-
{
13+
bool GlobalPoolingConverter(
14+
ConversionCtx* ctx,
15+
const torch::jit::Node* n,
16+
args& args,
17+
nvinfer1::PoolingType pool_type) {
1518
auto in = args[0].ITensorOrFreeze(ctx);
1619
nvinfer1::Dims dims = in->getDimensions();
1720
// Generate a bitmask of all 1s except the last 2 bits (N and C axes)
1821
uint32_t reduceAxes = ((1 << dims.nbDims) - 1) & ~0b11;
19-
auto* new_layer = ctx->net->addReduce(*in, pool_type == nvinfer1::PoolingType::kMAX ? nvinfer1::ReduceOperation::kMAX : nvinfer1::ReduceOperation::kAVG , reduceAxes, /*keepDimensions=*/true);
22+
auto* new_layer = ctx->net->addReduce(
23+
*in,
24+
pool_type == nvinfer1::PoolingType::kMAX ? nvinfer1::ReduceOperation::kMAX : nvinfer1::ReduceOperation::kAVG,
25+
reduceAxes,
26+
/*keepDimensions=*/true);
2027

2128
new_layer->setName(util::node_info(n).c_str());
2229

@@ -26,26 +33,31 @@ bool GlobalPoolingConverter(ConversionCtx* ctx, const torch::jit::Node* n, args&
2633
return true;
2734
}
2835

29-
bool AdaptivePoolingConverter(ConversionCtx* ctx, const torch::jit::Node* n, args& args, nvinfer1::PoolingType pool_type) {
36+
bool AdaptivePoolingConverter(
37+
ConversionCtx* ctx,
38+
const torch::jit::Node* n,
39+
args& args,
40+
nvinfer1::PoolingType pool_type) {
3041
auto in = args[0].ITensorOrFreeze(ctx);
3142
auto out_size = util::toDims(args[1].unwrapToIntList());
3243

3344
// Corner case: when out dimension is all ones, replace with simpler operation
34-
if (out_size.d[0] == 1 && (out_size.nbDims < 2 || out_size.d[1] == 1 ) && (out_size.nbDims < 3 || out_size.d[2] == 1 )) {
45+
if (out_size.d[0] == 1 && (out_size.nbDims < 2 || out_size.d[1] == 1) &&
46+
(out_size.nbDims < 3 || out_size.d[2] == 1)) {
3547
return GlobalPoolingConverter(ctx, n, args, pool_type);
3648
}
3749

3850
auto orig_dims = in->getDimensions();
39-
bool expandDims = (orig_dims.nbDims < 4);
40-
51+
bool expandDims = (orig_dims.nbDims < 4);
52+
4153
if (expandDims) {
4254
in = addPadding(ctx, n, in, 4, false, false);
4355
}
44-
56+
4557
if (out_size.nbDims == 1) {
4658
out_size = util::unsqueezeDims(out_size, 0, 1);
4759
}
48-
60+
4961
auto in_shape = util::toVec(in->getDimensions());
5062
nvinfer1::ILayer* new_layer = nullptr;
5163

@@ -57,29 +69,37 @@ bool AdaptivePoolingConverter(ConversionCtx* ctx, const torch::jit::Node* n, arg
5769
LOG_WARNING(
5870
"Adaptive pooling layer will be run through ATen (on CPU), via not TensorRT, performace will suffer. Consider switching either to static input shape or moving to non adaptive pooling");
5971
#endif
60-
61-
TRTORCH_CHECK(pool_type == nvinfer1::PoolingType::kAVERAGE,
62-
"Unable to create MAX pooling (interpolation) plugin from node" << *n);
72+
73+
TRTORCH_CHECK(
74+
pool_type == nvinfer1::PoolingType::kAVERAGE,
75+
"Unable to create MAX pooling (interpolation) plugin from node" << *n);
6376

6477
auto out_shape = in_shape;
6578
std::copy_n(out_size.d, out_size.nbDims, out_shape.begin() + (in_shape.size() - out_size.nbDims));
6679

6780
auto creator = new plugins::InterpolatePluginCreator();
68-
auto plugin = creator->createPlugin("adaptive_pool2d", in_shape, out_shape,
69-
util::toVec(out_size), {}, std::string("adaptive_pool2d"), false, false);
81+
auto plugin = creator->createPlugin(
82+
"adaptive_pool2d",
83+
in_shape,
84+
out_shape,
85+
util::toVec(out_size),
86+
{},
87+
std::string("adaptive_pool2d"),
88+
false,
89+
false);
7090

7191
new_layer = ctx->net->addPluginV2(reinterpret_cast<nvinfer1::ITensor* const*>(&in), 1, *plugin);
7292
TRTORCH_CHECK(new_layer, "Unable to create pooling (interpolation) plugin from node" << *n);
7393

7494
} else {
7595
std::vector<int64_t> stride(out_size.nbDims);
76-
for (size_t i = 0; i < out_size.nbDims; i++) {
96+
for (int64_t i = 0; i < out_size.nbDims; i++) {
7797
stride[(stride.size() - 1) - i] = in_shape[(in_shape.size() - 1) - i] / out_size.d[(out_size.nbDims - 1) - i];
7898
}
7999
LOG_DEBUG("Stride: " << util::toDims(stride));
80100

81101
std::vector<int64_t> window(out_size.nbDims);
82-
for (size_t i = 0; i < out_size.nbDims; i++) {
102+
for (int64_t i = 0; i < out_size.nbDims; i++) {
83103
window[window.size() - 1 - i] =
84104
in_shape[in_shape.size() - 1 - i] - (out_size.d[out_size.nbDims - 1 - i] - 1) * stride[stride.size() - 1 - i];
85105
}
@@ -92,18 +112,18 @@ bool AdaptivePoolingConverter(ConversionCtx* ctx, const torch::jit::Node* n, arg
92112
new_layer = pooling_layer;
93113
}
94114

95-
new_layer->setName(util::node_info(n).c_str());
115+
new_layer->setName(util::node_info(n).c_str());
96116
auto layer_output = addUnpadding(ctx, n, new_layer->getOutput(0), orig_dims.nbDims, false, false);
97117

98118
ctx->AssociateValueAndTensor(n->outputs()[0], layer_output);
99119
LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
100120

101121
return true;
102122
}
103-
123+
104124
bool PoolingConverter(ConversionCtx* ctx, const torch::jit::Node* n, args& args, nvinfer1::PoolingType pool_type) {
105125
auto in = args[0].ITensorOrFreeze(ctx);
106-
126+
107127
// Max Pool needs at least 4D input
108128
auto orig_dims = in->getDimensions();
109129
bool expandDims = (orig_dims.nbDims < 4);
@@ -131,7 +151,7 @@ bool PoolingConverter(ConversionCtx* ctx, const torch::jit::Node* n, args& args,
131151
if (stride.nbDims == 1) {
132152
stride = util::unsqueezeDims(stride, 0, 1);
133153
}
134-
154+
135155
LOG_DEBUG("kernel_size: " << kernel_size);
136156
LOG_DEBUG("padding: " << padding);
137157
LOG_DEBUG("stride: " << stride);
@@ -165,20 +185,20 @@ bool PoolingConverter(ConversionCtx* ctx, const torch::jit::Node* n, args& args,
165185

166186
auto padding_mode =
167187
ceil_mode ? nvinfer1::PaddingMode::kEXPLICIT_ROUND_UP : nvinfer1::PaddingMode::kEXPLICIT_ROUND_DOWN;
168-
188+
169189
new_layer->setName(util::node_info(n).c_str());
170190
new_layer->setPaddingMode(padding_mode);
171191
new_layer->setPaddingNd(padding);
172192
new_layer->setStrideNd(stride);
173-
193+
174194
if (stride.nbDims != 2 && ctx->settings.device.device_type == nvinfer1::DeviceType::kDLA) {
175195
if (!ctx->settings.device.allow_gpu_fallback) {
176196
TRTORCH_THROW_ERROR("DLA Pooling stride is limited to 2D, allow GPU fallback");
177197
} else {
178198
LOG_WARNING("DLA Pooling stride is limited to 2D, will run on GPU");
179199
}
180200
}
181-
201+
182202
auto out_tensor = addUnpadding(ctx, n, new_layer->getOutput(0), orig_dims.nbDims, false, true);
183203
ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
184204

@@ -220,12 +240,12 @@ auto pooling_registrations TRTORCH_UNUSED =
220240
}})
221241
.pattern({"aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> (Tensor)",
222242
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
223-
return AdaptivePoolingConverter(ctx, n, args, nvinfer1::PoolingType::kAVERAGE);
224-
}})
243+
return AdaptivePoolingConverter(ctx, n, args, nvinfer1::PoolingType::kAVERAGE);
244+
}})
225245
.pattern({"aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -> (Tensor)",
226246
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
227-
return AdaptivePoolingConverter(ctx, n, args, nvinfer1::PoolingType::kAVERAGE);
228-
}});
247+
return AdaptivePoolingConverter(ctx, n, args, nvinfer1::PoolingType::kAVERAGE);
248+
}});
229249
} // namespace
230250
} // namespace impl
231251
} // namespace converters

core/util/trt_util.cpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -167,9 +167,9 @@ nvinfer1::Dims unsqueezeDims(const nvinfer1::Dims& d, int pos, int val, bool use
167167
nvinfer1::Dims dims;
168168
for (int i = 0, j = 0; j <= d.nbDims; j++) {
169169
// add new dimension at pos
170-
if (j == pos)
170+
if (j == pos) {
171171
dims.d[j] = val;
172-
else {
172+
} else {
173173
dims.d[j] = (use_zeros && d.d[i] == -1) ? 0 : d.d[i];
174174
++i;
175175
}
@@ -187,8 +187,9 @@ nvinfer1::Dims squeezeDims(const nvinfer1::Dims& d, int pos, bool use_zeros) {
187187
nvinfer1::Dims dims;
188188
int j = 0;
189189
for (int i = 0; i < d.nbDims; i++) {
190-
if (i != pos)
190+
if (i != pos) {
191191
dims.d[j++] = (use_zeros && d.d[i] == -1) ? 0 : d.d[i];
192+
}
192193
}
193194
dims.nbDims = j;
194195

tests/core/conversion/converters/test_pooling.cpp

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -463,26 +463,27 @@ TEST(Converters, ATenAdaptiveAvgPool2DConvertsCorrectlyWithDynamicInput) {
463463
}
464464

465465
TEST(Converters, ATenAdaptiveAvgPool1DConvertsCorrectly) {
466-
const auto graph = R"IR(
466+
const auto graph =
467+
R"IR(
467468
graph(%0 : Tensor):
468469
%2 : int = prim::Constant[value=1]()
469470
%6 : int[] = prim::ListConstruct(%2)
470471
%10 : Tensor = aten::adaptive_avg_pool1d(%0, %6)
471472
return (%10))IR";
472473

473-
auto g = std::make_shared<torch::jit::Graph>();
474-
torch::jit::parseIR(graph, g.get());
474+
auto g = std::make_shared<torch::jit::Graph>();
475+
torch::jit::parseIR(graph, g.get());
475476

476-
// PyTorch MaxPool needs a 3D input
477-
auto in = at::randint(-5, 5, {1, 3, 16}, at::kCUDA);
477+
// PyTorch MaxPool needs a 3D input
478+
auto in = at::randint(-5, 5, {1, 3, 16}, at::kCUDA);
478479

479-
auto jit_in = at::clone(in);
480-
auto params = trtorch::core::conversion::get_named_params(g->inputs(), {});
481-
auto jit_results = trtorch::tests::util::RunGraph(g, params, {jit_in});
480+
auto jit_in = at::clone(in);
481+
auto params = trtorch::core::conversion::get_named_params(g->inputs(), {});
482+
auto jit_results = trtorch::tests::util::RunGraph(g, params, {jit_in});
482483

483-
auto trt_in = at::clone(in);
484-
params = trtorch::core::conversion::get_named_params(g->inputs(), {});
485-
auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {trt_in});
484+
auto trt_in = at::clone(in);
485+
params = trtorch::core::conversion::get_named_params(g->inputs(), {});
486+
auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {trt_in});
486487

487-
ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[0], trt_results[0], 1.0));
488+
ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[0], trt_results[0], 1.0));
488489
}

0 commit comments

Comments
 (0)