
Commit e1ba7c0

Implemented 1D conv, refactored pooling, 1D and special case for adaptive pooling

Signed-off-by: Boris Fomitchev <[email protected]>
1 parent 72cb449

11 files changed: +422 -393 lines

core/conversion/converters/BUILD

Lines changed: 3 additions & 1 deletion

@@ -29,10 +29,12 @@ cc_library(
 cc_library(
     name = "converters",
     hdrs = [
-        "converters.h"
+        "converters.h",
+        "converter_util.h",
     ],
     srcs = [
         "NodeConverterRegistry.cpp",
+        "converter_util.cpp",
         "impl/activation.cpp",
         "impl/batch_norm.cpp",
         "impl/concat.cpp",
core/conversion/converters/converter_util.cpp

Lines changed: 52 additions & 0 deletions

@@ -0,0 +1,52 @@
+#include "core/conversion/converters/converter_util.h"
+#include "core/conversion/converters/converters.h"
+#include "core/util/prelude.h"
+
+namespace trtorch {
+namespace core {
+namespace conversion {
+namespace converters {
+
+nvinfer1::ILayer* addPaddingLayer(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing, bool use_zeros) {
+  const auto dims = tensor->getDimensions();
+
+  if (dims.nbDims < nDim) {
+    auto newDims = dims;
+    for (int dim = dims.nbDims; dim < nDim; ++dim)
+      newDims = util::unsqueezeDims(newDims, trailing ? dim : 0, 1, use_zeros);
+
+    LOG_DEBUG(
+        "Input shape is less than " << nDim << "D, got: " << dims
+        << ", inserting shuffle layer to reshape to: " << newDims);
+    auto shuffle_layer = ctx->net->addShuffle(*tensor);
+    TRTORCH_CHECK(shuffle_layer, "Unable to create shuffle layer");
+    shuffle_layer->setReshapeDimensions(newDims);
+    shuffle_layer->setZeroIsPlaceholder(use_zeros);
+    shuffle_layer->setName((util::node_info(n) + " [Reshape to " + util::toStr(newDims) + ']').c_str());
+    return shuffle_layer;
+  } else {
+    return nullptr;
+  }
+}
+
+nvinfer1::ILayer* addUnpaddingLayer(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing, bool use_zeros) {
+  const auto dims = tensor->getDimensions();
+  if (dims.nbDims > nDim) {
+    auto newDims = dims;
+    for (int dim = dims.nbDims; dim > nDim; --dim)
+      newDims = util::squeezeDims(newDims, trailing ? dim - 1 : 0);
+
+    LOG_DEBUG("Original shape: " << dims << ", reshaping to: " << newDims);
+    auto shuffle_layer = ctx->net->addShuffle(*tensor);
+    TRTORCH_CHECK(shuffle_layer, "Unable to create shuffle layer");
+    shuffle_layer->setReshapeDimensions(newDims);
+    shuffle_layer->setZeroIsPlaceholder(use_zeros);
+    shuffle_layer->setName((util::node_info(n) + " [Reshape to " + util::toStr(newDims) + ']').c_str());
+    return shuffle_layer;
+  } else {
+    return nullptr;
+  }
+}
+
+} // namespace converters
+} // namespace conversion
+} // namespace core
+} // namespace trtorch
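Both helpers implement the same pad-to-rank / squeeze-back bookkeeping around a TensorRT shuffle layer. Below is a minimal standalone sketch of just the shape arithmetic, using std::vector<int64_t> in place of nvinfer1::Dims; padDims and unpadDims are hypothetical stand-ins for illustration, not functions from this commit:

// Sketch of the shape bookkeeping in addPaddingLayer / addUnpaddingLayer.
// padDims/unpadDims are hypothetical stand-ins; the real helpers operate on
// nvinfer1::Dims and also insert an IShuffleLayer into the network.
#include <cassert>
#include <cstdint>
#include <vector>

using Shape = std::vector<int64_t>;

// Insert size-1 dimensions (trailing or leading) until the rank reaches nDim.
Shape padDims(Shape s, int nDim, bool trailing = true) {
  while (static_cast<int>(s.size()) < nDim)
    s.insert(trailing ? s.end() : s.begin(), 1);
  return s;
}

// Remove trailing (or leading) dimensions until the rank drops back to nDim.
Shape unpadDims(Shape s, int nDim, bool trailing = true) {
  while (static_cast<int>(s.size()) > nDim)
    s.erase(trailing ? s.end() - 1 : s.begin());
  return s;
}

int main() {
  // A 1D conv input (N, C, L) becomes (N, C, L, 1) so 2D-only layers accept
  // it, then is squeezed back to (N, C, L) after the op.
  Shape in = {8, 16, 100};
  Shape padded = padDims(in, 4);
  assert((padded == Shape{8, 16, 100, 1}));
  assert((unpadDims(padded, 3) == in));
  return 0;
}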
core/conversion/converters/converter_util.h

Lines changed: 20 additions & 0 deletions

@@ -0,0 +1,20 @@
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "core/conversion/conversionctx/ConversionCtx.h"
+#include "core/conversion/converters/Weights.h"
+#include "core/conversion/var/Var.h"
+#include "core/util/prelude.h"
+
+namespace trtorch {
+namespace core {
+namespace conversion {
+namespace converters {
+nvinfer1::ILayer* addPaddingLayer(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing = true, bool use_zeros = true);
+nvinfer1::ILayer* addUnpaddingLayer(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing = true, bool use_zeros = true);
+} // namespace converters
+} // namespace conversion
+} // namespace core
+} // namespace trtorch

core/conversion/converters/impl/batch_norm.cpp

Lines changed: 7 additions & 17 deletions

@@ -1,3 +1,4 @@
+#include "core/conversion/converters/converter_util.h"
 #include "core/conversion/converters/converters.h"
 #include "core/util/prelude.h"
 #include "torch/torch.h"
@@ -40,17 +41,9 @@ auto batch_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().
             LOG_DEBUG("training disregarded");
             LOG_DEBUG("cudnn disregarded");

-            auto should_unpack = util::toVec(orig_shape).size() < 4;
-            if (should_unpack) {
-              // expand spatial dims from 1D to 2D
-              auto new_shape = util::toDimsTailPad(util::toVec(orig_shape), 4);
-              LOG_DEBUG(
-                  "Input shape is less than 4D got: "
-                  << orig_shape << ", inserting shuffle layer to reshape to 4D tensor shape: " << new_shape);
-              auto in_shuffle = ctx->net->addShuffle(*input);
-              in_shuffle->setReshapeDimensions(new_shape);
-              in_shuffle->setName(std::string("[Reshape input to " + util::toStr(new_shape) + ']').c_str());
-              input = in_shuffle->getOutput(0);
+            auto expandDims = addPaddingLayer(ctx, n, input, 4);
+            if (expandDims) {
+              input = expandDims->getOutput(0);
             }

             auto scale = gamma / torch::sqrt(var + eps);
@@ -65,14 +58,11 @@ auto batch_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().
             bn->setName(util::node_info(n).c_str());
             auto out_tensor = bn->getOutput(0);

-            if (should_unpack) {
+            if (expandDims) {
               LOG_DEBUG("Inserting shuffle layer to reshape back to original shape: " << orig_shape);
-              auto out_shuffle = ctx->net->addShuffle(*out_tensor);
-              out_shuffle->setReshapeDimensions(orig_shape);
-              out_shuffle->setName(std::string("[Reshape output to " + util::toStr(orig_shape) + ']').c_str());
-              out_tensor = out_shuffle->getOutput(0);
+              auto new_layer = addUnpaddingLayer(ctx, n, out_tensor, orig_shape.nbDims);
+              out_tensor = new_layer->getOutput(0);
             }
-
             ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
             return true;
           }});
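The batch-norm converter folds normalization into a per-channel scale and shift; the `auto scale = gamma / torch::sqrt(var + eps);` context line shows the scale half. A quick numeric check of that folding, with plain scalars standing in for the per-channel torch tensors; the shift formula follows from the standard batch-norm definition, not from this hunk:

// Numeric sketch of the batch-norm folding implied by the converter:
// y = gamma * (x - mean) / sqrt(var + eps) + beta  ==  scale * x + shift.
#include <cassert>
#include <cmath>

int main() {
  double gamma = 0.5, beta = 0.1, mean = 2.0, var = 4.0, eps = 1e-5;
  double x = 3.0;

  // Direct batch-norm formula.
  double direct = gamma * (x - mean) / std::sqrt(var + eps) + beta;

  // Folded form: one scale and one shift, as in `scale = gamma / sqrt(var + eps)`.
  double scale = gamma / std::sqrt(var + eps);
  double shift = beta - mean * scale;
  double folded = scale * x + shift;

  assert(std::abs(direct - folded) < 1e-12);
  return 0;
}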

core/conversion/converters/impl/conv_deconv.cpp

Lines changed: 46 additions & 11 deletions

@@ -1,7 +1,7 @@
-#include "torch/torch.h"
-
+#include "core/conversion/converters/converter_util.h"
 #include "core/conversion/converters/converters.h"
 #include "core/util/prelude.h"
+#include "torch/torch.h"

 namespace trtorch {
 namespace core {
@@ -14,15 +14,46 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
   auto in = args[0].ITensor(); // assumes non-static input Tensor
   auto w = Weights(ctx, args[1].unwrapToTensor());
   auto stride = util::toDims(args[3].unwrapToIntList());
-  LOG_DEBUG("stride: " << stride);
   auto padding = util::toDims(args[4].unwrapToIntList());
-  LOG_DEBUG("padding: " << padding);
   auto dilation = util::toDims(args[5].unwrapToIntList());
-  LOG_DEBUG("dilation: " << dilation);
   bool transposed = args[6].unwrapToBool();
   auto out_padding = util::toDims(args[7].unwrapToIntList());
-  LOG_DEBUG("out_padding: " << out_padding);
   int64_t groups = args[8].unwrapToInt();
+
+  auto dims = in->getDimensions();
+  auto orig_dims = dims;
+  LOG_DEBUG("Original input dims: " << orig_dims);
+
+  // Expand spatial dims from 1D to 2D if needed
+  auto expandDims = addPaddingLayer(ctx, n, in, 4);
+  if (expandDims) {
+    auto tensorPtr = expandDims->getOutput(0);
+    assert(tensorPtr);
+    dims = tensorPtr->getDimensions();
+    in = tensorPtr;
+  }
+  if (w.shape.nbDims < 4) {
+    for (int i = w.shape.nbDims; i < 4; ++i)
+      w.shape.d[i] = 1;
+    w.shape.nbDims = 4;
+    w.kernel_shape.nbDims = 2;
+    w.kernel_shape.d[1] = 1;
+  }
+  if (stride.nbDims == 1)
+    stride = util::unsqueezeDims(stride, 1, 1);
+  if (dilation.nbDims == 1)
+    dilation = util::unsqueezeDims(dilation, 1, 1);
+  if (padding.nbDims == 1)
+    padding = util::unsqueezeDims(padding, 1, 0);
+  if (out_padding.nbDims == 1)
+    out_padding = util::unsqueezeDims(out_padding, 1, 0);
+
+  LOG_DEBUG("Input dims: " << dims);
+  LOG_DEBUG("Weights: " << w);
+  LOG_DEBUG("stride: " << stride);
+  LOG_DEBUG("padding: " << padding);
+  LOG_DEBUG("dilation: " << dilation);
+  LOG_DEBUG("out_padding: " << out_padding);
   LOG_DEBUG("groups: " << groups);

   nvinfer1::ILayer* new_layer;
@@ -31,12 +62,11 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
     if (args[2].IValue()->isTensor()) {
       bias = Weights(ctx, args[2].unwrapToTensor());
     } else {
-      bias = Weights(ctx, torch::zeros(args[1].unwrapToTensor().sizes()[1] * groups));
+      bias = Weights(ctx, torch::zeros(w.shape.d[1] * groups));
     }

     // shape of deconvolution's weight: [in, out/groups, ...]
-    auto deconv = ctx->net->addDeconvolutionNd(
-        *in, args[1].unwrapToTensor().sizes()[1] * groups, w.kernel_shape, w.data, bias.data);
+    auto deconv = ctx->net->addDeconvolutionNd(*in, w.shape.d[1] * groups, w.kernel_shape, w.data, bias.data);
     TRTORCH_CHECK(deconv, "Unable to create deconvolution layer from node: " << *n);

     deconv->setStrideNd(stride);
@@ -56,11 +86,11 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
     if (args[2].IValue()->isTensor()) {
       bias = Weights(ctx, args[2].unwrapToTensor());
     } else {
-      bias = Weights(ctx, torch::zeros(args[1].unwrapToTensor().sizes()[0]));
+      bias = Weights(ctx, torch::zeros(w.shape.d[0]));
     }

     // shape of convolution's weight: [out, in/groups, ...]
-    auto conv = ctx->net->addConvolutionNd(*in, args[1].unwrapToTensor().sizes()[0], w.kernel_shape, w.data, bias.data);
+    auto conv = ctx->net->addConvolutionNd(*in, w.shape.d[0], w.kernel_shape, w.data, bias.data);
     TRTORCH_CHECK(conv, "Unable to create convolution layer from node: " << *n);

     conv->setStrideNd(stride);
@@ -73,6 +103,11 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
   }
   new_layer->setName(util::node_info(n).c_str());

+  if (expandDims) {
+    // Un-expand spatial dims back to 1D
+    new_layer = addUnpaddingLayer(ctx, n, new_layer->getOutput(0), orig_dims.nbDims);
+  }
+
   auto out = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));

   LOG_DEBUG("Output tensor shape: " << out->getDimensions());
