
Commit c189747

Merge pull request #435 from borisfom/conv1d+pool
Implemented 1D conv, refactored pooling, 1D and special case for adap…
2 parents 72cb449 + b237311 commit c189747

10 files changed: +426 -405 lines changed


core/conversion/converters/BUILD

Lines changed: 3 additions & 1 deletion

@@ -29,10 +29,12 @@ cc_library(
 cc_library(
     name = "converters",
     hdrs = [
-        "converters.h"
+        "converters.h",
+        "converter_util.h",
     ],
     srcs = [
         "NodeConverterRegistry.cpp",
+        "converter_util.cpp",
         "impl/activation.cpp",
         "impl/batch_norm.cpp",
         "impl/concat.cpp",
core/conversion/converters/converter_util.cpp

Lines changed: 53 additions & 0 deletions

@@ -0,0 +1,53 @@
+#include "core/conversion/converters/converter_util.h"
+#include "core/conversion/converters/converters.h"
+#include "core/util/prelude.h"
+
+namespace trtorch {
+namespace core {
+namespace conversion {
+namespace converters {
+
+nvinfer1::ITensor* addPadding(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing, bool use_zeros) {
+  const auto dims = tensor->getDimensions();
+
+  if (dims.nbDims < nDim) {
+    auto newDims = dims;
+    for (int dim = dims.nbDims; dim < nDim; ++dim) {
+      newDims = util::unsqueezeDims(newDims, trailing ? dim : 0, 1, use_zeros);
+    }
+
+    LOG_DEBUG("Original shape: " << dims << ", reshaping to: " << newDims);
+    auto shuffle_layer = ctx->net->addShuffle(*tensor);
+    TRTORCH_CHECK(shuffle_layer, "Unable to create shuffle layer");
+    shuffle_layer->setReshapeDimensions(newDims);
+    shuffle_layer->setZeroIsPlaceholder(use_zeros);
+    shuffle_layer->setName((util::node_info(n) + " [Reshape to " + util::toStr(newDims) + ']').c_str());
+    return shuffle_layer->getOutput(0);
+  } else {
+    return tensor;
+  }
+}
+
+nvinfer1::ITensor* addUnpadding(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing, bool use_zeros) {
+  const auto dims = tensor->getDimensions();
+  if (dims.nbDims > nDim) {
+    auto newDims = dims;
+    for (int dim = dims.nbDims; dim > nDim; --dim) {
+      newDims = util::squeezeDims(newDims, trailing ? dim - 1 : 0);
+    }
+    LOG_DEBUG("Original shape: " << dims << ", reshaping to: " << newDims);
+    auto shuffle_layer = ctx->net->addShuffle(*tensor);
+    TRTORCH_CHECK(shuffle_layer, "Unable to create shuffle layer");
+    shuffle_layer->setReshapeDimensions(newDims);
+    shuffle_layer->setZeroIsPlaceholder(use_zeros);
+    shuffle_layer->setName((util::node_info(n) + " [Reshape to " + util::toStr(newDims) + ']').c_str());
+    return shuffle_layer->getOutput(0);
+  } else {
+    return tensor;
+  }
+}
+
+} // namespace converters
+} // namespace conversion
+} // namespace core
+} // namespace trtorch
core/conversion/converters/converter_util.h

Lines changed: 29 additions & 0 deletions

@@ -0,0 +1,29 @@
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "core/conversion/conversionctx/ConversionCtx.h"
+#include "core/conversion/converters/Weights.h"
+#include "core/conversion/var/Var.h"
+#include "core/util/prelude.h"
+
+namespace trtorch {
+namespace core {
+namespace conversion {
+namespace converters {
+
+// If the tensor has fewer than nDim dimensions, adds a shuffle layer that pads it with size-1 dims (at the end if trailing) and returns the (nDim-dimensional) shuffle layer's output.
+// Otherwise, does nothing and passes the tensor through.
+// use_zeros controls whether 0 (a placeholder) is used instead of -1 in the reshape dimensions.
+nvinfer1::ITensor* addPadding(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing = true, bool use_zeros = true);
+
+// If the tensor has more than nDim dimensions, adds a shuffle layer that un-pads it (squeezing from the end if trailing) and returns the (nDim-dimensional) shuffle layer's output.
+// Otherwise, does nothing and passes the tensor through.
+// use_zeros controls whether 0 (a placeholder) is used instead of -1 in the reshape dimensions.
+nvinfer1::ITensor* addUnpadding(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* tensor, int nDim, bool trailing = true, bool use_zeros = true);
+
+} // namespace converters
+} // namespace conversion
+} // namespace core
+} // namespace trtorch
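
For readers adding new converters, a minimal usage sketch of these helpers (not part of this diff; `some4dOnlyLayer` and the surrounding `ctx`, `n`, `args` converter-lambda context are placeholders for illustration):

  // Hypothetical converter body: run a layer that requires 4D input on a 3D (N, C, L) tensor.
  nvinfer1::ITensor* in = args[0].ITensor();
  auto orig_dims = in->getDimensions(); // e.g. (N, C, L), nbDims == 3

  // addPadding inserts a shuffle layer reshaping (N, C, L) -> (N, C, L, 1);
  // if the tensor is already >= 4D it is returned unchanged.
  in = addPadding(ctx, n, in, 4);

  auto layer = some4dOnlyLayer(ctx, in); // placeholder, e.g. addScaleNd as in batch_norm below

  // addUnpadding squeezes the trailing unit dim back off: (N, C, L', 1) -> (N, C, L').
  auto out = addUnpadding(ctx, n, layer->getOutput(0), orig_dims.nbDims);
  ctx->AssociateValueAndTensor(n->outputs()[0], out);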

core/conversion/converters/impl/batch_norm.cpp

Lines changed: 8 additions & 21 deletions

@@ -1,3 +1,4 @@
+#include "core/conversion/converters/converter_util.h"
 #include "core/conversion/converters/converters.h"
 #include "core/util/prelude.h"
 #include "torch/torch.h"
@@ -40,17 +41,11 @@ auto batch_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().
     LOG_DEBUG("training disregarded");
     LOG_DEBUG("cudnn disregarded");

-    auto should_unpack = util::toVec(orig_shape).size() < 4;
-    if (should_unpack) {
-      // expand spatial dims from 1D to 2D
-      auto new_shape = util::toDimsTailPad(util::toVec(orig_shape), 4);
-      LOG_DEBUG(
-          "Input shape is less than 4D got: "
-          << orig_shape << ", inserting shuffle layer to reshape to 4D tensor shape: " << new_shape);
-      auto in_shuffle = ctx->net->addShuffle(*input);
-      in_shuffle->setReshapeDimensions(new_shape);
-      in_shuffle->setName(std::string("[Reshape input to " + util::toStr(new_shape) + ']').c_str());
-      input = in_shuffle->getOutput(0);
+    // Expand spatial dims from 1D to 2D if needed
+    bool expandDims = (orig_shape.nbDims < 4);
+
+    if (expandDims) {
+      input = addPadding(ctx, n, input, 4);
     }

     auto scale = gamma / torch::sqrt(var + eps);
@@ -63,16 +58,8 @@ auto batch_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().
     auto bn = ctx->net->addScaleNd(
         *input, nvinfer1::ScaleMode::kCHANNEL, bias_weights.data, scale_weights.data, power.data, 1);
     bn->setName(util::node_info(n).c_str());
-    auto out_tensor = bn->getOutput(0);
-
-    if (should_unpack) {
-      LOG_DEBUG("Inserting shuffle layer to reshape to back to original shape: " << orig_shape);
-      auto out_shuffle = ctx->net->addShuffle(*out_tensor);
-      out_shuffle->setReshapeDimensions(orig_shape);
-      out_shuffle->setName(std::string("[Reshape output to " + util::toStr(orig_shape) + ']').c_str());
-      out_tensor = out_shuffle->getOutput(0);
-    }
-
+    // Un-pad bn output if needed
+    auto out_tensor = addUnpadding(ctx, n, bn->getOutput(0), orig_shape.nbDims);
     ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
     return true;
   }});

core/conversion/converters/impl/conv_deconv.cpp

Lines changed: 50 additions & 13 deletions

@@ -1,7 +1,7 @@
-#include "torch/torch.h"
-
+#include "core/conversion/converters/converter_util.h"
 #include "core/conversion/converters/converters.h"
 #include "core/util/prelude.h"
+#include "torch/torch.h"

 namespace trtorch {
 namespace core {
@@ -14,15 +14,49 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
   auto in = args[0].ITensor(); // assumes non-static input Tensor
   auto w = Weights(ctx, args[1].unwrapToTensor());
   auto stride = util::toDims(args[3].unwrapToIntList());
-  LOG_DEBUG("stride: " << stride);
   auto padding = util::toDims(args[4].unwrapToIntList());
-  LOG_DEBUG("padding: " << padding);
   auto dilation = util::toDims(args[5].unwrapToIntList());
-  LOG_DEBUG("dilation: " << dilation);
   bool transposed = args[6].unwrapToBool();
   auto out_padding = util::toDims(args[7].unwrapToIntList());
-  LOG_DEBUG("out_padding: " << out_padding);
   int64_t groups = args[8].unwrapToInt();
+
+  auto dims = in->getDimensions();
+  auto orig_dims = dims;
+  LOG_DEBUG("Original input dims: " << orig_dims);
+
+  // Expand spatial dims from 1D to 2D if needed
+  bool expandDims = (orig_dims.nbDims < 4);
+  if (expandDims) {
+    in = addPadding(ctx, n, in, 4);
+    dims = in->getDimensions();
+  }
+  if (w.shape.nbDims < 4) {
+    for (int i = w.shape.nbDims; i < 4; ++i) {
+      w.shape.d[i] = 1;
+    }
+    w.shape.nbDims = 4;
+    w.kernel_shape.nbDims = 2;
+    w.kernel_shape.d[1] = 1;
+  }
+  if (stride.nbDims == 1) {
+    stride = util::unsqueezeDims(stride, 1, 1);
+  }
+  if (dilation.nbDims == 1) {
+    dilation = util::unsqueezeDims(dilation, 1, 1);
+  }
+  if (padding.nbDims == 1) {
+    padding = util::unsqueezeDims(padding, 1, 0);
+  }
+  if (out_padding.nbDims == 1) {
+    out_padding = util::unsqueezeDims(out_padding, 1, 0);
+  }
+
+  LOG_DEBUG("Input dims: " << dims);
+  LOG_DEBUG("Weights: " << w);
+  LOG_DEBUG("stride: " << stride);
+  LOG_DEBUG("padding: " << padding);
+  LOG_DEBUG("dilation: " << dilation);
+  LOG_DEBUG("out_padding: " << out_padding);
   LOG_DEBUG("groups: " << groups);

   nvinfer1::ILayer* new_layer;
@@ -31,12 +65,11 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
     if (args[2].IValue()->isTensor()) {
       bias = Weights(ctx, args[2].unwrapToTensor());
     } else {
-      bias = Weights(ctx, torch::zeros(args[1].unwrapToTensor().sizes()[1] * groups));
+      bias = Weights(ctx, torch::zeros(w.shape.d[1] * groups));
     }

     // shape of deconvolution's weight: [in, out/groups, ...]
-    auto deconv = ctx->net->addDeconvolutionNd(
-        *in, args[1].unwrapToTensor().sizes()[1] * groups, w.kernel_shape, w.data, bias.data);
+    auto deconv = ctx->net->addDeconvolutionNd(*in, w.shape.d[1] * groups, w.kernel_shape, w.data, bias.data);
     TRTORCH_CHECK(deconv, "Unable to create deconvolution layer from node: " << *n);

     deconv->setStrideNd(stride);
@@ -56,11 +89,11 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
     if (args[2].IValue()->isTensor()) {
       bias = Weights(ctx, args[2].unwrapToTensor());
     } else {
-      bias = Weights(ctx, torch::zeros(args[1].unwrapToTensor().sizes()[0]));
+      bias = Weights(ctx, torch::zeros(w.shape.d[0]));
     }

     // shape of convolution's weight: [out, in/groups, ...]
-    auto conv = ctx->net->addConvolutionNd(*in, args[1].unwrapToTensor().sizes()[0], w.kernel_shape, w.data, bias.data);
+    auto conv = ctx->net->addConvolutionNd(*in, w.shape.d[0], w.kernel_shape, w.data, bias.data);
     TRTORCH_CHECK(conv, "Unable to create convolution layer from node: " << *n);

     conv->setStrideNd(stride);
@@ -71,9 +104,13 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
     conv->setNbGroups(groups);
     new_layer = conv;
   }
+
   new_layer->setName(util::node_info(n).c_str());
-
-  auto out = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
+
+  // Un-expand spatial dims back to 1D if needed
+  auto out = addUnpadding(ctx, n, new_layer->getOutput(0), orig_dims.nbDims);
+
+  ctx->AssociateValueAndTensor(n->outputs()[0], out);

   LOG_DEBUG("Output tensor shape: " << out->getDimensions());
79116
