Commit 8ad8fa7

refactor(//core/conversion/converters): Removing a bunch of boilerplate
and add some checking

Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>

Parent: 259aa4c

File tree

12 files changed: +136 −144 lines

.gitignore

Lines changed: 1 addition & 1 deletion

@@ -13,4 +13,4 @@ experiments/
 py/build/
 py/tmp/
 py/.eggs
-
+.vscode/

core/conversion/converters/converters.h

Lines changed: 6 additions & 0 deletions

@@ -115,6 +115,12 @@ inline nvinfer1::ITensor* tensor_to_const(ConversionCtx* ctx, at::Tensor t) {
   return ctx->net->addConstant(t_weights.shape, t_weights.data)->getOutput(0);
 }
 
+inline nvinfer1::ITensor* associate_value_and_tensor(ConversionCtx* ctx, const torch::jit::Value* value, nvinfer1::ITensor* tensor) {
+  tensor->setName(value->debugName().c_str());
+  ctx->value_tensor_map[value] = tensor;
+  return tensor;
+}
+
 } // namespace converters
 } // namespace conversion
 } // namespace core
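
Note: this helper collapses the name-and-register boilerplate that every converter below previously repeated by hand. A minimal sketch of a converter tail after the refactor, assembled only from calls that appear elsewhere in this commit (the element-wise layer is just a stand-in for any `nvinfer1::ILayer*`):

  // Sketch of the post-refactor converter tail (illustrative layer choice).
  auto layer = ctx->net->addElementWise(*self, *other, op);
  TRTORCH_CHECK(layer, "Unable to create layer from node: " << *n);
  layer->setName(util::node_info(n).c_str());
  // One call now names the output tensor after the JIT Value and records
  // the Value -> ITensor binding in the conversion context.
  auto out = associate_value_and_tensor(ctx, n->outputs()[0], layer->getOutput(0));
  LOG_DEBUG("Output tensor shape: " << out->getDimensions());
  return true;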

core/conversion/converters/impl/activation.cpp

Lines changed: 5 additions & 5 deletions

@@ -18,11 +18,9 @@ namespace {
       "Unable to create " #act " layer from node: " << *n); \
     \
     new_layer->setName(util::node_info(n).c_str()); \
-    auto out_value = n->outputs()[0]; \
-    auto out_tensor = new_layer->getOutput(0); \
-    out_tensor->setName(out_value->debugName().c_str()); \
-    ctx->value_tensor_map[out_value] = out_tensor; \
-    LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions()); \
+    associate_value_and_tensor(ctx, n->outputs()[0], new_layer->getOutput(0)); \
+    LOG_DEBUG("Output tensor shape: " \
+              << new_layer->getOutput(0)->getDimensions()); \
     \
     return true; \
   } \
@@ -36,6 +34,8 @@ namespace {
     [](ConversionCtx *ctx, const torch::jit::Node *n, \
        args &args) -> bool { return act(ctx, n, args); }});
 
+// TODO: remove support for conversion of in-place operators and move to the functionalization pass
+
 convert(relu, kRELU);
 convert(sigmoid, kSIGMOID);
 convert(tanh, kTANH);
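
Note: `convert(act, trt_type)` is a macro that stamps out one converter per activation plus its registration; only fragments of it are visible in the hunk above. A rough, hedged reconstruction of what `convert(relu, kRELU)` expands to after this commit (the `addActivation` call is assumed from TensorRT's standard API and is not shown in the diff):

  // Approximate expansion of convert(relu, kRELU) (sketch, not verbatim).
  bool relu(ConversionCtx* ctx, const torch::jit::Node* n, args& args) {
    auto in = args[0].ITensor();
    // Assumed: the elided macro body creates the TensorRT activation layer.
    auto new_layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kRELU);
    TRTORCH_CHECK(new_layer, "Unable to create relu layer from node: " << *n);
    new_layer->setName(util::node_info(n).c_str());
    associate_value_and_tensor(ctx, n->outputs()[0], new_layer->getOutput(0));
    LOG_DEBUG("Output tensor shape: "
              << new_layer->getOutput(0)->getDimensions());
    return true;
  }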

core/conversion/converters/impl/batch_norm.cpp

Lines changed: 6 additions & 8 deletions

@@ -31,13 +31,12 @@ bool ConvertConvBatchNorm(ConversionCtx* ctx, const torch::jit::Node* n, args& a
   auto bias = Weights(ctx, b);
 
   auto bn_as_conv = ctx->net->addConvolutionNd(*input, weights.num_output_maps, weights.kernel_shape, weights.data, bias.data);
+  TRTORCH_CHECK(bn_as_conv, "Unable to create fused batch norm from node: " << *n);
 
   bn_as_conv->setName(util::node_info(n).c_str());
-  auto out_value = n->outputs()[0];
-  auto out_tensor = bn_as_conv->getOutput(0);
-  out_tensor->setName(out_value->debugName().c_str());
-  ctx->value_tensor_map[out_value] = out_tensor;
-  LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+
+  auto bn_out = associate_value_and_tensor(ctx, n->outputs()[0], bn_as_conv->getOutput(0));
+  LOG_DEBUG("Output tensor shape: " << bn_out->getDimensions());
   return true;
 }
 
@@ -68,9 +67,8 @@ bool ConvertLinearBatchNorm(ConversionCtx* ctx, const torch::jit::Node* n, args&
   auto bn_biased_out = bn_biased->getOutput(0);
 
   bn_biased->setName(util::node_info(n).c_str());
-  auto out_value = n->outputs()[0];
-  bn_biased_out->setName(out_value->debugName().c_str());
-  ctx->value_tensor_map[out_value] = bn_biased_out;
+  associate_value_and_tensor(ctx, n->outputs()[0], bn_biased_out);
+
   return true;
 }
 
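Note: the "fused batch norm" above works because, at inference time, batch norm is a per-channel affine transform and is therefore expressible as a convolution. The standard folding algebra (general background, not part of this diff):

  // y = gamma * (x - mean) / sqrt(var + eps) + beta   per channel, so
  //   w = gamma / sqrt(var + eps)
  //   b = beta - mean * gamma / sqrt(var + eps)
  // presumably what the weights/bias fed to addConvolutionNd above encode.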

core/conversion/converters/impl/constant.cpp

Lines changed: 3 additions & 5 deletions

@@ -19,11 +19,9 @@ auto constant_registrations = RegisterNodeConversionPatterns()
       auto t_weights = Weights(ctx, t);
       auto const_layer = ctx->net->addConstant(t_weights.shape, t_weights.data);
       const_layer->setName(util::node_info(n).c_str());
-      auto out_value = n->outputs()[0];
-      auto out_tensor = const_layer->getOutput(0);
-      out_tensor->setName(out_value->debugName().c_str());
-      ctx->value_tensor_map[out_value] = out_tensor;
-      LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+      auto const_out = associate_value_and_tensor(ctx, n->outputs()[0], const_layer->getOutput(0));
+
+      LOG_DEBUG("Output tensor shape: " << const_out->getDimensions());
 
       return true;
     }

core/conversion/converters/impl/conv_deconv.cpp

Lines changed: 4 additions & 6 deletions

@@ -65,13 +65,11 @@ auto conv_registrations = RegisterNodeConversionPatterns()
         conv->setNbGroups(groups);
         new_layer = conv;
       }
-
       new_layer->setName(util::node_info(n).c_str());
-      auto out_value = n->outputs()[0];
-      auto out_tensor = new_layer->getOutput(0);
-      out_tensor->setName(out_value->debugName().c_str());
-      ctx->value_tensor_map[out_value] = out_tensor;
-      LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+
+      auto out = associate_value_and_tensor(ctx, n->outputs()[0], new_layer->getOutput(0));
+
+      LOG_DEBUG("Output tensor shape: " << out->getDimensions());
 
       return true;
     }

core/conversion/converters/impl/element_wise.cpp

Lines changed: 55 additions & 54 deletions

@@ -13,13 +13,13 @@ nvinfer1::ILayer* add_elementwise(ConversionCtx* ctx, nvinfer1::ElementWiseOpera
   auto other_dims = other->getDimensions();
 
   TRTORCH_CHECK(util::volume(self_dims) == util::volume(other_dims), "Found inputs to elementwise operation do not have the same number of elements:\n  Found: self " << self_dims << " other " << other_dims);
-
+
   nvinfer1::ILayer* ele;
   if (scalar != 1) {
     LOG_WARNING("Please verify scalar handling in add converter, channel axis set to 3 but scaling is uniform");
 
     auto shape = util::toVec(other_dims);
-
+
     if (shape.size() < 4) {
       auto new_shape = util::toDimsPad(shape, 4);
       LOG_DEBUG("Input shape is less than 4D got: " << util::toDims(shape) << ", inserting shuffle layers to reshape to 4D tensor shape: " << new_shape);
@@ -33,7 +33,7 @@ nvinfer1::ILayer* add_elementwise(ConversionCtx* ctx, nvinfer1::ElementWiseOpera
       self_shuffle->setName(std::string("[Reshape self to " + util::toStr(new_shape) + ']').c_str());
       self = self_shuffle->getOutput(0);
     }
-
+
     auto scale = Weights(ctx, scalar);
     auto scaled = ctx->net->addScaleNd(*other, nvinfer1::ScaleMode::kUNIFORM, {}, scale.data, {}, 0);
     auto scaled_other = scaled->getOutput(0);
@@ -45,48 +45,49 @@ nvinfer1::ILayer* add_elementwise(ConversionCtx* ctx, nvinfer1::ElementWiseOpera
     //   shuffle->setName(std::string("[Reshape other to " + util::toStr(util::toDims(shape)) + ']').c_str());
     //   scaled_other = shuffle->getOutput(0);
     // }
-
+
     ele = ctx->net->addElementWise(*self, *scaled_other, op);
   } else {
     ele = ctx->net->addElementWise(*self, *other, op);
  }
+
   return ele;
-
+
 }
 
 auto element_wise_registrations = RegisterNodeConversionPatterns()
   .pattern({
     "aten::add.Tensor(Tensor self, Tensor other, Scalar alpha=1) -> Tensor",
     [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-        // Should implement self + alpha * other
+      // Should implement self + alpha * other
       auto self = args[0].ITensor();
      auto other = args[1].ITensor();
       auto scalar = args[2].unwrapToScalar().to<float>();
       auto add = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUM, self, other, scalar);
+
+      TRTORCH_CHECK(add, "Unable to create add layer from node: " << *n);
+
       add->setName(util::node_info(n).c_str());
-      auto out_value = n->outputs()[0];
-      auto out_tensor = add->getOutput(0);
-      out_tensor->setName(out_value->debugName().c_str());
-      ctx->value_tensor_map[out_value] = out_tensor;
-      LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
+      auto out = associate_value_and_tensor(ctx, n->outputs()[0], add->getOutput(0));
+
+      LOG_DEBUG("Output tensor shape: " << out->getDimensions());
       return true;
     }
   }).pattern({
     "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> (Tensor(a!))",
     [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-        // Should implement self + alpha * other
+      // Should implement self + alpha * other
       auto self = args[0].ITensor();
       auto other = args[1].ITensor();
       auto scalar = args[2].unwrapToScalar().to<float>();
       auto add = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUM, self, other, scalar);
+
+      TRTORCH_CHECK(add, "Unable to create add layer from node: " << *n);
+
       add->setName(util::node_info(n).c_str());
-      auto out_value = n->outputs()[0];
-      auto out_tensor = add->getOutput(0);
-      out_tensor->setName(out_value->debugName().c_str());
-      ctx->value_tensor_map[out_value] = out_tensor;
-      LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
+      auto out = associate_value_and_tensor(ctx, n->outputs()[0], add->getOutput(0));
+
+      LOG_DEBUG("Output tensor shape: " << out->getDimensions());
       return true;
     }
   }).pattern({
@@ -97,53 +98,53 @@ auto element_wise_registrations = RegisterNodeConversionPatterns()
       auto other = args[1].ITensor();
      auto scalar = args[2].unwrapToScalar().to<float>();
       auto sub = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, self, other, scalar);
+
+      TRTORCH_CHECK(sub, "Unable to create sub layer from node: " << *n);
+
       sub->setName(util::node_info(n).c_str());
-      auto out_value = n->outputs()[0];
-      auto out_tensor = sub->getOutput(0);
-      out_tensor->setName(out_value->debugName().c_str());
-      ctx->value_tensor_map[out_value] = out_tensor;
-      LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
+      auto out = associate_value_and_tensor(ctx, n->outputs()[0], sub->getOutput(0));
+
+      LOG_DEBUG("Output tensor shape: " << out->getDimensions());
       return true;
     }
   }).pattern({
-  "aten::div(Tensor self, Tensor other) -> Tensor",
-  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-    // Should implement self / other
-    auto self = args[0].ITensor();
-    auto other = args[1].ITensor();
-    auto div = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kDIV, self, other);
-    div->setName(util::node_info(n).c_str());
-    auto out_value = n->outputs()[0];
-    auto out_tensor = div->getOutput(0);
-    out_tensor->setName(out_value->debugName().c_str());
-    ctx->value_tensor_map[out_value] = out_tensor;
-    LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
+    "aten::div(Tensor self, Tensor other) -> Tensor",
+    [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+      // Should implement self / other
+      auto self = args[0].ITensor();
+      auto other = args[1].ITensor();
+      auto div = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kDIV, self, other);
+
+      TRTORCH_CHECK(div, "Unable to create div layer from node: " << *n);
+
+      div->setName(util::node_info(n).c_str());
+      auto out = associate_value_and_tensor(ctx, n->outputs()[0], div->getOutput(0));
+
+      LOG_DEBUG("Output tensor shape: " << out->getDimensions());
       return true;
     }
   }).pattern({
-  "aten::mul(Tensor self, Tensor other) -> Tensor",
-  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-    // Should implement self * other
-    auto self = args[0].ITensor();
-    auto other = args[1].ITensor();
-    auto mul = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kPROD, self, other);
-    mul->setName(util::node_info(n).c_str());
-    auto out_value = n->outputs()[0];
-    auto out_tensor = mul->getOutput(0);
-    out_tensor->setName(out_value->debugName().c_str());
-    ctx->value_tensor_map[out_value] = out_tensor;
-    LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
+    "aten::mul(Tensor self, Tensor other) -> Tensor",
+    [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+      // Should implement self * other
+      auto self = args[0].ITensor();
+      auto other = args[1].ITensor();
+      auto mul = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kPROD, self, other);
+
+      TRTORCH_CHECK(mul, "Unable to create mul layer from node: " << *n);
+
+      mul->setName(util::node_info(n).c_str());
+      auto out = associate_value_and_tensor(ctx, n->outputs()[0], mul->getOutput(0));
+
+      LOG_DEBUG("Output tensor shape: " << out->getDimensions());
       return true;
     }
   });
-
-
+
+
 } // namespace
 } // namespace impl
 } // namespace converters
 } // namespace conversion
 } // namespace core
-} // trtorch
+} // trtorch
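
Note on `add_elementwise`: it implements `self op (scalar * other)`, so the `alpha` from `aten::add.Tensor` rides in on the uniform scale layer, and sub-4D inputs are first shuffled up to 4D before scaling. A condensed sketch of that path using the same utilities as the hunks above (that `util::toDimsPad` pads the shape out with leading 1s is an assumption):

  // Pad a sub-4D tensor, then apply the uniform scale (sketch).
  auto shape = util::toVec(other->getDimensions());
  if (shape.size() < 4) {
    auto new_shape = util::toDimsPad(shape, 4);  // e.g. 2D -> 4D
    auto shuffle = ctx->net->addShuffle(*other);
    shuffle->setReshapeDimensions(new_shape);
    other = shuffle->getOutput(0);
  }
  auto scale = Weights(ctx, scalar);
  auto scaled = ctx->net->addScaleNd(*other, nvinfer1::ScaleMode::kUNIFORM, {}, scale.data, {}, 0);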

core/conversion/converters/impl/linear.cpp

Lines changed: 7 additions & 10 deletions

@@ -22,21 +22,21 @@ auto linear_registrations = RegisterNodeConversionPatterns()
       TRTORCH_ASSERT(shape.size() >= 2, "aten::linear expects input tensors to be of shape [N,..., in features], but found input Tensor less than 2D");
 
       if (shape.size() < 4) {
-          // Flatten
+        // Flatten
         std::vector<int64_t> new_shape;
         new_shape.push_back(shape[0]);
         new_shape.push_back(1);
         new_shape.push_back(1);
         new_shape.push_back(util::volume(util::toDims(shape)));
-
+
         auto new_dims = util::toDims(new_shape);
         LOG_DEBUG("Input shape is less than 4D got: " << util::toDims(shape) << ", inserting shuffle layer to reshape to 4D tensor shape: " << new_dims);
         auto in_shuffle = ctx->net->addShuffle(*in);
         in_shuffle->setReshapeDimensions(new_dims);
         in_shuffle->setName((util::node_info(n) + " [Input Reshape to " + util::toStr(new_dims) + ']').c_str());
         in = in_shuffle->getOutput(0);
       }
-
+
       auto w_tensor = args[1].IValue()->toTensor();
       Weights w = Weights(ctx, w_tensor);
 
@@ -50,13 +50,10 @@ auto linear_registrations = RegisterNodeConversionPatterns()
       }
 
       TRTORCH_CHECK(new_layer,"Unable to create linear layer from node: " << *n);
-
+
       new_layer->setName(util::node_info(n).c_str());
-      auto out_value = n->outputs()[0];
-      auto out_tensor = new_layer->getOutput(0);
-
-      out_tensor->setName(out_value->debugName().c_str());
-      ctx->value_tensor_map[out_value] = out_tensor;
+      auto out_tensor = associate_value_and_tensor(ctx, n->outputs()[0], new_layer->getOutput(0));
+
       LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
 
       return true;
@@ -67,4 +64,4 @@ auto linear_registrations = RegisterNodeConversionPatterns()
 } // namespace converters
 } // namespace conversion
 } // namespace core
-} // trtorch
+} // trtorch
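
Note: the creation of the "linear layer" checked above falls between the two hunks, so it is not visible here. A hedged guess at what it looks like, assuming TensorRT's fully connected layer (which consumes the trailing three dimensions of its input, hence the earlier reshape to 4D; the bias weights `b` are hypothetical):

  // Assumed layer creation between the hunks (sketch, not the verbatim source).
  nvinfer1::ILayer* new_layer = ctx->net->addFullyConnected(*in, w.num_output_maps, w.data, b.data);
  TRTORCH_CHECK(new_layer, "Unable to create linear layer from node: " << *n);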
