
Commit 6ab9814

feat(//core/conversion/conversionctx): Move inline function to associate values and tensors to a method in the conversion ctx

Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>

1 parent: 8ad8fa7 · commit: 6ab9814

10 files changed: +59 −45 lines changed

core/conversion/conversionctx/ConversionCtx.cpp

Lines changed: 20 additions & 3 deletions
@@ -37,11 +37,11 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
     switch(settings.op_precision) {
     case nvinfer1::DataType::kHALF:
         cfg->setFlag(nvinfer1::BuilderFlag::kFP16);
-        input_type = nvinfer1::DataType::kHALF;
+        input_type = nvinfer1::DataType::kHALF;
         break;
     // case nvinfer1::DataType::kINT8:
     //     cfg->setFlag(nvinfer1::BuilderFlag::kINT8);
-    //     input_type = nvinfer1::DataType::kFLOAT;
+    //     input_type = nvinfer1::DataType::kFLOAT;
     //     break;
     case nvinfer1::DataType::kFLOAT:
     default:
@@ -80,13 +80,30 @@ ConversionCtx::~ConversionCtx() {
         free(ptr);
     }
 }
-
+
+nvinfer1::ITensor* ConversionCtx::AssociateValueAndTensor(const torch::jit::Value* value, nvinfer1::ITensor* tensor) {
+    tensor->setName(value->debugName().c_str());
+    this->value_tensor_map[value] = tensor;
+    return tensor;
+}
+
 std::string ConversionCtx::SerializeEngine() {
     auto engine = builder->buildEngineWithConfig(*net, *cfg);
     auto serialized_engine = engine->serialize();
     return std::string((const char*)serialized_engine->data(), serialized_engine->size());
 }
 
+bool ConversionCtx::CheckLayerAddition(const torch::jit::Node* n) {
+    for (auto out : n->outputs()) {
+        auto iter = this->value_tensor_map.find(out);
+        if (iter == this->value_tensor_map.end()) {
+            LOG_WARNING("Node " << util::node_info(n) << " output: " << out->debugName() << " does not have a coresponding output, may potentially indicate a defective converter");
+            return false;
+        }
+    }
+    return true;
+}
+
 } // namespace conversion
 } // namespace core
 } // namespace trtorch
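The hunk above adds CheckLayerAddition but does not show a call site for it. As a rough sketch of how a conversion driver could use the two new methods together (the AddLayer wrapper below is hypothetical and not part of this commit; it assumes the two headers touched by this commit plus the repository's TRTORCH_CHECK and util::node_info helpers):

    // Sketch only: hypothetical driver code, not introduced by this commit.
    #include "core/conversion/conversionctx/ConversionCtx.h"
    #include "core/conversion/converters/converters.h"

    namespace trtorch {
    namespace core {
    namespace conversion {

    // Run a registered converter for a node, then verify that every output
    // torch::jit::Value was recorded in value_tensor_map via AssociateValueAndTensor.
    void AddLayer(ConversionCtx* ctx, const torch::jit::Node* n,
                  converters::OpConverter converter, converters::args& node_args) {
        TRTORCH_CHECK(converter(ctx, n, node_args),
                      "Converter for node " << util::node_info(n) << " reported failure");
        // CheckLayerAddition warns about outputs with no associated ITensor,
        // which usually points at a defective converter.
        TRTORCH_CHECK(ctx->CheckLayerAddition(n),
                      "Node " << util::node_info(n) << " did not register all of its outputs");
    }

    } // namespace conversion
    } // namespace core
    } // namespace trtorch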

core/conversion/conversionctx/ConversionCtx.h

Lines changed: 8 additions & 5 deletions
@@ -30,12 +30,15 @@ struct BuilderSettings {
 
     BuilderSettings() = default;
     BuilderSettings(const BuilderSettings& other) = default;
-    friend std::ostream& operator<<(std::ostream& os, const BuilderSettings& s);
+    friend std::ostream& operator<<(std::ostream& os, const BuilderSettings& s);
 };
-
+
 struct ConversionCtx {
     ConversionCtx(BuilderSettings settings);
     std::string SerializeEngine();
+    nvinfer1::ITensor* AssociateValueAndTensor(const torch::jit::Value* value, nvinfer1::ITensor* tensor);
+    bool CheckLayerAddition(const torch::jit::Node* n);
+
     ~ConversionCtx();
 
     nvinfer1::IBuilder* builder;
@@ -50,12 +53,12 @@ struct ConversionCtx {
     // is constructed from a PyTorch Tensor it allocates the data here to store a
    // copy of the values
    std::vector<void*> builder_resources;
-
+
    std::unordered_map<const torch::jit::Value*, nvinfer1::ITensor*> value_tensor_map;
    std::unordered_map<const torch::jit::Value*, torch::jit::IValue> evaluated_value_map;
 };
 
-} // namespace conversion
+} // namespace conversion
 } // namespace core
 } // namespace trtorch
-
+
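With the two declarations above, a converter records its outputs through the context instead of the free function removed from converters.h below. A minimal sketch of a converter written against the new interface (illustrative only; the aten::relu pattern is hypothetical, and the real activation converters in this commit are generated by the macro in activation.cpp; the repository's converters.h and its util/logging helpers are assumed):

    // Sketch only: illustrative converter using the new ConversionCtx methods.
    #include "core/conversion/converters/converters.h"

    namespace trtorch {
    namespace core {
    namespace conversion {
    namespace converters {
    namespace {

    auto relu_registrations = RegisterNodeConversionPatterns()
        .pattern({
            "aten::relu(Tensor self) -> (Tensor)",
            [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
                auto in = args[0].ITensor();
                auto new_layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kRELU);
                TRTORCH_CHECK(new_layer, "Unable to create ReLU layer from node: " << *n);
                new_layer->setName(util::node_info(n).c_str());
                // New in this commit: register the output on the context so that
                // value_tensor_map is populated and CheckLayerAddition passes.
                ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
                return true;
            }
        });

    } // namespace
    } // namespace converters
    } // namespace conversion
    } // namespace core
    } // namespace trtorch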

core/conversion/converters/converters.h

Lines changed: 4 additions & 10 deletions
@@ -69,12 +69,12 @@ class Arg {
     ArgContainer ptr_;
     Type type_;
 };
-
-
+
+
 
 typedef std::vector<Arg> args;
 typedef std::function<bool(ConversionCtx*, const torch::jit::Node*, args&)> OpConverter;
-struct ConversionPattern {
+struct ConversionPattern {
     std::string signature;
     OpConverter converter;
 };
@@ -107,20 +107,14 @@ struct Weights {
     Weights();
     Weights(ConversionCtx* ctx, at::Tensor t);
     Weights(ConversionCtx* ctx, float val);
-    friend std::ostream& operator<<(std::ostream& os, const Weights& w);
+    friend std::ostream& operator<<(std::ostream& os, const Weights& w);
 };
 
 inline nvinfer1::ITensor* tensor_to_const(ConversionCtx* ctx, at::Tensor t) {
     auto t_weights = Weights(ctx, t);
     return ctx->net->addConstant(t_weights.shape, t_weights.data)->getOutput(0);
 }
 
-inline nvinfer1::ITensor* associate_value_and_tensor(ConversionCtx* ctx, const torch::jit::Value* value, nvinfer1::ITensor* tensor) {
-    tensor->setName(value->debugName().c_str());
-    ctx->value_tensor_map[value] = tensor;
-    return tensor;
-}
-
 } // namespace converters
 } // namespace conversion
 } // namespace core

core/conversion/converters/impl/activation.cpp

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ namespace {
                    "Unable to create " #act " layer from node: " << *n);          \
                                                                                    \
        new_layer->setName(util::node_info(n).c_str());                             \
-       associate_value_and_tensor(ctx, n->outputs()[0], new_layer->getOutput(0));  \
+       ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));     \
        LOG_DEBUG("Output tensor shape: "                                           \
                  << new_layer->getOutput(0)->getDimensions());                     \
                                                                                    \

core/conversion/converters/impl/batch_norm.cpp

Lines changed: 8 additions & 8 deletions
@@ -32,10 +32,10 @@ bool ConvertConvBatchNorm(ConversionCtx* ctx, const torch::jit::Node* n, args& a
 
     auto bn_as_conv = ctx->net->addConvolutionNd(*input, weights.num_output_maps, weights.kernel_shape, weights.data, bias.data);
     TRTORCH_CHECK(bn_as_conv, "Unable to create fused batch norm from node: " << *n);
-
+
     bn_as_conv->setName(util::node_info(n).c_str());
 
-    auto bn_out = associate_value_and_tensor(ctx, n->outputs()[0], bn_as_conv->getOutput(0));
+    auto bn_out = ctx->AssociateValueAndTensor(n->outputs()[0], bn_as_conv->getOutput(0));
     LOG_DEBUG("Output tensor shape: " << bn_out->getDimensions());
     return true;
 }
@@ -67,25 +67,25 @@ bool ConvertLinearBatchNorm(ConversionCtx* ctx, const torch::jit::Node* n, args&
     auto bn_biased_out = bn_biased->getOutput(0);
 
     bn_biased->setName(util::node_info(n).c_str());
-    associate_value_and_tensor(ctx, n->outputs()[0], bn_biased_out);
+    ctx->AssociateValueAndTensor(n->outputs()[0], bn_biased_out);
 
     return true;
 }
 
 volatile auto batch_norm_registrations = RegisterNodeConversionPatterns()
     .pattern({
-        R"SIG(aten::batch_norm(Tensor input, Tensor? gamma, Tensor? beta,
-                               Tensor? mean, Tensor? var,
+        R"SIG(aten::batch_norm(Tensor input, Tensor? gamma, Tensor? beta,
+                               Tensor? mean, Tensor? var,
                                bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor))SIG",
         [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
             auto input = args[0].ITensor();
             auto shape = input->getDimensions();
             auto gamma = args[1].unwrapToTensor();
-
+
             if (/*training*/ args[5].unwrapToBool()) {
                 LOG_WARNING("TensorRT only converts forward pass of graphs, but saw training = True, may see undefined behavior, consider placing module in eval mode");
             }
-
+
             // If gamma is None this fails
             if (util::volume(shape) == gamma.numel()) {
                 return ConvertLinearBatchNorm(ctx, n, args);
@@ -101,4 +101,4 @@ volatile auto batch_norm_registrations = RegisterNodeConversionPatterns()
 } // namespace converters
 } // namespace conversion
 } // namespace core
-} // namespace trtorch
+} // namespace trtorch

core/conversion/converters/impl/constant.cpp

Lines changed: 3 additions & 3 deletions
@@ -19,10 +19,10 @@ auto constant_registrations = RegisterNodeConversionPatterns()
             auto t_weights = Weights(ctx, t);
             auto const_layer = ctx->net->addConstant(t_weights.shape, t_weights.data);
             const_layer->setName(util::node_info(n).c_str());
-            auto const_out = associate_value_and_tensor(ctx, n->outputs()[0], const_layer->getOutput(0));
+            auto const_out = ctx->AssociateValueAndTensor(n->outputs()[0], const_layer->getOutput(0));
 
             LOG_DEBUG("Output tensor shape: " << const_out->getDimensions());
-
+
             return true;
         }
     });
@@ -31,5 +31,5 @@ auto constant_registrations = RegisterNodeConversionPatterns()
 } // namespace converters
 } // namespace conversion
 } // namespace core
-} // namespace trtorch
+} // namespace trtorch
 

core/conversion/converters/impl/conv_deconv.cpp

Lines changed: 11 additions & 11 deletions
@@ -9,14 +9,14 @@ namespace impl {
 namespace {
 auto conv_registrations = RegisterNodeConversionPatterns()
     .pattern({
-        R"SIG(aten::_convolution(Tensor input, Tensor weight,
+        R"SIG(aten::_convolution(Tensor input, Tensor weight,
                                  Tensor? bias, int[] stride, int[] padding,
-                                 int[] dilation, bool transposed,
-                                 int[] output_padding, int groups, bool benchmark,
+                                 int[] dilation, bool transposed,
+                                 int[] output_padding, int groups, bool benchmark,
                                  bool deterministic, bool cudnn_enabled) -> (Tensor))SIG",
         [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
             auto in = args[0].ITensor();
-
+
             auto w = Weights(ctx, args[1].unwrapToTensor());
             auto stride = util::toDimsHW(args[3].unwrapToIntList());
             LOG_DEBUG("stride: " << stride);
@@ -27,11 +27,11 @@ auto conv_registrations = RegisterNodeConversionPatterns()
             bool transposed = args[6].unwrapToBool();
             auto out_padding = util::toDimsHW(args[7].unwrapToIntList());
             LOG_DEBUG("out_padding: " << out_padding);
-            int64_t groups = args[8].unwrapToInt();
-
+            int64_t groups = args[8].unwrapToInt();
+
             nvinfer1::ILayer* new_layer;
             if (transposed) {
-                //TODO: Check deconv correctness
+                //TODO: Check deconv correctness
                 LOG_WARNING(ctx->logger, "Deconvolution converter has not be tested");
                 nvinfer1::IDeconvolutionLayer* deconv;
                 if (args[2].IValue()->isTensor()) {
@@ -54,9 +54,9 @@ auto conv_registrations = RegisterNodeConversionPatterns()
                 } else {
                     conv = ctx->net->addConvolutionNd(*in, w.num_output_maps, w.kernel_shape, w.data, Weights().data);
                 }
-
+
                 TRTORCH_CHECK(conv, "Unable to create convolution layer from node: " << *n);
-
+
                 conv->setStrideNd(stride);
                 conv->setPaddingMode(nvinfer1::PaddingMode::kCAFFE_ROUND_DOWN);
                 conv->setPaddingNd(padding);
@@ -67,7 +67,7 @@ auto conv_registrations = RegisterNodeConversionPatterns()
             }
             new_layer->setName(util::node_info(n).c_str());
 
-            auto out = associate_value_and_tensor(ctx, n->outputs()[0], new_layer->getOutput(0));
+            auto out = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
 
             LOG_DEBUG("Output tensor shape: " << out->getDimensions());
 
@@ -79,4 +79,4 @@ auto conv_registrations = RegisterNodeConversionPatterns()
 } // namespace converters
 } // namespace conversion
 } // namespace core
-} // trtorch
+} // trtorch

core/conversion/converters/impl/linear.cpp

Lines changed: 1 addition & 1 deletion
@@ -52,7 +52,7 @@ auto linear_registrations = RegisterNodeConversionPatterns()
             TRTORCH_CHECK(new_layer,"Unable to create linear layer from node: " << *n);
 
             new_layer->setName(util::node_info(n).c_str());
-            auto out_tensor = associate_value_and_tensor(ctx, n->outputs()[0], new_layer->getOutput(0));
+            auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
 
             LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());

core/conversion/converters/impl/pooling.cpp

Lines changed: 2 additions & 2 deletions
@@ -52,7 +52,7 @@ auto pooling_registrations = RegisterNodeConversionPatterns()
             new_layer->setPaddingMode(padding_mode);
 
             new_layer->setName(util::node_info(n).c_str());
-            auto out_tensor = associate_value_and_tensor(ctx, n->outputs()[0], new_layer->getOutput(0));
+            auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
 
             LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
             return true;
@@ -97,7 +97,7 @@ auto pooling_registrations = RegisterNodeConversionPatterns()
             new_layer->setStrideNd(util::toDims(stride));
 
             new_layer->setName(util::node_info(n).c_str());
-            auto out_tensor = associate_value_and_tensor(ctx, n->outputs()[0], new_layer->getOutput(0));
+            auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
 
             LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
             return true;

core/conversion/converters/impl/unary.cpp

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ namespace {
                    "Unable to create " #unary " layer from node: " << *n);  \
                                                                             \
        unary->setName(util::node_info(n).c_str());                          \
-       auto out_tensor = associate_value_and_tensor(ctx,                    \
+       auto out_tensor = ctx->AssociateValueAndTensor(                      \
                                                     n->outputs()[0],        \
                                                     unary->getOutput(0));   \
        LOG_DEBUG(                                                           \
