Skip to content

Commit de8659b

Browse files
committed
refactor(//core/conversion): Now check to make sure all node outputs
have tensor matches, and move to checking with exceptions instead of bool returns for the conversion stage Signed-off-by: Naren Dasan <[email protected]> Signed-off-by: Naren Dasan <[email protected]>
1 parent 298b5bb commit de8659b

File tree

1 file changed

+34
-32
lines changed

1 file changed

+34
-32
lines changed

core/conversion/conversion.cpp

Lines changed: 34 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ namespace trtorch {
1010
namespace core {
1111
namespace conversion {
1212

13-
// Defined in core/conversion/conversion_blacklist.cpp
13+
// Defined in core/conversion/conversion_blacklist.cpp
1414
bool isNodeConversionBlacklisted(const torch::jit::Node* n);
1515

1616
bool OpSupported(const torch::jit::Node* n) {
@@ -24,8 +24,8 @@ c10::optional<torch::jit::IValue> EvaluateNode(ConversionCtx* ctx, const torch::
2424
// Also probably a better way to deal with the two error cases;
2525
TRTORCH_CHECK(level < limit, "Failed to evaluate node: " << *n \
2626
<< "Reason: Exceeded evaluation stack limit (limit=" \
27-
<< limit << ")");
28-
27+
<< limit << ")");
28+
2929
LOG_DEBUG(ctx->logger, "Evaluating " << util::node_info(n));
3030
evaluators::kwargs eval_args;
3131
for (auto eval_in : n->inputs()) {
@@ -55,7 +55,7 @@ c10::optional<torch::jit::IValue> EvaluateNode(ConversionCtx* ctx, const torch::
5555
return eval;
5656
}
5757

58-
bool AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
58+
void AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
5959
LOG_INFO(ctx->logger,
6060
"Adding Layer " << util::node_info(n) << " (ctx.AddLayer)");
6161
converters::args node_args;
@@ -87,36 +87,34 @@ bool AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
8787
TRTORCH_THROW_ERROR("Unable to retrieve all node inputs for node: " \
8888
<< util::node_info(n) << " (ctx.AddLayer)\nSpecifically failed to retrieve value for input: " \
8989
<< *input_node);
90-
return false;
9190
}
92-
9391
}
9492

9593
if (n->inputs().size() != node_args.size()) {
9694
TRTORCH_THROW_ERROR("Unable to retrieve all node inputs for node: " << *n);
97-
return false;
9895
}
9996

100-
97+
10198
auto schema = n->maybeSchema();
10299
TRTORCH_CHECK(schema, "Unable to get schema for Node " << util::node_info(n) \
103100
<< " (conversion.AddLayer)");
104-
101+
105102
auto converter = converters::get_node_converter_for(schema);
106103
TRTORCH_CHECK(converter, "Unable to convert node: " << util::node_info(n) \
107104
<< " (conversion.AddLayer)\nSchema: " << *schema
108105
<< "\nConverter for " << schema->name()
109106
<< " requested, but no such converter was found.\nIf you need a converter for this operator, you can try implementing one yourself\n"
110-
<< "or request a converter: https://www.github.com/NVIDIA/TRTorch/issues");
111-
converter(ctx, n, node_args);
107+
<< "or request a converter: https://www.github.com/NVIDIA/TRTorch/issues");
112108

113-
return true;
109+
TRTORCH_CHECK(converter(ctx, n, node_args),
110+
"Converter for " << *schema << " failed to convert node: "
111+
<< util::node_info(n) << "please report this error to https://www.github.com/NVIDIA/TRTorch/issues");
114112
}
115113

116-
bool AddInputs(ConversionCtx* ctx,
114+
void AddInputs(ConversionCtx* ctx,
117115
at::ArrayRef<const torch::jit::Value*> inputs,
118116
std::vector<InputRange>& input_dims) {
119-
117+
120118
auto type_lut = torch::jit::script::string_to_type_lut();
121119
std::vector<const torch::jit::Value*> input_tensors;
122120
for (auto in : inputs) {
@@ -130,15 +128,15 @@ bool AddInputs(ConversionCtx* ctx,
130128
input_tensors.push_back(in);
131129
}
132130
}
133-
131+
134132
TRTORCH_CHECK(input_tensors.size() == input_dims.size(),
135133
"Expected dimension specifications for all input tensors" \
136134
<< ", but found " << input_tensors.size() \
137135
<< " input tensors and " \
138136
<< input_dims.size() << "dimension specs (conversion.AddInputs)");
139137

140138
auto profile = ctx->builder->createOptimizationProfile();
141-
139+
142140
for (size_t i = 0; i < input_tensors.size(); i++) {
143141
auto in = input_tensors[i];
144142
auto dims = input_dims[i];
@@ -158,20 +156,23 @@ bool AddInputs(ConversionCtx* ctx,
158156
}
159157

160158
TRTORCH_CHECK(profile->isValid(), "Optimization profile is invalid, please check the input range provided (conversion.AddInputs)");
161-
159+
162160
ctx->cfg->addOptimizationProfile(profile);
163-
return true;
164161
}
165162

166-
bool MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outputs) {
163+
void MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outputs) {
167164
for (auto out : outputs) {
168-
ctx->net->markOutput(*(ctx->value_tensor_map[out]));
165+
auto it = ctx->value_tensor_map.find(out);
166+
// Leaves the potential for unused outputs to be populated with nullptr "safely"
167+
TRTORCH_CHECK(it != ctx->value_tensor_map.end() && it->second,
168+
"No corresponding output TRT Tensor found for TorchScript output: " << out->debugName());
169+
auto out_tensor = it->second;
170+
ctx->net->markOutput(*out_tensor);
169171
LOG_INFO(ctx->logger,
170172
"Marking Output " << out->debugName() << " (ctx.MarkOutput)");
171173
}
172-
return true;
173174
}
174-
175+
175176
void AddParamsToCtxValueMap(ConversionCtx* ctx, GraphParams& params) {
176177
for (auto p : params) {
177178
ctx->evaluated_value_map[p.first] = torch::jit::IValue(p.second.clone());
@@ -191,13 +192,8 @@ void ConvertBlockToNetDef(ConversionCtx* ctx, const torch::jit::Block* b, ExtraI
191192
bool to_eval = evaluators::shouldEvalAtConversionTime(n);
192193
bool blacklisted = isNodeConversionBlacklisted(n);
193194
if (!to_eval && !blacklisted) {
194-
if (!AddLayer(ctx, n)) {
195-
//TODO: Exception things
196-
LOG_ERROR(ctx->logger,
197-
"Failed to add layer: " << *n \
198-
<< " (ctx.AddLayer)");
199-
return;
200-
}
195+
// Should error out if something fails
196+
AddLayer(ctx, n);
201197
} else {
202198
std::string reason = "";
203199
if (to_eval) {
@@ -207,7 +203,13 @@ void ConvertBlockToNetDef(ConversionCtx* ctx, const torch::jit::Block* b, ExtraI
207203
reason += " (explicitly blacklisted)";
208204
}
209205
LOG_DEBUG(ctx->logger,
210-
"Skipping Node: " << (n->kind().toQualString()) << reason);
206+
"Skipping Node: " << util::node_info(n) << reason);
207+
}
208+
}
209+
210+
for (const auto n : nodes) {
211+
if (converters::node_is_convertable(n)) {
212+
ctx->CheckLayerAddition(n);
211213
}
212214
}
213215

@@ -218,7 +220,7 @@ void ConvertBlockToNetDef(ConversionCtx* ctx, const torch::jit::Block* b, ExtraI
218220
// Converts a already lowered block (blocks with no sub blocks) to
219221
// a serialized TensorRT engine that can be deserialized and run
220222

221-
// Probably should consolidate these two functions
223+
// Probably should consolidate these two functions
222224
std::string ConvertBlockToEngine(const torch::jit::Block* b, ExtraInfo build_info, GraphParams& static_params) {
223225
ConversionCtx ctx(build_info.engine_settings);
224226
ConvertBlockToNetDef(&ctx, b, build_info, static_params);
@@ -247,7 +249,7 @@ bool VerifyConverterSupportForBlock(const torch::jit::Block* b) {
247249
for (auto s : unsupported_ops) {
248250
unsupported_msg << " - " << s << std::endl;
249251
}
250-
unsupported_msg << "You can either implement converters for these ops in your application or file a bug" << std::endl;
252+
unsupported_msg << "You can either implement converters for these ops in your application or request implementation" << std::endl;
251253
unsupported_msg << "https://www.github.com/nvidia/TRTorch/issues" << std::endl;
252254
LOG_ERROR(unsupported_msg.str());
253255
}

0 commit comments

Comments
 (0)