 #include "core/util/prelude.h"

 #include "c10/util/intrusive_ptr.h"
+#include "core/conversion/converters/converter_util.h"
 #include "core/conversion/tensorcontainer/TensorContainer.h"
+#include "core/util/trt_util.h"

 namespace torch_tensorrt {
 namespace core {
@@ -212,6 +214,21 @@ void MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outp
           LOG_INFO(
               ctx->logger, "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
           ctx->num_outputs += 1;
+        } else if (out_ivalue.isTuple()) {
+          TORCHTRT_THROW_ERROR("Tuple type. Only a single tensor or a TensorList type is supported.");
+        } else if (out_ivalue.isList()) {
+          TORCHTRT_THROW_ERROR("List type. Only a single tensor or a TensorList type is supported.");
+        } else if (out_ivalue.isScalar()) {
+          TORCHTRT_THROW_ERROR("Scalar type. Only a single tensor or a TensorList type is supported.");
+        } else if (out_ivalue.isTensor()) {
+          // Constant tensor outputs (e.g. produced by prim::NumToTensor) end up here.
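+          // tensor_to_const (from converter_util.h) adds the at::Tensor to the network as a constant layer and returns the corresponding ITensor.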
+          std::string name = std::string("output_") + std::to_string(ctx->num_outputs);
+          auto out_tensor = converters::tensor_to_const(ctx, out_ivalue.toTensor(), "");
+          out_tensor->setName(name.c_str());
+          ctx->net->markOutput(*out_tensor);
+          LOG_INFO(
+              ctx->logger, "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
+          ctx->num_outputs += 1;
         } else {
           TORCHTRT_THROW_ERROR("Unknown output type. Only a single tensor or a TensorList type is supported.");
         }
@@ -364,6 +381,7 @@ void ConvertBlockToNetDef(
     ConversionInfo& build_info,
     ir::StaticParams& static_params) {
   LOG_INFO(ctx->logger, "Converting Block");
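+  // Log the full graph being converted at debug verbosity.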
+  LOG_DEBUG(ctx->logger, *b->owningGraph());

   auto inputs = b->inputs();
   AddParamsToCtxValueMap(ctx, static_params);
@@ -508,24 +526,37 @@ bool VerifyConverterSupportForBlock(const torch::jit::Block* b, bool suppress_er
     unsupported_msg << "https://www.github.com/nvidia/Torch-TensorRT/issues" << std::endl;
     unsupported_msg << std::endl << "In Module:" << std::endl;

-    if (suppress_errors) {
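+    // Log the summary only when error suppression is disabled.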
+    if (!suppress_errors) {
       LOG_ERROR(unsupported_msg.str());
     }

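+    // Group unsupported operators by name; each entry collects the unique PyTorch source locations where the op appears.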
+    std::unordered_map<std::string, std::unordered_set<std::string>> unsupported_node_locations;
     for (const auto n : b->nodes()) {
       auto schema = n->maybeSchema();
       if (schema) {
         for (const auto& x : unsupported_ops) {
           if (x.first == schema->operator_name()) {
-            if (suppress_errors) {
-              LOG_ERROR(
-                  "Unsupported operator: " << *schema << std::endl
-                                           << torch_tensorrt::core::util::GetPyTorchSourceCode(n) << std::endl);
+            auto loc = unsupported_node_locations.find(x.second);
+            if (loc == unsupported_node_locations.end()) {
+              unsupported_node_locations.insert({x.second, {torch_tensorrt::core::util::GetPyTorchSourceCode(n)}});
+            } else {
+              loc->second.insert(torch_tensorrt::core::util::GetPyTorchSourceCode(n));
             }
           }
         }
       }
     }
+
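+    // Report each unsupported operator once, with every source location that uses it.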
+    for (const auto& type : unsupported_node_locations) {
+      std::stringstream traceback;
+      traceback << "Unsupported operator: " << type.first << std::endl;
+      for (const auto& str : type.second) {
+        traceback << str;
+      }
+      auto tb_str = traceback.str();
+      LOG_ERROR(tb_str);
+    }
+
     return false;
   }

@@ -537,7 +568,7 @@ bool VerifyConverterSupportForBlock(const torch::jit::Block* b, bool suppress_er
     unsupported_msg
         << "This may be because there are no operators that can be added to the TensorRT graph or all operators have a resolved compile time value."
         << std::endl;
-    if (suppress_errors) {
+    if (!suppress_errors) {
       LOG_ERROR(unsupported_msg.str());
     }
     return false;