Skip to content

Commit 1ad2fc4

Browse files
committed
Merge branch 'master' into anuragd/adapt_new_threshold_criteria
2 parents 0b0ba8d + ba9f730 commit 1ad2fc4

File tree

13 files changed

+826
-216
lines changed

13 files changed

+826
-216
lines changed

README.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@ Resources:
1212
- [Comprehensive Discussion (GTC Event)](https://www.nvidia.com/en-us/on-demand/session/gtcfall21-a31107/)
1313
- [Pre-built Docker Container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch). To use this container, make an NGC account and sign in to NVIDIA's registry with an API key. Refer to [this guide](https://docs.nvidia.com/ngc/ngc-catalog-user-guide/index.html#registering-activating-ngc-account) for the same.
1414

15+
## NVIDIA NGC Container
16+
Torch-TensorRT is distributed in the ready-to-run NVIDIA [NGC PyTorch Container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch) starting with 21.11. We recommend using this prebuilt container to experiment & develop with Torch-TensorRT; it has all dependencies with the proper versions as well as example notebooks included.
1517

1618
## Building a docker container for Torch-TensorRT
1719

core/conversion/conversion.cpp

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,8 @@ void AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
105105
// Node input has not been converted yet or is a prim op
106106
TORCHTRT_THROW_ERROR(
107107
"Unable to retrieve all node inputs for node: "
108-
<< util::node_info(n) << " (ctx.AddLayer)\nSpecifically failed to retrieve value for input: " << *input_node);
108+
<< util::node_info(n) << " (ctx.AddLayer)\nSpecifically failed to retrieve value for input: %"
109+
<< input->debugName());
109110
}
110111
}
111112

@@ -534,18 +535,22 @@ bool VerifyConverterSupportForBlock(const torch::jit::Block* b, bool suppress_er
534535
if (unsupported_ops.size() != 0) {
535536
std::stringstream unsupported_msg;
536537
unsupported_msg
537-
<< "Method requested cannot be compiled by Torch-TensorRT.TorchScript.\nUnsupported operators listed below:"
538+
<< "Method requested cannot be compiled end to end by Torch-TensorRT.TorchScript.\nUnsupported operators listed below:"
538539
<< std::endl;
539540
for (auto s : unsupported_ops) {
540541
unsupported_msg << " - " << s.second << std::endl;
541542
}
542-
unsupported_msg << "You can either implement converters for these ops in your application or request implementation"
543-
<< std::endl;
544-
unsupported_msg << "https://www.github.com/nvidia/Torch-TensorRT/issues" << std::endl;
545-
unsupported_msg << std::endl << "In Module:" << std::endl;
546543

547544
if (!suppress_errors) {
545+
unsupported_msg
546+
<< "You can either implement converters for these ops in your application or request implementation"
547+
<< std::endl;
548+
unsupported_msg << "https://www.github.com/nvidia/Torch-TensorRT/issues" << std::endl;
549+
unsupported_msg << std::endl << "In Module:" << std::endl;
550+
548551
LOG_ERROR(unsupported_msg.str());
552+
} else {
553+
LOG_INFO(unsupported_msg.str());
549554
}
550555

551556
std::unordered_map<std::string, std::unordered_set<std::string>> unsupported_node_locations;
@@ -572,7 +577,11 @@ bool VerifyConverterSupportForBlock(const torch::jit::Block* b, bool suppress_er
572577
traceback << str;
573578
}
574579
auto tb_str = traceback.str();
575-
LOG_ERROR(tb_str);
580+
if (!suppress_errors) {
581+
LOG_ERROR(tb_str);
582+
} else {
583+
LOG_DEBUG(tb_str);
584+
}
576585
}
577586

578587
return false;

core/conversion/evaluators/aten.cpp

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
#include <math.h>
2+
13
#include "ATen/core/List.h"
24
#include "ATen/core/functional.h"
35
#include "ATen/core/ivalue.h"
@@ -98,6 +100,17 @@ DEFINE_GENERIC_TWO_INPUT_EVALUATOR(
98100
"aten::ge.float_int(float a, int b) -> (bool)",
99101
}));
100102

103+
DEFINE_ARITHMATIC_TWO_INPUT_EVALUATOR(
104+
pow,
105+
"aten::pow",
106+
pow(a, b),
107+
std::set<std::string>({
108+
"aten::pow.int(int a, int b) -> (float)",
109+
"aten::pow.float(float a, float b) -> (float)",
110+
"aten::pow.int_float(int a, float b) -> (float)",
111+
"aten::pow.float_int(float a, int b) -> (float)",
112+
}));
113+
101114
DEFINE_TWO_INPUT_SIMPLE_EVALUATOR(
102115
and,
103116
"aten::__and__",

core/conversion/evaluators/eval_macros.h

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,53 @@
7777
}, \
7878
EvalOptions().validSchemas(schemas)});
7979

80+
#define DEFINE_ARITHMATIC_TWO_INPUT_EVALUATOR(name, node_kind, operation, schemas) \
81+
auto name##_registrations TORCHTRT_UNUSED = RegisterNodeEvaluators().evaluator( \
82+
{c10::Symbol::fromQualString(node_kind), \
83+
[](const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> { \
84+
if (args.at(n->input(0)).IValue()->isInt()) { \
85+
auto a = args.at(n->input(0)).unwrapToInt(); \
86+
if (args.at(n->input(1)).IValue()->isInt()) { \
87+
auto b = args.at(n->input(1)).unwrapToInt(); \
88+
return operation; \
89+
} else if (args.at(n->input(1)).IValue()->isDouble()) { \
90+
auto b = args.at(n->input(1)).unwrapToDouble(); \
91+
return operation; \
92+
} else if (args.at(n->input(1)).IValue()->isBool()) { \
93+
auto b = args.at(n->input(1)).unwrapToBool(); \
94+
return operation; \
95+
} else { \
96+
TORCHTRT_THROW_ERROR( \
97+
"Unimplemented data type for " \
98+
<< node_kind << " evaluator b arg:" << args.at(n->input(1)).IValue()->type()->str()); \
99+
return {}; \
100+
} \
101+
} else if (args.at(n->input(0)).IValue()->isDouble()) { \
102+
auto a = args.at(n->input(0)).unwrapToDouble(); \
103+
if (args.at(n->input(1)).IValue()->isInt()) { \
104+
auto b = args.at(n->input(1)).unwrapToInt(); \
105+
return operation; \
106+
} else if (args.at(n->input(1)).IValue()->isDouble()) { \
107+
auto b = args.at(n->input(1)).unwrapToDouble(); \
108+
return operation; \
109+
} else if (args.at(n->input(1)).IValue()->isBool()) { \
110+
auto b = args.at(n->input(1)).unwrapToBool(); \
111+
return operation; \
112+
} else { \
113+
TORCHTRT_THROW_ERROR( \
114+
"Unimplemented data type for " \
115+
<< node_kind << " evaluator b arg:" << args.at(n->input(1)).IValue()->type()->str()); \
116+
return {}; \
117+
} \
118+
} else { \
119+
TORCHTRT_THROW_ERROR( \
120+
"Unimplemented data type for " \
121+
<< node_kind << " evaluator a arg: " << args.at(n->input(0)).IValue()->type()->str()); \
122+
return {}; \
123+
} \
124+
}, \
125+
EvalOptions().validSchemas(schemas)});
126+
80127
#define DEFINE_TWO_INPUT_SIMPLE_EVALUATOR(node_kind, node_name, operation, type, schemas) \
81128
auto node_kind##_registrations TORCHTRT_UNUSED = RegisterNodeEvaluators().evaluator( \
82129
{c10::Symbol::fromQualString(node_name), \

docsrc/tutorials/installation.rst

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@ You need to download the tarball distributions of TensorRT and cuDNN from the NV
116116
* https://developer.nvidia.com/cudnn
117117
* https://developer.nvidia.com/tensorrt
118118

119-
Place these files in a directory (the directories ``thrid_party/distdir/[x86_64-linux-gnu | aarch64-linux-gnu]`` exist for this purpose)
119+
Place these files in a directory (the directories ``third_party/distdir/[x86_64-linux-gnu | aarch64-linux-gnu]`` exist for this purpose)
120120

121121
Then compile referencing the directory with the tarballs
122122

@@ -127,7 +127,7 @@ Release Build
127127

128128
.. code-block:: shell
129129
130-
bazel build //:libtorchtrt -c opt --distdir thrid_party/distdir/[x86_64-linux-gnu | aarch64-linux-gnu]
130+
bazel build //:libtorchtrt -c opt --distdir third_party/distdir/[x86_64-linux-gnu | aarch64-linux-gnu]
131131
132132
A tarball with the include files and library can then be found in ``bazel-bin``
133133

@@ -140,7 +140,7 @@ To build with debug symbols use the following command
140140

141141
.. code-block:: shell
142142
143-
bazel build //:libtorchtrt -c dbg --distdir thrid_party/distdir/[x86_64-linux-gnu | aarch64-linux-gnu]
143+
bazel build //:libtorchtrt -c dbg --distdir third_party/distdir/[x86_64-linux-gnu | aarch64-linux-gnu]
144144
145145
A tarball with the include files and library can then be found in ``bazel-bin``
146146

@@ -151,7 +151,7 @@ To build using the pre-CXX11 ABI use the ``pre_cxx11_abi`` config
151151

152152
.. code-block:: shell
153153
154-
bazel build //:libtorchtrt --config pre_cxx11_abi -c [dbg/opt] --distdir thrid_party/distdir/[x86_64-linux-gnu | aarch64-linux-gnu]
154+
bazel build //:libtorchtrt --config pre_cxx11_abi -c [dbg/opt] --distdir third_party/distdir/[x86_64-linux-gnu | aarch64-linux-gnu]
155155
156156
A tarball with the include files and library can then be found in ``bazel-bin``
157157

notebooks/Resnet50-example.ipynb

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
"cell_type": "markdown",
2727
"metadata": {},
2828
"source": [
29-
"<img src=\"http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png\" style=\"width: 90px; float: right;\">\n",
29+
"<img src=\"https://developer.download.nvidia.com/tesla/notebook_assets/nv_logo_torch_trt_resnet_notebook.png\" style=\"width: 90px; float: right;\">\n",
3030
"\n",
3131
"# Torch-TensorRT Getting Started - ResNet 50"
3232
]
@@ -63,7 +63,9 @@
6363
{
6464
"cell_type": "code",
6565
"execution_count": 2,
66-
"metadata": {},
66+
"metadata": {
67+
"collapsed": true
68+
},
6769
"outputs": [
6870
{
6971
"name": "stdout",
@@ -214,7 +216,7 @@
214216
"cell_type": "code",
215217
"execution_count": 3,
216218
"metadata": {
217-
"scrolled": true
219+
"collapsed": true
218220
},
219221
"outputs": [
220222
{
@@ -446,7 +448,7 @@
446448
"cell_type": "code",
447449
"execution_count": 4,
448450
"metadata": {
449-
"scrolled": true
451+
"collapsed": true
450452
},
451453
"outputs": [
452454
{

0 commit comments

Comments (0)