Commit 0c42277

refactor: Finishing porting namespace
Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>
1 parent: 05b8376

File tree

197 files changed (+2226, -2067 lines)


.dockerignore

Lines changed: 63 additions & 0 deletions
@@ -0,0 +1,63 @@
+bazel-*
+*.tar.gz
+*.whl
+*.bin
+docs/v*/**/*
+env/
+bazel
+bazel-bazel-test
+bazel-bin
+bazel-genfiles
+bazel-out
+bazel-testlogs
+bazel-TRTorch
+bazel-trtorch-testing
+third_party/pytorch
+*.jit
+*.jit.pt
+.\#*
+experiments/
+py/build/
+py/tmp/
+py/.eggs
+.vscode/
+.DS_Store
+._DS_Store
+*.pth
+*.pyc
+cpp/ptq/training/vgg16/data/*
+*.bin
+cpp/ptq/datasets/data/
+tests/accuracy/datasets/data/*
+._.DS_Store
+*.tar.gz
+*.tgz
+docsrc/_build
+docsrc/_notebooks
+docsrc/_cpp_api
+docsrc/_tmp
+*.so
+__pycache__
+*.egg-info
+dist
+bdist
+py/torch_tensorrt/_version.py
+py/torch_tensorrt/lib
+py/torch_tensorrt/include
+py/torch_tensorrt/bin
+py/torch_tensorrt/BUILD
+py/torch_tensorrt/LICENSE
+py/torch_tensorrt/WORKSPACE
+py/wheelhouse
+py/.eggs
+notebooks/.ipynb_checkpoints/
+*.cache
+tests/py/data
+examples/**/deps/**/*
+examples/torchtrt_runtime_example/torchtrt_runtime_example
+examples/int8/ptq/ptq
+examples/int8/qat/qat
+examples/int8/training/vgg16/data/*
+examples/int8/datasets/data/*
+env/**/*
+*.ts

bzl_def/BUILD

Lines changed: 16 additions & 16 deletions
@@ -16,13 +16,13 @@ config_setting(
 )
 
 cc_library(
-    name = "libtrtorch",
+    name = "libtorchtrt",
     srcs = select({
         ":windows": [
-            "lib/x64/trtorch.dll",
+            "lib/x64/torchtrt.dll",
         ],
         "//conditions:default": [
-            "lib/libtrtorch.so",
+            "lib/libtorchtrt.so",
         ],
     }),
     hdrs = glob([
@@ -33,47 +33,47 @@ cc_library(
 )
 
 cc_library(
-    name = "libtrtorchrt",
+    name = "libtorchtrt_runtime",
     srcs = select({
         ":windows": [
-            "lib/x64/trtorchrt.dll"
+            "lib/x64/torchtrt_runtime.dll"
        ],
        "//conditions:default": [
-            "lib/libtrtorchrt.so"
+            "lib/libtorchtrt_runtime.so"
        ]
    })
 )
 
 cc_library(
-    name = "libtrtorch_plugins",
+    name = "libtorchtrt_plugins",
     srcs = select({
         ":windows": [
-            "lib/x64/trtorch_plugins.dll"
+            "lib/x64/torchtrt_plugins.dll"
        ],
        "//conditions:default": [
-            "lib/libtrtorch_plugins.so"
+            "lib/libtorchtrt_plugins.so"
        ]
    }),
    hdrs = glob([
-        "include/trtorch/core/plugins/**/*.h",
+        "include/torch_tensorrt/core/plugins/**/*.h",
    ]),
    strip_include_prefix = "include",
    includes = ["include/"]
 )
 
 cc_library(
-    name = "trtorch_core_hdrs",
+    name = "torch_tensorrt_core_hdrs",
    hdrs = glob([
-        "include/trtorch/core/**/*.h"
+        "include/torch_tensorrt/core/**/*.h"
    ]),
-    strip_include_prefix = "include/trtorch",
-    includes = ["include/trtorch/"]
+    strip_include_prefix = "include/torch_tensorrt",
+    includes = ["include/torch_tensorrt/"]
 )
 
 # Alias for ease of use
 cc_library(
-    name = "trtorch",
+    name = "torch_tensorrt",
    deps = [
-        ":libtrtorch",
+        ":libtorchtrt",
    ]
 )
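
As a usage illustration of the renamed targets: because strip_include_prefix moves from include/trtorch to include/torch_tensorrt, C++ consumers of the prebuilt-header targets keep short, prefix-free include paths. The sketch below is a hypothetical consumer translation unit; only the include layout and the namespace rename come from this commit.

// Hypothetical consumer of the prebuilt-header targets above.
// With strip_include_prefix = "include/torch_tensorrt", the prefix is dropped,
// so headers are included the same way the in-tree sources do elsewhere in this diff:
#include "core/compiler.h"              // e.g. include/torch_tensorrt/core/compiler.h
#include "core/conversion/conversion.h" // e.g. include/torch_tensorrt/core/conversion/conversion.h

// Core symbols now sit under torch_tensorrt::core rather than trtorch::core.
namespace ttrt_core = torch_tensorrt::core;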

bzl_def/WORKSPACE

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-workspace(name = "trtorch")
+workspace(name = "torch_tensorrt")

core/compiler.cpp

Lines changed: 5 additions & 5 deletions
@@ -25,7 +25,7 @@
 #include "core/partitioning/partitioning.h"
 #include "core/runtime/runtime.h"
 
-namespace trtorch {
+namespace torch_tensorrt {
 namespace core {
 
 void AddEngineToGraph(
@@ -356,7 +356,7 @@ std::string ConvertGraphToTRTEngine(const torch::jit::script::Module& mod, std::
   auto graph_and_parameters = lowering::Lower(mod, method_name, cfg.lower_info);
 
   auto g = graph_and_parameters.first;
-  TRTORCH_CHECK(
+  TORCHTRT_CHECK(
       conversion::VerifyConverterSupportForBlock(g->block()),
       "Not all operations in graph are supported by the compiler");
   auto params = graph_and_parameters.second;
@@ -429,7 +429,7 @@ torch::jit::Module CompileGraph(const torch::jit::Module& mod, CompileSpec cfg)
       return mod;
     }
   } else {
-    TRTORCH_CHECK(
+    TORCHTRT_CHECK(
        conversion::VerifyConverterSupportForBlock(g->block()),
        "Not all operations in graph are supported by the compiler");
    auto engine = conversion::ConvertBlockToEngine(g->block(), cfg.convert_info, static_params);
@@ -459,8 +459,8 @@ torch::jit::script::Module EmbedEngineInNewModule(const std::string& engine, run
 }
 
 void set_device(const int gpu_id) {
-  TRTORCH_ASSERT(cudaSetDevice(gpu_id) == cudaSuccess, "Unable to set CUDA device: " << gpu_id);
+  TORCHTRT_ASSERT(cudaSetDevice(gpu_id) == cudaSuccess, "Unable to set CUDA device: " << gpu_id);
 }
 
 } // namespace core
-} // namespace trtorch
+} // namespace torch_tensorrt
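
The rename also touches the error-handling macros used at these call sites (TRTORCH_CHECK becomes TORCHTRT_CHECK, TRTORCH_ASSERT becomes TORCHTRT_ASSERT). Their real definitions live elsewhere in the project's macro headers and are not part of this hunk; the sketch below only illustrates, under that assumption, the stream-style contract the call sites rely on (a condition followed by a <<-chained message), using a deliberately different name.

// Illustrative stand-in only; not the project's actual macro definition.
#include <sstream>
#include <stdexcept>

#define TORCHTRT_CHECK_SKETCH(cond, msg)                          \
  do {                                                            \
    if (!(cond)) {                                                \
      std::ostringstream ss;                                      \
      ss << "Check failed: " #cond "\n" << msg;                   \
      throw std::runtime_error(ss.str());                         \
    }                                                             \
  } while (0)

// A call site shaped like the ones above would then expand cleanly, e.g.:
//   TORCHTRT_CHECK_SKETCH(cudaSetDevice(gpu_id) == cudaSuccess,
//                         "Unable to set CUDA device: " << gpu_id);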

core/compiler.h

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 #include "core/runtime/runtime.h"
 #include "torch/csrc/jit/api/module.h"
 
-namespace trtorch {
+namespace torch_tensorrt {
 namespace core {
 
 struct CompileSpec {
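
For downstream code, the practical effect of this hunk is that the core compiler API is now addressed through torch_tensorrt::core rather than trtorch::core. A minimal caller sketch follows, assuming CompileGraph is declared in this header alongside CompileSpec (its definition appears in core/compiler.cpp above); the wrapper function itself is hypothetical.

#include "core/compiler.h"
#include "torch/csrc/jit/api/module.h"

// Hypothetical helper: forwards to the renamed namespace. CompileSpec is taken
// as a parameter because its fields are not part of this diff.
torch::jit::Module compile_module(
    const torch::jit::Module& mod,
    torch_tensorrt::core::CompileSpec cfg) {
  // Previously trtorch::core::CompileGraph(mod, cfg); only the namespace changed.
  return torch_tensorrt::core::CompileGraph(mod, cfg);
}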

core/conversion/conversion.cpp

Lines changed: 22 additions & 22 deletions
@@ -10,7 +10,7 @@
 #include "c10/util/intrusive_ptr.h"
 #include "core/conversion/tensorcontainer/TensorContainer.h"
 
-namespace trtorch {
+namespace torch_tensorrt {
 namespace core {
 namespace conversion {
 
@@ -24,7 +24,7 @@ bool OpSupported(const torch::jit::Node* n) {
 c10::optional<torch::jit::IValue> EvaluateNode(ConversionCtx* ctx, const torch::jit::Node* n, int level, int limit) {
   // Check to see if you can just go through and eval all of these AOT (saves
   // the recursion) Also probably a better way to deal with the two error cases;
-  TRTORCH_CHECK(
+  TORCHTRT_CHECK(
       level < limit,
       "Failed to evaluate node: " << *n << "Reason: Exceeded evaluation stack limit (limit=" << limit << ")");
 
@@ -56,7 +56,7 @@ c10::optional<torch::jit::IValue> EvaluateNode(ConversionCtx* ctx, const torch::
       }
     }
   } else {
-    TRTORCH_THROW_ERROR(
+    TORCHTRT_THROW_ERROR(
        "Failed to evaluate node: " << *n << "Reason: Node inputs cannot be evaluated at conversion time\n"
        << "File a bug: https://www.github.com/NVIDIA/TRTorch/issues");
     return {};
@@ -101,28 +101,28 @@ void AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
       }
     } else {
       // Node input has not been converted yet or is a prim op
-      TRTORCH_THROW_ERROR(
+      TORCHTRT_THROW_ERROR(
          "Unable to retrieve all node inputs for node: "
          << util::node_info(n) << " (ctx.AddLayer)\nSpecifically failed to retrieve value for input: " << *input_node);
     }
   }
 
   if (n->inputs().size() != node_args.size()) {
-    TRTORCH_THROW_ERROR("Unable to retrieve all node inputs for node: " << *n);
+    TORCHTRT_THROW_ERROR("Unable to retrieve all node inputs for node: " << *n);
   }
 
   auto schema = n->maybeSchema();
-  TRTORCH_CHECK(schema, "Unable to get schema for Node " << util::node_info(n) << " (conversion.AddLayer)");
+  TORCHTRT_CHECK(schema, "Unable to get schema for Node " << util::node_info(n) << " (conversion.AddLayer)");
 
   auto converter = converters::get_node_converter_for(schema);
-  TRTORCH_CHECK(
+  TORCHTRT_CHECK(
      converter,
      "Unable to convert node: "
      << util::node_info(n) << " (conversion.AddLayer)\nSchema: " << *schema << "\nConverter for " << schema->name()
      << " requested, but no such converter was found.\nIf you need a converter for this operator, you can try implementing one yourself\n"
      << "or request a converter: https://www.github.com/NVIDIA/TRTorch/issues");
 
-  TRTORCH_CHECK(
+  TORCHTRT_CHECK(
      converter(ctx, n, node_args),
      "Converter for " << *schema << " failed to convert node: " << util::node_info(n)
      << "please report this error to https://www.github.com/NVIDIA/TRTorch/issues");
@@ -158,7 +158,7 @@ void AddInputs(
 
   for (auto input : input_tensors) {
     const torch::jit::Value* in = input;
-    TRTORCH_CHECK(
+    TORCHTRT_CHECK(
        input_specs.find(in) != input_specs.end(),
        "Cannot find an input spec associated with input: " << in->debugName());
     ir::Input& spec = input_specs.find(in)->second;
@@ -170,7 +170,7 @@ void AddInputs(
        << " in engine (conversion.AddInputs)");
 
     auto trt_in = ctx->net->addInput(name.c_str(), spec.dtype, spec.input_shape);
-    TRTORCH_CHECK(trt_in, "Failed to add input node: " << in->debugName() << " (conversion.AddInputs)");
+    TORCHTRT_CHECK(trt_in, "Failed to add input node: " << in->debugName() << " (conversion.AddInputs)");
     trt_in->setAllowedFormats(1U << static_cast<int>(spec.format));
 
     profile->setDimensions(trt_in->getName(), nvinfer1::OptProfileSelector::kMIN, spec.min);
@@ -185,7 +185,7 @@ void AddInputs(
     ctx->num_inputs += 1;
   }
 
-  TRTORCH_CHECK(
+  TORCHTRT_CHECK(
      profile->isValid(),
      "Optimization profile is invalid, please check the input range provided (conversion.AddInputs)");
 
@@ -213,7 +213,7 @@ void MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outp
          ctx->logger, "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
       ctx->num_outputs += 1;
     } else {
-      TRTORCH_THROW_ERROR("Unknown output type. Only a single tensor or a TensorList type is supported.");
+      TORCHTRT_THROW_ERROR("Unknown output type. Only a single tensor or a TensorList type is supported.");
     }
   }
 } else {
@@ -258,7 +258,7 @@ void MapIValues(
       auto input = ctx->value_tensor_map[p.first];
       ctx->value_tensor_map[p.second] = input;
     } else {
-      TRTORCH_THROW_ERROR(
+      TORCHTRT_THROW_ERROR(
          "Cannot find Value " << p.first->debugName() << " either evaluated values or tensor maps (MapIValues)");
     }
   }
@@ -271,7 +271,7 @@ void EvaluateConditionalBlock(ConversionCtx* ctx, const torch::jit::Node* n, boo
       output_type_includes_tensor = true;
     }
   }
-  TRTORCH_CHECK(
+  TORCHTRT_CHECK(
      !(contained_in_loop && output_type_includes_tensor),
      "TRTorch currently cannot compile conditionals within loops");
 
@@ -301,7 +301,7 @@ void EvaluateConditionalBlock(ConversionCtx* ctx, const torch::jit::Node* n, boo
     } else if (converters::node_is_convertable(bn)) {
       AddLayer(ctx, bn);
     } else {
-      TRTORCH_THROW_ERROR(
+      TORCHTRT_THROW_ERROR(
          "TRTorch is unable to compile this conditional, a converter or evaluator is not available for node " << *bn);
     }
   }
@@ -332,7 +332,7 @@ void EvaluateLoopBlock(ConversionCtx* ctx, const torch::jit::Node* n) {
     } else if (bn->kind() == torch::jit::prim::If) {
       EvaluateConditionalBlock(ctx, bn, true);
     } else {
-      TRTORCH_CHECK(
+      TORCHTRT_CHECK(
          evaluators::shouldEvalAtConversionTime(bn),
          "TRTorch currently can only compile loops that are evaluatable at conversion time but node "
          << *bn << " cannot be evaluated.");
@@ -383,7 +383,7 @@ void ConvertBlockToNetDef(
     if (n->outputs().size() > 1) { // For ListUnpack scenario
       if (eval.value().isTuple()) {
         auto eval_list = eval.value().toTuple();
-        TRTORCH_CHECK(
+        TORCHTRT_CHECK(
            eval_list->elements().size() == n->outputs().size(),
            "Size of evaluated results: " << eval_list->elements().size()
            << " and node outputs size: " << n->outputs().size() << " must match.");
@@ -395,7 +395,7 @@ void ConvertBlockToNetDef(
          ctx->AssociateValueAndIValue(n->output(i), eval_output);
        }
      } else {
-        TRTORCH_THROW_ERROR("Unsupported return type for evaluated node");
+        TORCHTRT_THROW_ERROR("Unsupported return type for evaluated node");
      }
    } else if (eval.value().isCustomClass()) {
      auto container = eval.value().toCustomClass<TensorContainer>();
@@ -452,7 +452,7 @@ std::unordered_map<c10::OperatorName, std::string> GetUnsupportedOpsInBlock(cons
   for (const auto n : b->nodes()) {
     if (n->kind() != torch::jit::prim::Loop && n->kind() != torch::jit::prim::If && !OpSupported(n)) {
       auto schema = n->maybeSchema();
-      TRTORCH_CHECK(
+      TORCHTRT_CHECK(
          schema,
          "Unable to get schema for Node " << util::node_info(n) << " (conversion.VerifyCoverterSupportForBlock)");
       std::stringstream ss;
@@ -480,7 +480,7 @@ std::set<std::string> ConvertableOpsInBlock(const torch::jit::Block* b) {
     }
     if (converters::node_is_convertable(n)) {
       auto schema = n->maybeSchema();
-      TRTORCH_CHECK(
+      TORCHTRT_CHECK(
          schema, "Unable to get schema for Node " << util::node_info(n) << " (conversion.CheckForConvertableOps)");
       std::stringstream ss;
       ss << *schema;
@@ -518,7 +518,7 @@ bool VerifyConverterSupportForBlock(const torch::jit::Block* b, bool suppress_er
       if (suppress_errors) {
         LOG_ERROR(
            "Unsupported operator: " << *schema << std::endl
-            << trtorch::core::util::GetPyTorchSourceCode(n) << std::endl);
+            << torch_tensorrt::core::util::GetPyTorchSourceCode(n) << std::endl);
       }
     }
   }
@@ -548,4 +548,4 @@ bool VerifyConverterSupportForBlock(const torch::jit::Block* b, bool suppress_er
 
 } // namespace conversion
 } // namespace core
-} // namespace trtorch
+} // namespace torch_tensorrt
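
As a usage illustration of the renamed conversion namespace, the sketch below walks a block with OpSupported and then calls VerifyConverterSupportForBlock, both of which appear in the hunks above; the wrapper function and the suppress_errors value are assumptions for illustration, not part of this commit.

#include "core/conversion/conversion.h"

// Hypothetical helper built on the functions touched in this diff.
bool block_is_fully_supported(const torch::jit::Block* block) {
  namespace conversion = torch_tensorrt::core::conversion;
  for (const auto node : block->nodes()) {
    if (!conversion::OpSupported(node)) {
      return false;  // no converter or evaluator registered for this op
    }
  }
  // Cross-check with the block-level verification the compiler itself uses;
  // the suppress_errors argument here is an arbitrary choice.
  return conversion::VerifyConverterSupportForBlock(block, /*suppress_errors=*/false);
}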

core/conversion/conversion.h

Lines changed: 2 additions & 2 deletions
@@ -7,7 +7,7 @@
 #include "core/ir/ir.h"
 #include "torch/csrc/jit/ir/ir.h"
 
-namespace trtorch {
+namespace torch_tensorrt {
 namespace core {
 namespace conversion {
 
@@ -35,4 +35,4 @@ c10::optional<torch::jit::IValue> EvaluateNode(
 
 } // namespace conversion
 } // namespace core
-} // namespace trtorch
+} // namespace torch_tensorrt
