Commit bc2978e

Merge branch 'snake_case_apis_cpp' into 'main'

refactor!: Changing the C++ api to be snake case

See merge request nvidia/trtorch/Torch-TensorRT-Preview!3

2 parents 4d606bc + fc4ace5 · commit bc2978e
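
For orientation, the public C++ entry points renamed by this commit are `CheckMethodOperatorSupport` → `check_method_operator_support`, `CompileModule` → `compile`, `ConvertMethodToTRTEngine` → `convert_method_to_trt_engine`, and `EmbedEngineInNewModule` → `embed_engine_in_new_module`. Below is a minimal migration sketch assembled from the README and header changes in this diff; `ts_mod` and `inputs` are placeholder values supplied by the caller, not names introduced by the commit.

```cpp
#include <stdexcept>
#include <vector>

#include "torch/script.h"
#include "torch_tensorrt/torch_tensorrt.h"

// Sketch only: compile a loaded TorchScript module for FP16 execution using the
// renamed snake_case API. The old PascalCase calls are noted in comments.
torch::jit::Module compile_fp16(const torch::jit::Module& ts_mod,
                                std::vector<torch_tensorrt::Input> inputs) {
  // Previously: torch_tensorrt::ts::CheckMethodOperatorSupport(ts_mod, "forward")
  if (!torch_tensorrt::ts::check_method_operator_support(ts_mod, "forward")) {
    throw std::runtime_error("forward() uses operators unsupported by Torch-TensorRT");
  }

  auto compile_settings = torch_tensorrt::ts::CompileSpec(inputs);
  compile_settings.enabled_precisions = {torch::kHalf};  // FP16 execution, as in the README example

  // Previously: torch_tensorrt::ts::CompileModule(ts_mod, compile_settings)
  return torch_tensorrt::ts::compile(ts_mod, compile_settings);
}
```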

File tree

130 files changed: +25444 −22415 lines


.gitignore

Lines changed: 2 additions & 1 deletion
@@ -56,4 +56,5 @@ examples/int8/ptq/ptq
 examples/int8/qat/qat
 examples/int8/training/vgg16/data/*
 examples/int8/datasets/data/*
-env/**/*
+env/**/*
+bazel-Torch-TensorRT-Preview

README.md

Lines changed: 2 additions & 2 deletions
@@ -14,7 +14,7 @@ More Information / System Architecture:

 ## Building a docker container for Torch-TensorRT Preview

-We provide a `Dockerfile` in `docker/` directory. We build `Torch-TensorRT` on top of a `Pytorch NGC container` which provide basic dependencies (like CUDA, CUDNN, CUBLAS, TensorRT, Pytorch and others) The dependency libraries in the container can be found in the <a href="https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/index.html">release notes</a>.
+We provide a `Dockerfile` in `docker/` directory. We build `Torch-TensorRT` on top of a `Pytorch NGC container` which provide basic dependencies (like CUDA, CUDNN, CUBLAS, TensorRT, Pytorch and others) The dependency libraries in the container can be found in the <a href="https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/index.html">release notes</a>.

 Please follow this instruction to build a Docker container.

@@ -41,7 +41,7 @@ auto compile_settings = torch_tensorrt::ts::CompileSpec({input});
 // FP16 execution
 compile_settings.enabled_precisions = {torch::kHalf};
 // Compile module
-auto trt_mod = torch_tensorrt::ts::CompileModule(ts_mod, compile_settings);
+auto trt_mod = torch_tensorrt::ts::compile(ts_mod, compile_settings);
 // Run like normal
 auto results = trt_mod.forward({in_tensor});
 // Save module for later

core/partitioning/README.md

Lines changed: 1 addition & 1 deletion
@@ -62,6 +62,6 @@ torchtrt::ts::CompileSpec cfg(input_sizes);
 cfg.torch_fallback = torchtrt::CompileSpec::TorchFallback(true);
 cfg.torch_fallback.min_block_size = 2;
 cfg.torch_fallback.forced_fallback_ops.push_back("aten::relu");
-auto trt_mod = torchtrt::ts::CompileModule(mod, cfg);
+auto trt_mod = torchtrt::ts::compile(mod, cfg);
 auto out = trt_mod.forward({in});
 ```

cpp/bin/torchtrtc/main.cpp

Lines changed: 4 additions & 4 deletions
@@ -600,7 +600,7 @@ int main(int argc, char** argv) {
   // Instead of compiling, just embed engine in a PyTorch module
   if (embed_engine) {
     std::string serialized_engine = read_buf(real_input_path);
-    auto trt_mod = torchtrt::ts::EmbedEngineInNewModule(serialized_engine, compile_settings.device);
+    auto trt_mod = torchtrt::ts::embed_engine_in_new_module(serialized_engine, compile_settings.device);
     trt_mod.save(real_output_path);
     return 0;
   }
@@ -615,19 +615,19 @@ int main(int argc, char** argv) {
   }

   if (require_full_compilation) {
-    if (!torchtrt::ts::CheckMethodOperatorSupport(mod, "forward")) {
+    if (!torchtrt::ts::check_method_operator_support(mod, "forward")) {
       torchtrt::logging::log(torchtrt::logging::Level::kERROR, "Module is not currently supported by Torch-TensorRT");
       return 1;
     }
   }

   if (save_engine) {
-    auto engine = torchtrt::ts::ConvertMethodToTRTEngine(mod, "forward", compile_settings);
+    auto engine = torchtrt::ts::convert_method_to_trt_engine(mod, "forward", compile_settings);
     std::ofstream out(real_output_path);
     out << engine;
     out.close();
   } else {
-    auto trt_mod = torchtrt::ts::CompileModule(mod, compile_settings);
+    auto trt_mod = torchtrt::ts::compile(mod, compile_settings);

     if (!no_threshold_check &&
         (compile_settings.enabled_precisions.size() == 1 &&

cpp/include/torch_tensorrt/torch_tensorrt.h

Lines changed: 4 additions & 4 deletions
@@ -701,7 +701,7 @@ struct TORCHTRT_API CompileSpec {
 *
 * @returns bool: Method is supported by Torch-TensorRT.TorchScript
 */
-TORCHTRT_API bool CheckMethodOperatorSupport(const torch::jit::Module& module, std::string method_name);
+TORCHTRT_API bool check_method_operator_support(const torch::jit::Module& module, std::string method_name);

 /**
 * @brief Compile a TorchScript module for NVIDIA GPUs using TensorRT
@@ -717,7 +717,7 @@ TORCHTRT_API bool CheckMethodOperatorSupport(const torch::jit::Module& module, s
 *
 * @return: A new module trageting a TensorRT engine
 */
-TORCHTRT_API torch::jit::Module CompileModule(const torch::jit::Module& module, CompileSpec info);
+TORCHTRT_API torch::jit::Module compile(const torch::jit::Module& module, CompileSpec info);

 /**
 * @brief Compile a TorchScript method for NVIDIA GPUs using TensorRT
@@ -733,7 +733,7 @@ TORCHTRT_API torch::jit::Module CompileModule(const torch::jit::Module& module,
 * @return: std::string: Serialized TensorRT engine equivilant to the method
 * graph
 */
-TORCHTRT_API std::string ConvertMethodToTRTEngine(
+TORCHTRT_API std::string convert_method_to_trt_engine(
     const torch::jit::Module& module,
     std::string method_name,
     CompileSpec info);
@@ -751,6 +751,6 @@ TORCHTRT_API std::string ConvertMethodToTRTEngine(
 *
 * @return: A new module trageting a TensorRT engine
 */
-TORCHTRT_API torch::jit::Module EmbedEngineInNewModule(const std::string& engine, Device device);
+TORCHTRT_API torch::jit::Module embed_engine_in_new_module(const std::string& engine, Device device);
 } // namespace torchscript
 } // namespace torch_tensorrt
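
The two remaining renames above, `convert_method_to_trt_engine` and `embed_engine_in_new_module`, are the ones `torchtrtc` exercises to save and re-load standalone engines. A hedged sketch of that round trip under the new names, mirroring the `main.cpp` changes earlier in this commit; `mod`, `spec`, and `engine_path` are illustrative placeholders, and error handling is omitted.

```cpp
#include <fstream>
#include <sstream>
#include <string>

#include "torch/script.h"
#include "torch_tensorrt/torch_tensorrt.h"

// Serialize the "forward" method of `mod` as a raw TensorRT engine, then wrap
// the engine bytes back into a runnable TorchScript module on the same device.
void engine_roundtrip(const torch::jit::Module& mod,
                      torch_tensorrt::ts::CompileSpec spec,
                      const std::string& engine_path) {
  // Previously: torchtrt::ts::ConvertMethodToTRTEngine(mod, "forward", spec)
  std::string engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", spec);
  std::ofstream(engine_path, std::ios::binary) << engine;

  // Read the serialized engine back, as torchtrtc does with read_buf().
  std::ifstream in(engine_path, std::ios::binary);
  std::stringstream buf;
  buf << in.rdbuf();

  // Previously: torchtrt::ts::EmbedEngineInNewModule(buf.str(), spec.device)
  auto trt_mod = torch_tensorrt::ts::embed_engine_in_new_module(buf.str(), spec.device);
  trt_mod.save(engine_path + ".ts");  // loadable later with torch::jit::load (runtime library required)
}
```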

cpp/src/torch_tensorrt.cpp

Lines changed: 4 additions & 4 deletions
@@ -12,11 +12,11 @@ namespace torchscript {
 // Defined in compile_spec.cpp
 torch_tensorrt::core::CompileSpec to_internal_compile_spec(CompileSpec external);

-bool CheckMethodOperatorSupport(const torch::jit::script::Module& module, std::string method_name) {
+bool check_method_operator_support(const torch::jit::script::Module& module, std::string method_name) {
   return torch_tensorrt::core::CheckMethodOperatorSupport(module, method_name);
 }

-std::string ConvertMethodToTRTEngine(
+std::string convert_method_to_trt_engine(
     const torch::jit::script::Module& module,
     std::string method_name,
     CompileSpec info) {
@@ -26,14 +26,14 @@ std::string ConvertMethodToTRTEngine(
   return torch_tensorrt::core::ConvertGraphToTRTEngine(module, method_name, to_internal_compile_spec(info));
 }

-torch::jit::script::Module CompileModule(const torch::jit::script::Module& module, CompileSpec info) {
+torch::jit::script::Module compile(const torch::jit::script::Module& module, CompileSpec info) {
   LOG_DEBUG(get_build_info());
   // Want to export a much simpler (non TRT header dependent) API so doing the
   // type conversion here
   return torch_tensorrt::core::CompileGraph(module, to_internal_compile_spec(info));
 }

-torch::jit::Module EmbedEngineInNewModule(const std::string& engine, Device device) {
+torch::jit::Module embed_engine_in_new_module(const std::string& engine, Device device) {
   return torch_tensorrt::core::EmbedEngineInNewModule(engine, to_internal_cuda_device(device));
 }


docs/_cpp_api/class_view_hierarchy.html

Lines changed: 114 additions & 111 deletions
@@ -3,6 +3,7 @@
 <head>
 <meta charset="utf-8"/>
 <meta content="width=device-width, initial-scale=1.0" name="viewport"/>
+<meta content="Docutils 0.17.1: http://docutils.sourceforge.net/" name="generator"/>
 <meta content="width=device-width,initial-scale=1" name="viewport"/>
 <meta content="ie=edge" http-equiv="x-ua-compatible"/>
 <meta content="Copy to clipboard" name="lang:clipboard.copy"/>
@@ -299,17 +300,17 @@
 </li>
 <li class="md-nav__item">
 <a class="md-nav__link" href="../_notebooks/lenet-getting-started.html">
-TRTorch Getting Started - LeNet
+Torch-TensorRT Getting Started - LeNet
 </a>
 </li>
 <li class="md-nav__item">
 <a class="md-nav__link" href="../_notebooks/ssd-object-detection-demo.html">
-Object Detection with TRTorch (SSD)
+Object Detection with Torch-TensorRT (SSD)
 </a>
 </li>
 <li class="md-nav__item">
 <a class="md-nav__link" href="../_notebooks/vgg-qat.html">
-Deploying Quantization Aware Trained models in INT8 using TRTorch
+Deploying Quantization Aware Trained models in INT8 using Torch-TensorRT
 </a>
 </li>
 <li class="md-nav__item">
@@ -392,114 +393,116 @@
 </div>
 <div class="md-content">
 <article class="md-content__inner md-typeset" role="main">
-<h1 id="cpp-api-class-view-hierarchy--page-root">
-Class Hierarchy
-<a class="headerlink" href="#cpp-api-class-view-hierarchy--page-root" title="Permalink to this headline">
-
-</a>
-</h1>
-<ul class="treeView" id="class-treeView">
-<li>
-<ul class="collapsibleList">
-<li class="lastChild">
-Namespace
-<a href="namespace_torch_tensorrt.html#namespace-torch-tensorrt">
-torch_tensorrt
-</a>
-<ul>
-<li>
-Namespace
-<a href="namespace_torch_tensorrt__logging.html#namespace-torch-tensorrt-logging">
-torch_tensorrt::logging
-</a>
-<ul>
-<li class="lastChild">
-Enum
-<a href="enum_logging_8h_1a130f65408ad8cbaee060f05e8db69558.html#exhale-enum-logging-8h-1a130f65408ad8cbaee060f05e8db69558">
-Level
-</a>
-</li>
-</ul>
-</li>
-<li>
-Namespace
-<a href="namespace_torch_tensorrt__ptq.html#namespace-torch-tensorrt-ptq">
-torch_tensorrt::ptq
-</a>
-<ul>
-<li>
-Template Class
-<a href="classtorch__tensorrt_1_1ptq_1_1Int8CacheCalibrator.html#exhale-class-classtorch-tensorrt-1-1ptq-1-1Int8CacheCalibrator">
-Int8CacheCalibrator
-</a>
-</li>
-<li class="lastChild">
-Template Class
-<a href="classtorch__tensorrt_1_1ptq_1_1Int8Calibrator.html#exhale-class-classtorch-tensorrt-1-1ptq-1-1Int8Calibrator">
-Int8Calibrator
-</a>
-</li>
-</ul>
-</li>
-<li>
-Namespace
-<a href="namespace_torch_tensorrt__torchscript.html#namespace-torch-tensorrt-torchscript">
-torch_tensorrt::torchscript
-</a>
-<ul>
-<li class="lastChild">
-Struct
-<a href="structtorch__tensorrt_1_1torchscript_1_1CompileSpec.html#exhale-struct-structtorch-tensorrt-1-1torchscript-1-1CompileSpec">
-CompileSpec
-</a>
-</li>
-</ul>
-</li>
-<li>
-Struct
-<a href="structtorch__tensorrt_1_1Device.html#exhale-struct-structtorch-tensorrt-1-1Device">
-Device
-</a>
-<ul>
-<li class="lastChild">
-Class
-<a href="classtorch__tensorrt_1_1Device_1_1DeviceType.html#exhale-class-classtorch-tensorrt-1-1Device-1-1DeviceType">
-Device::DeviceType
-</a>
-</li>
-</ul>
-</li>
-<li>
-Struct
-<a href="structtorch__tensorrt_1_1Input.html#exhale-struct-structtorch-tensorrt-1-1Input">
-Input
-</a>
-</li>
-<li>
-Class
-<a href="classtorch__tensorrt_1_1DataType.html#exhale-class-classtorch-tensorrt-1-1DataType">
-DataType
-</a>
-</li>
-<li>
-Class
-<a href="classtorch__tensorrt_1_1TensorFormat.html#exhale-class-classtorch-tensorrt-1-1TensorFormat">
-TensorFormat
-</a>
-</li>
-<li class="lastChild">
-Enum
-<a href="enum_torch__tensorrt_8h_1a3fbe5d72e4fc624dbd038853079620eb.html#exhale-enum-torch-tensorrt-8h-1a3fbe5d72e4fc624dbd038853079620eb">
-EngineCapability
-</a>
-</li>
-</ul>
-</li>
-</ul>
-</li>
-<!-- only tree view element -->
-</ul>
-<!-- /treeView class-treeView -->
+<section id="class-hierarchy">
+<h1 id="cpp-api-class-view-hierarchy--page-root">
+Class Hierarchy
+<a class="headerlink" href="#cpp-api-class-view-hierarchy--page-root" title="Permalink to this headline">
+
+</a>
+</h1>
+<ul class="treeView" id="class-treeView">
+<li>
+<ul class="collapsibleList">
+<li class="lastChild">
+Namespace
+<a href="namespace_torch_tensorrt.html#namespace-torch-tensorrt">
+torch_tensorrt
+</a>
+<ul>
+<li>
+Namespace
+<a href="namespace_torch_tensorrt__logging.html#namespace-torch-tensorrt-logging">
+torch_tensorrt::logging
+</a>
+<ul>
+<li class="lastChild">
+Enum
+<a href="enum_logging_8h_1a130f65408ad8cbaee060f05e8db69558.html#exhale-enum-logging-8h-1a130f65408ad8cbaee060f05e8db69558">
+Level
+</a>
+</li>
+</ul>
+</li>
+<li>
+Namespace
+<a href="namespace_torch_tensorrt__ptq.html#namespace-torch-tensorrt-ptq">
+torch_tensorrt::ptq
+</a>
+<ul>
+<li>
+Template Class
+<a href="classtorch__tensorrt_1_1ptq_1_1Int8CacheCalibrator.html#exhale-class-classtorch-tensorrt-1-1ptq-1-1Int8CacheCalibrator">
+Int8CacheCalibrator
+</a>
+</li>
+<li class="lastChild">
+Template Class
+<a href="classtorch__tensorrt_1_1ptq_1_1Int8Calibrator.html#exhale-class-classtorch-tensorrt-1-1ptq-1-1Int8Calibrator">
+Int8Calibrator
+</a>
+</li>
+</ul>
+</li>
+<li>
+Namespace
+<a href="namespace_torch_tensorrt__torchscript.html#namespace-torch-tensorrt-torchscript">
+torch_tensorrt::torchscript
+</a>
+<ul>
+<li class="lastChild">
+Struct
+<a href="structtorch__tensorrt_1_1torchscript_1_1CompileSpec.html#exhale-struct-structtorch-tensorrt-1-1torchscript-1-1CompileSpec">
+CompileSpec
+</a>
+</li>
+</ul>
+</li>
+<li>
+Struct
+<a href="structtorch__tensorrt_1_1Device.html#exhale-struct-structtorch-tensorrt-1-1Device">
+Device
+</a>
+<ul>
+<li class="lastChild">
+Class
+<a href="classtorch__tensorrt_1_1Device_1_1DeviceType.html#exhale-class-classtorch-tensorrt-1-1Device-1-1DeviceType">
+Device::DeviceType
+</a>
+</li>
+</ul>
+</li>
+<li>
+Struct
+<a href="structtorch__tensorrt_1_1Input.html#exhale-struct-structtorch-tensorrt-1-1Input">
+Input
+</a>
+</li>
+<li>
+Class
+<a href="classtorch__tensorrt_1_1DataType.html#exhale-class-classtorch-tensorrt-1-1DataType">
+DataType
+</a>
+</li>
+<li>
+Class
+<a href="classtorch__tensorrt_1_1TensorFormat.html#exhale-class-classtorch-tensorrt-1-1TensorFormat">
+TensorFormat
+</a>
+</li>
+<li class="lastChild">
+Enum
+<a href="enum_torch__tensorrt_8h_1a3fbe5d72e4fc624dbd038853079620eb.html#exhale-enum-torch-tensorrt-8h-1a3fbe5d72e4fc624dbd038853079620eb">
+EngineCapability
+</a>
+</li>
+</ul>
+</li>
+</ul>
+</li>
+<!-- only tree view element -->
+</ul>
+<!-- /treeView class-treeView -->
+</section>
 </article>
 </div>
 </div>
