
Commit f34e230

refactor!: Change the C++ API to snake case

BREAKING CHANGE: This changes the C++ `::ts` APIs to snake case, and `CompileModule` becomes just `compile`.

Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>

1 parent 4d606bc · commit f34e230
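
In summary, the public `torch_tensorrt::torchscript` (`::ts`) renames applied across this commit are:

- `CheckMethodOperatorSupport` → `check_method_operator_support`
- `CompileModule` → `compile`
- `ConvertMethodToTRTEngine` → `convert_method_to_trt_engine`
- `EmbedEngineInNewModule` → `embed_engine_in_new_module`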

24 files changed: +46 −45 lines

.gitignore

Lines changed: 2 additions & 1 deletion
@@ -56,4 +56,5 @@ examples/int8/ptq/ptq
 examples/int8/qat/qat
 examples/int8/training/vgg16/data/*
 examples/int8/datasets/data/*
-env/**/*
+env/**/*
+bazel-Torch-TensorRT-Preview

README.md

Lines changed: 2 additions & 2 deletions
@@ -14,7 +14,7 @@ More Information / System Architecture:

 ## Building a docker container for Torch-TensorRT Preview

-We provide a `Dockerfile` in `docker/` directory. We build `Torch-TensorRT` on top of a `Pytorch NGC container` which provide basic dependencies (like CUDA, CUDNN, CUBLAS, TensorRT, Pytorch and others) The dependency libraries in the container can be found in the <a href="https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/index.html">release notes</a>.
+We provide a `Dockerfile` in `docker/` directory. We build `Torch-TensorRT` on top of a `Pytorch NGC container` which provide basic dependencies (like CUDA, CUDNN, CUBLAS, TensorRT, Pytorch and others) The dependency libraries in the container can be found in the <a href="https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/index.html">release notes</a>.

 Please follow this instruction to build a Docker container.

@@ -41,7 +41,7 @@ auto compile_settings = torch_tensorrt::ts::CompileSpec({input});
 // FP16 execution
 compile_settings.enabled_precisions = {torch::kHalf};
 // Compile module
-auto trt_mod = torch_tensorrt::ts::CompileModule(ts_mod, compile_settings);
+auto trt_mod = torch_tensorrt::ts::compile(ts_mod, compile_settings);
 // Run like normal
 auto results = trt_mod.forward({in_tensor});
 // Save module for later
core/partitioning/README.md

Lines changed: 1 addition & 1 deletion
@@ -62,6 +62,6 @@ torchtrt::ts::CompileSpec cfg(input_sizes);
 cfg.torch_fallback = torchtrt::CompileSpec::TorchFallback(true);
 cfg.torch_fallback.min_block_size = 2;
 cfg.torch_fallback.forced_fallback_ops.push_back("aten::relu");
-auto trt_mod = torchtrt::ts::CompileModule(mod, cfg);
+auto trt_mod = torchtrt::ts::compile(mod, cfg);
 auto out = trt_mod.forward({in});
 ```

cpp/bin/torchtrtc/main.cpp

Lines changed: 3 additions & 3 deletions
@@ -600,7 +600,7 @@ int main(int argc, char** argv) {
   // Instead of compiling, just embed engine in a PyTorch module
   if (embed_engine) {
     std::string serialized_engine = read_buf(real_input_path);
-    auto trt_mod = torchtrt::ts::EmbedEngineInNewModule(serialized_engine, compile_settings.device);
+    auto trt_mod = torchtrt::ts::embed_engine_in_new_module(serialized_engine, compile_settings.device);
     trt_mod.save(real_output_path);
     return 0;
   }

@@ -622,12 +622,12 @@ int main(int argc, char** argv) {
   }

   if (save_engine) {
-    auto engine = torchtrt::ts::ConvertMethodToTRTEngine(mod, "forward", compile_settings);
+    auto engine = torchtrt::ts::convert_method_to_trt_engine(mod, "forward", compile_settings);
     std::ofstream out(real_output_path);
     out << engine;
     out.close();
   } else {
-    auto trt_mod = torchtrt::ts::CompileModule(mod, compile_settings);
+    auto trt_mod = torchtrt::ts::compile(mod, compile_settings);

     if (!no_threshold_check &&
         (compile_settings.enabled_precisions.size() == 1 &&

cpp/include/torch_tensorrt/torch_tensorrt.h

Lines changed: 4 additions & 4 deletions
@@ -701,7 +701,7 @@ struct TORCHTRT_API CompileSpec {
  *
  * @returns bool: Method is supported by Torch-TensorRT.TorchScript
  */
-TORCHTRT_API bool CheckMethodOperatorSupport(const torch::jit::Module& module, std::string method_name);
+TORCHTRT_API bool check_method_operator_support(const torch::jit::Module& module, std::string method_name);

 /**
  * @brief Compile a TorchScript module for NVIDIA GPUs using TensorRT

@@ -717,7 +717,7 @@ TORCHTRT_API bool CheckMethodOperatorSupport(const torch::jit::Module& module, s
  *
  * @return: A new module trageting a TensorRT engine
  */
-TORCHTRT_API torch::jit::Module CompileModule(const torch::jit::Module& module, CompileSpec info);
+TORCHTRT_API torch::jit::Module compile(const torch::jit::Module& module, CompileSpec info);

 /**
  * @brief Compile a TorchScript method for NVIDIA GPUs using TensorRT

@@ -733,7 +733,7 @@ TORCHTRT_API torch::jit::Module CompileModule(const torch::jit::Module& module,
  * @return: std::string: Serialized TensorRT engine equivilant to the method
  * graph
  */
-TORCHTRT_API std::string ConvertMethodToTRTEngine(
+TORCHTRT_API std::string convert_method_to_trt_engine(
     const torch::jit::Module& module,
     std::string method_name,
     CompileSpec info);

@@ -751,6 +751,6 @@ TORCHTRT_API std::string ConvertMethodToTRTEngine(
  *
  * @return: A new module trageting a TensorRT engine
  */
-TORCHTRT_API torch::jit::Module EmbedEngineInNewModule(const std::string& engine, Device device);
+TORCHTRT_API torch::jit::Module embed_engine_in_new_module(const std::string& engine, Device device);
 } // namespace torchscript
 } // namespace torch_tensorrt
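
Taken together, the header now exposes four snake_case entry points. The following is a minimal sketch of a caller migrating to the renamed API; the `Input`/`CompileSpec` construction follows the usage shown in the README excerpt above, and the file names `model.ts` / `trt_model.ts` and the input shape are hypothetical:

```c++
#include <vector>

#include "torch/script.h"
#include "torch_tensorrt/torch_tensorrt.h"

int main() {
  // Load a TorchScript module from disk (hypothetical path)
  auto mod = torch::jit::load("model.ts");

  // Was CheckMethodOperatorSupport
  if (!torch_tensorrt::ts::check_method_operator_support(mod, "forward")) {
    return 1;
  }

  // Hypothetical input shape
  auto input = torch_tensorrt::Input(std::vector<int64_t>{1, 3, 224, 224});
  auto spec = torch_tensorrt::ts::CompileSpec({input});

  // Was CompileModule
  auto trt_mod = torch_tensorrt::ts::compile(mod, spec);

  // Was ConvertMethodToTRTEngine
  auto engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", spec);

  // Was EmbedEngineInNewModule
  auto wrapped = torch_tensorrt::ts::embed_engine_in_new_module(engine, spec.device);
  wrapped.save("trt_model.ts");
  return 0;
}
```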

cpp/src/torch_tensorrt.cpp

Lines changed: 4 additions & 4 deletions
@@ -12,11 +12,11 @@ namespace torchscript {
 // Defined in compile_spec.cpp
 torch_tensorrt::core::CompileSpec to_internal_compile_spec(CompileSpec external);

-bool CheckMethodOperatorSupport(const torch::jit::script::Module& module, std::string method_name) {
+bool check_method_operator_support(const torch::jit::script::Module& module, std::string method_name) {
   return torch_tensorrt::core::CheckMethodOperatorSupport(module, method_name);
 }

-std::string ConvertMethodToTRTEngine(
+std::string convert_method_to_trt_engine(
     const torch::jit::script::Module& module,
     std::string method_name,
     CompileSpec info) {

@@ -26,14 +26,14 @@ std::string ConvertMethodToTRTEngine(
   return torch_tensorrt::core::ConvertGraphToTRTEngine(module, method_name, to_internal_compile_spec(info));
 }

-torch::jit::script::Module CompileModule(const torch::jit::script::Module& module, CompileSpec info) {
+torch::jit::script::Module compile(const torch::jit::script::Module& module, CompileSpec info) {
   LOG_DEBUG(get_build_info());
   // Want to export a much simpler (non TRT header dependent) API so doing the
   // type conversion here
   return torch_tensorrt::core::CompileGraph(module, to_internal_compile_spec(info));
 }

-torch::jit::Module EmbedEngineInNewModule(const std::string& engine, Device device) {
+torch::jit::Module embed_engine_in_new_module(const std::string& engine, Device device) {
   return torch_tensorrt::core::EmbedEngineInNewModule(engine, to_internal_cuda_device(device));
 }
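As this implementation shows, the renamed public functions remain thin wrappers: they still delegate to the unchanged PascalCase functions in `torch_tensorrt::core` (`CheckMethodOperatorSupport`, `ConvertGraphToTRTEngine`, `CompileGraph`, `EmbedEngineInNewModule`), so the rename only touches the user-facing C++ surface.
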
examples/benchmark/main.cpp

Lines changed: 2 additions & 2 deletions
@@ -127,11 +127,11 @@ int main(int argc, const char* argv[]) {
   compile_spec.enabled_precisions.insert(torch::kF16);
 #endif

-  auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+  auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);

 #ifdef SAVE_ENGINE
   std::cout << "Compiling graph to save as TRT engine (/tmp/engine_converted_from_jit.trt)" << std::endl;
-  auto engine = torch_tensorrt::ts::ConvertMethodToTRTEngine(mod, "forward", compile_spec);
+  auto engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", compile_spec);
   std::ofstream out("/tmp/engine_converted_from_jit.trt");
   out << engine;
   out.close();

examples/int8/ptq/main.cpp

Lines changed: 2 additions & 2 deletions
@@ -56,14 +56,14 @@ torch::jit::Module compile_int8_model(const std::string& data_dir, torch::jit::M

 #ifdef SAVE_ENGINE
   std::cout << "Compiling graph to save as TRT engine (/tmp/engine_converted_from_jit.trt)" << std::endl;
-  auto engine = torch_tensorrt::ts::ConvertMethodToTRTEngine(mod, "forward", compile_spec);
+  auto engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", compile_spec);
   std::ofstream out("/tmp/int8_engine_converted_from_jit.trt");
   out << engine;
   out.close();
 #endif

   std::cout << "Compiling and quantizing module" << std::endl;
-  auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+  auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);
   return std::move(trt_mod);
 }
examples/int8/qat/main.cpp

Lines changed: 2 additions & 2 deletions
@@ -40,14 +40,14 @@ torch::jit::Module compile_int8_qat_model(const std::string& data_dir, torch::ji

 #ifdef SAVE_ENGINE
   std::cout << "Compiling graph to save as TRT engine (/tmp/engine_converted_from_jit.trt)" << std::endl;
-  auto engine = torch_tensorrt::ts::ConvertMethodToTRTEngine(mod, "forward", compile_spec);
+  auto engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", compile_spec);
   std::ofstream out("/tmp/int8_engine_converted_from_jit.trt");
   out << engine;
   out.close();
 #endif

   std::cout << "Compiling and quantizing module" << std::endl;
-  auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+  auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);
   return std::move(trt_mod);
 }
tests/accuracy/test_dla_fp16_accuracy.cpp

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ TEST_P(AccuracyTests, DLAFP16AccuracyIsClose) {
   compile_spec.device.allow_gpu_fallback = true;
   compile_spec.workspace_size = 1 << 28;

-  auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+  auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);

   torch::Tensor trt_correct = torch::zeros({1}, {torch::kCUDA}), trt_total = torch::zeros({1}, {torch::kCUDA});
   for (auto batch : *eval_dataloader) {