Commit 6ebc1fb

refactor: apply linting

Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>

1 parent 3de9048, commit 6ebc1fb

File tree: 7 files changed (+35 / -31 lines)

core/conversion/conversionctx/ConversionCtx.cpp

Lines changed: 3 additions & 2 deletions

@@ -55,7 +55,8 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
   }
 
   builder = make_trt(nvinfer1::createInferBuilder(logger));
-  net = make_trt(builder->createNetworkV2(1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
+  net = make_trt(
+      builder->createNetworkV2(1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
 
   LOG_DEBUG(build_settings);
   cfg = make_trt(builder->createBuilderConfig());

@@ -165,7 +166,7 @@ std::string ConversionCtx::SerializeEngine() {
   }
   auto serialized_network = engine->serialize();
   engine->destroy();
-#endif
+#endif
   auto engine_str = std::string((const char*)serialized_network->data(), serialized_network->size());
   return engine_str;
 }
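
Note (not part of the commit): the reformatted call above builds the TensorRT network in explicit-batch mode and hands ownership to make_trt(). Below is a minimal standalone sketch of the same pattern, assuming TensorRT older than 8 (so destroy() is the correct cleanup); the SketchLogger class and variable names are illustrative, not from this repository.

#include <iostream>
#include <memory>
#include "NvInfer.h"

// Illustrative logger; TRTorch has its own logging wrapper, this one is a stand-in.
class SketchLogger : public nvinfer1::ILogger {
  void log(Severity severity, const char* msg) noexcept override {
    if (severity <= Severity::kWARNING) {
      std::cerr << msg << std::endl;
    }
  }
};

int main() {
  SketchLogger logger;
  // shared_ptr with a destroy()-based deleter, mirroring make_trt() for TensorRT < 8.
  auto builder = std::shared_ptr<nvinfer1::IBuilder>(
      nvinfer1::createInferBuilder(logger), [](nvinfer1::IBuilder* b) { b->destroy(); });
  // kEXPLICIT_BATCH makes the batch dimension part of the tensor shapes, which is
  // required for dynamic shapes and is the mode used in the diff above.
  auto net = std::shared_ptr<nvinfer1::INetworkDefinition>(
      builder->createNetworkV2(1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)),
      [](nvinfer1::INetworkDefinition* n) { n->destroy(); });
  return net != nullptr ? 0 : 1;
}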

core/conversion/converters/Weights.h

Lines changed: 1 addition & 1 deletion

@@ -1,7 +1,7 @@
 #pragma once
 
-#include "core/util/prelude.h"
 #include "core/conversion/conversionctx/ConversionCtx.h"
+#include "core/util/prelude.h"
 
 namespace trtorch {
 namespace core {

core/conversion/converters/impl/interpolate.cpp

Lines changed: 1 addition & 1 deletion

@@ -114,7 +114,7 @@ void resize_layer_size(
   if (align_corners) {
     resize_layer->setCoordinateTransformation(nvinfer1::ResizeCoordinateTransformation::kALIGN_CORNERS);
   }
-#endif
+#endif
   auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
 
   LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());

core/conversion/converters/impl/quantization.cpp

Lines changed: 1 addition & 1 deletion

@@ -8,7 +8,7 @@ namespace conversion {
 namespace converters {
 namespace impl {
 namespace {
-
+
 #if NV_TENSORRT_MAJOR > 7
 // clang-format off
 auto quantization_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()

core/runtime/runtime.h

Lines changed: 0 additions & 1 deletion

@@ -74,7 +74,6 @@ class DeviceList {
   std::string dump_list();
 };
 
-
 DeviceList get_available_device_list();
 const std::unordered_map<std::string, std::string>& get_dla_supported_SMs();
 

core/util/trt_util.h

Lines changed: 7 additions & 7 deletions

@@ -9,27 +9,27 @@
 namespace nvinfer1 {
 
 #if NV_TENSORRT_MAJOR < 8
-
+
 #define TRT_ENGINE_CAPABILITY_STANDARD nvinfer1::EngineCapability::kDEFAULT
 #define TRT_ENGINE_CAPABILITY_SAFETY nvinfer1::EngineCapability::kSAFE_GPU
 #define TRT_ENGINE_CAPABILITY_DLA_STANDALONE nvinfer1::EngineCapability::kSAFE_DLA
-
+
 template <class T>
 std::shared_ptr<T> make_trt(T* p) {
-  return std::shared_ptr<T>(p, [](T* p){p->destroy();});
+  return std::shared_ptr<T>(p, [](T* p) { p->destroy(); });
 }
-
+
 #else
-
+
 #define TRT_ENGINE_CAPABILITY_STANDARD nvinfer1::EngineCapability::kSTANDARD
 #define TRT_ENGINE_CAPABILITY_SAFETY nvinfer1::EngineCapability::kSAFETY
 #define TRT_ENGINE_CAPABILITY_DLA_STANDALONE nvinfer1::EngineCapability::kDLA_STANDALONE
-
+
 template <class T>
 std::shared_ptr<T> make_trt(T* p) {
   return std::shared_ptr<T>(p);
 }
-
+
 #endif
 
 inline std::ostream& operator<<(std::ostream& os, const nvinfer1::TensorFormat& format) {
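
Note (not part of the commit): make_trt() has two flavours because TensorRT interface objects must be released with destroy() before version 8, while from version 8 onward the destructors are public and shared_ptr's default deleter suffices. A small usage sketch under those assumptions; the function name, variable names, and the 1 MiB workspace value are illustrative.

#include "NvInfer.h"
#include "core/util/trt_util.h"

// Both objects are released automatically when they leave scope (config first,
// then builder), using whichever cleanup the compile-time NV_TENSORRT_MAJOR check selected.
void build_config_sketch(nvinfer1::ILogger& logger) {
  auto builder = nvinfer1::make_trt(nvinfer1::createInferBuilder(logger));
  auto cfg = nvinfer1::make_trt(builder->createBuilderConfig());
  cfg->setMaxWorkspaceSize(1 << 20); // arbitrary 1 MiB scratch budget for the sketch
}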

py/trtorch/csrc/tensorrt_backend.cpp

Lines changed: 22 additions & 18 deletions

@@ -65,26 +65,30 @@ c10::impl::GenericList TensorRTBackend::execute(c10::IValue handle, c10::impl::G
 }
 
 namespace {
-c10::IValue preprocess(const torch::jit::Module& mod,
-                       const c10::Dict<c10::IValue,
-                       // this API changed between 1.9 and 1.10
+c10::IValue preprocess(
+    const torch::jit::Module& mod,
+    const c10::Dict<
+        c10::IValue,
+        // this API changed between 1.9 and 1.10
 #if TORCH_VERSION_MAJOR < 2 && TORCH_VERSION_MINOR < 10
-                       c10::IValue>& method_compile_spec
-# else
-                       c10::IValue>& method_compile_spec, const torch::jit::BackendDebugHandleGenerator& generate_debug_handles
+        c10::IValue>& method_compile_spec
+#else
+        c10::IValue>& method_compile_spec,
+    const torch::jit::BackendDebugHandleGenerator& generate_debug_handles
 #endif
-) {
-  for (auto it = method_compile_spec.begin(), end = method_compile_spec.end(); it != end; ++it) {
-    TRTORCH_CHECK(
-        core::CheckMethodOperatorSupport(mod, it->key().toStringRef()),
-        "Method " << it->key().toStringRef() << "cannot be compiled by TRTorch");
-  }
-  return mod._ivalue();
-};
-
-static const std::string trt("tensorrt");
-static auto reg = torch::jit::backend<TensorRTBackend>(trt);
-static auto preproc_reg = torch::jit::backend_preprocess_register(trt, torch::jit::detail::BackendPreprocessFunction(preprocess));
+) {
+  for (auto it = method_compile_spec.begin(), end = method_compile_spec.end(); it != end; ++it) {
+    TRTORCH_CHECK(
+        core::CheckMethodOperatorSupport(mod, it->key().toStringRef()),
+        "Method " << it->key().toStringRef() << "cannot be compiled by TRTorch");
+  }
+  return mod._ivalue();
+};
+
+static const std::string trt("tensorrt");
+static auto reg = torch::jit::backend<TensorRTBackend>(trt);
+static auto preproc_reg =
+    torch::jit::backend_preprocess_register(trt, torch::jit::detail::BackendPreprocessFunction(preprocess));
 } // namespace
 
 } // namespace backend
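
Note (not part of the commit): the block above registers a preprocess hook for PyTorch's to_backend flow; the hook's signature gained a BackendDebugHandleGenerator parameter in PyTorch 1.10, hence the version guard. Below is a minimal sketch of the same registration pattern for a hypothetical backend, assuming the PyTorch >= 1.10 signature and header layout; "my_backend", kName, and my_preprocess are illustrative names, not TRTorch's.

#include <string>
#include <torch/csrc/jit/backends/backend_detail.h>
#include <torch/csrc/jit/backends/backend_preprocess.h>

namespace {

// Preprocess runs ahead of backend compilation; this sketch simply passes the module
// through, which is also what the TRTorch preprocess above does after its
// operator-support check.
c10::IValue my_preprocess(
    const torch::jit::Module& mod,
    const c10::Dict<c10::IValue, c10::IValue>& method_compile_spec,
    const torch::jit::BackendDebugHandleGenerator& generate_debug_handles) {
  (void)method_compile_spec;
  (void)generate_debug_handles;
  return mod._ivalue();
}

static const std::string kName("my_backend");
static auto preproc_reg =
    torch::jit::backend_preprocess_register(kName, torch::jit::detail::BackendPreprocessFunction(my_preprocess));

} // namespace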
