Skip to content

Commit 47ae984

Browse files
committed
chore: linter fixes
Signed-off-by: Dheeraj Peri <[email protected]>
1 parent b2c8f59 commit 47ae984

File tree

8 files changed

+179
-173
lines changed

8 files changed

+179
-173
lines changed

core/conversion/converters/BUILD

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,6 @@ cc_library(
6262
"impl/constant_pad.cpp",
6363
"impl/conv_deconv.cpp",
6464
"impl/cumsum.cpp",
65-
"impl/dual_ops.cpp",
6665
"impl/einsum.cpp",
6766
"impl/element_wise.cpp",
6867
"impl/expand.cpp",

core/conversion/converters/impl/shuffle.cpp

Lines changed: 27 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -67,33 +67,33 @@ static auto shuffle_registrations TORCHTRT_UNUSED =
6767
.pattern(
6868
{"aten::reshape(Tensor self, int[] shape) -> (Tensor)",
6969
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
70-
auto in = args[0].ITensorOrFreeze(ctx);
71-
auto in_shape = util::toVec(in->getDimensions());
72-
std::vector<int64_t> new_shape;
73-
nvinfer1::ITensor* shape_tensor;
74-
if (ctx->input_is_dynamic) {
75-
auto new_shape = args[1].unwrapToITensorList();
76-
auto concat_layer = ctx->net->addConcatenation(new_shape.data(), new_shape.size());
77-
TORCHTRT_CHECK(concat_layer, "Unable to create concatenation layer from node: " << *n);
78-
concat_layer->setAxis(static_cast<int32_t>(0));
79-
shape_tensor = concat_layer->getOutput(0);
80-
} else {
81-
auto new_shape = torch::reshape(torch::rand(in_shape), args[1].unwrapToIntList().vec()).sizes().vec();
82-
}
83-
auto shuffle = ctx->net->addShuffle(*in);
84-
shuffle->setName(util::node_info(n).c_str());
85-
TORCHTRT_CHECK(shuffle, "Unable to create shuffle layer from node: " << *n);
86-
87-
if (ctx->input_is_dynamic){
88-
shuffle->setInput(1, *shape_tensor);
89-
} else {
90-
shuffle->setReshapeDimensions(util::toDims(new_shape));
91-
}
92-
93-
auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], shuffle->getOutput(0));
94-
LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
95-
96-
return true;
70+
auto in = args[0].ITensorOrFreeze(ctx);
71+
auto in_shape = util::toVec(in->getDimensions());
72+
std::vector<int64_t> new_shape;
73+
nvinfer1::ITensor* shape_tensor;
74+
if (ctx->input_is_dynamic) {
75+
auto new_shape = args[1].unwrapToITensorList();
76+
auto concat_layer = ctx->net->addConcatenation(new_shape.data(), new_shape.size());
77+
TORCHTRT_CHECK(concat_layer, "Unable to create concatenation layer from node: " << *n);
78+
concat_layer->setAxis(static_cast<int32_t>(0));
79+
shape_tensor = concat_layer->getOutput(0);
80+
} else {
81+
auto new_shape = torch::reshape(torch::rand(in_shape), args[1].unwrapToIntList().vec()).sizes().vec();
82+
}
83+
auto shuffle = ctx->net->addShuffle(*in);
84+
shuffle->setName(util::node_info(n).c_str());
85+
TORCHTRT_CHECK(shuffle, "Unable to create shuffle layer from node: " << *n);
86+
87+
if (ctx->input_is_dynamic) {
88+
shuffle->setInput(1, *shape_tensor);
89+
} else {
90+
shuffle->setReshapeDimensions(util::toDims(new_shape));
91+
}
92+
93+
auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], shuffle->getOutput(0));
94+
LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
95+
96+
return true;
9797
}})
9898
.pattern(
9999
{"aten::view(Tensor(a) self, int[] size) -> (Tensor(a))",

core/conversion/evaluators/aten.cpp

Lines changed: 17 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,11 @@ namespace conversion {
1919
namespace evaluators {
2020
namespace {
2121

22-
nvinfer1::ITensor* index_layer(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* input_tensor, int64_t index){
22+
nvinfer1::ITensor* index_layer(
23+
ConversionCtx* ctx,
24+
const torch::jit::Node* n,
25+
nvinfer1::ITensor* input_tensor,
26+
int64_t index) {
2327
// index to access needs to be an at::Tensor
2428
at::Tensor indices = torch::tensor({index}).to(torch::kI32);
2529
auto indices_out = torch_tensorrt::core::conversion::converters::tensor_to_const(ctx, indices);
@@ -30,15 +34,15 @@ nvinfer1::ITensor* index_layer(ConversionCtx* ctx, const torch::jit::Node* n, nv
3034
return indexed_tensor;
3135
}
3236

33-
c10::IValue dynamic_size_layer(ConversionCtx* ctx, const torch::jit::Node* n, kwargs& args){
37+
c10::IValue dynamic_size_layer(ConversionCtx* ctx, const torch::jit::Node* n, kwargs& args) {
3438
LOG_DEBUG("Using dynamic version of aten::size evaluator");
3539
auto in = args.at(n->input(0)).ITensorOrFreeze(ctx);
3640
LOG_DEBUG("Input dimensions: " << in->getDimensions());
3741
auto shape_layer = ctx->net->addShape(*in);
3842
TORCHTRT_CHECK(shape_layer, "Unable to create shape layer from node: " << *n);
3943
auto shape_1d_tensor = shape_layer->getOutput(0);
4044

41-
if (n->inputs().size() != 1){
45+
if (n->inputs().size() != 1) {
4246
auto maxDim = static_cast<int64_t>(in->getDimensions().nbDims);
4347
auto dim = args.at(n->input(1)).unwrapToInt();
4448
// Handle negative axis by refering to nbDims of input Tensor
@@ -306,7 +310,7 @@ auto aten_registrations TORCHTRT_UNUSED =
306310
if (n->inputs().size() == 1) {
307311
if (tensor_var.isITensor()) {
308312
auto tensor = tensor_var.ITensor();
309-
if (ctx->input_is_dynamic){
313+
if (ctx->input_is_dynamic) {
310314
return dynamic_size_layer(ctx, n, args);
311315
}
312316
return util::toVec(tensor->getDimensions());
@@ -322,7 +326,7 @@ auto aten_registrations TORCHTRT_UNUSED =
322326
} else {
323327
auto dim = args.at(n->input(1)).unwrapToInt();
324328
if (tensor_var.isITensor()) {
325-
if (ctx->input_is_dynamic){
329+
if (ctx->input_is_dynamic) {
326330
return dynamic_size_layer(ctx, n, args);
327331
}
328332
auto tensor = tensor_var.ITensor();
@@ -359,14 +363,14 @@ auto aten_registrations TORCHTRT_UNUSED =
359363
[](ConversionCtx* ctx, const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
360364
auto list_input = args.at(n->input(0));
361365
auto idx = args.at(n->input(1)).unwrapToInt();
362-
if (list_input.isIValue()){
363-
auto list = args.at(n->input(0)).IValue()->to<c10::List<c10::IValue>>();
364-
const int64_t list_size = list.size();
365-
const int64_t normalized_idx = normalizeIndex(idx, list_size);
366-
TORCHTRT_CHECK(
367-
normalized_idx >= 0 || normalized_idx < list_size, "List index out of range (aten::__getitem__)");
368-
return list.get(normalized_idx);
369-
} else if(list_input.isITensor()){
366+
if (list_input.isIValue()) {
367+
auto list = args.at(n->input(0)).IValue()->to<c10::List<c10::IValue>>();
368+
const int64_t list_size = list.size();
369+
const int64_t normalized_idx = normalizeIndex(idx, list_size);
370+
TORCHTRT_CHECK(
371+
normalized_idx >= 0 || normalized_idx < list_size, "List index out of range (aten::__getitem__)");
372+
return list.get(normalized_idx);
373+
} else if (list_input.isITensor()) {
370374
auto indexed_tensor = index_layer(ctx, n, list_input.ITensorOrFreeze(ctx), idx);
371375
auto tensor_holder = TensorContainer();
372376
tensor_holder.hold_tensor(indexed_tensor);

0 commit comments

Comments (0)