Skip to content

Commit ebfb086

Browse files
committed
[fix]: fix bug in aten::to — when the network contains only an aten::to layer, the converter changed the network input's name
Signed-off-by: inocsin <[email protected]>
1 parent c2fb43b commit ebfb086

File tree

2 files changed

+45
-15
lines changed

2 files changed

+45
-15
lines changed

core/conversion/converters/converter_util.cpp

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -129,24 +129,24 @@ nvinfer1::ITensor* applyIdentityOp(ConversionCtx* ctx, nvinfer1::ITensor* tensor
129129
}
130130

131131
nvinfer1::ITensor* castITensor(ConversionCtx* ctx, nvinfer1::ITensor* tensor, nvinfer1::DataType dtype) {
132-
if (tensor->getType() != dtype) {
133-
std::ostringstream tensor_id;
134-
tensor_id << reinterpret_cast<int*>(tensor);
132+
// An identity layer is always needed, regardless of whether tensor->getType() == dtype.
133+
// Otherwise the AssociateValueAndTensor call in the aten::to converter would rename the input tensor,
134+
// which causes an error when the input of aten::to is a network input.
135+
std::ostringstream tensor_id;
136+
tensor_id << reinterpret_cast<int*>(tensor);
135137

136-
auto id_layer = ctx->net->addIdentity(*tensor);
137-
TORCHTRT_CHECK(id_layer, "Unable to create identity layer for ITensor: " << tensor_id.str());
138-
auto casted_tensor = id_layer->getOutput(0);
139-
casted_tensor->setType(dtype);
138+
auto id_layer = ctx->net->addIdentity(*tensor);
139+
TORCHTRT_CHECK(id_layer, "Unable to create identity layer for ITensor: " << tensor_id.str());
140+
auto casted_tensor = id_layer->getOutput(0);
141+
casted_tensor->setType(dtype);
140142

141-
LOG_DEBUG(ctx->logger, "Casting ITensor " << tensor_id.str() << " from " << tensor->getType() << " to " << dtype);
143+
LOG_DEBUG(ctx->logger, "Casting ITensor " << tensor_id.str() << " from " << tensor->getType() << " to " << dtype);
144+
145+
std::stringstream ss;
146+
ss << "[Cast ITensor " << tensor_id.str() << " from " << tensor->getType() << " to " << dtype << "]";
147+
id_layer->setName(ss.str().c_str());
148+
return casted_tensor;
142149

143-
std::stringstream ss;
144-
ss << "[Cast ITensor " << tensor_id.str() << " from " << tensor->getType() << " to " << dtype << "]";
145-
id_layer->setName(ss.str().c_str());
146-
return casted_tensor;
147-
} else {
148-
return tensor;
149-
}
150150
}
151151

152152
nvinfer1::ITensor* tensor_to_const(ConversionCtx* ctx, at::Tensor t, const std::string& name) {

tests/core/conversion/converters/test_cast.cpp

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -135,6 +135,36 @@ TEST(Converters, ATenBoolToINT32TensorConvertsCorrectly) {
135135
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt, 2e-6));
136136
}
137137

138+
139+
TEST(Converters, ATenToSingleConvertsCorrectly) {
140+
const auto graph = R"IR(
141+
graph(%y.1 : Tensor):
142+
%4 : int = prim::Constant[value=6]()
143+
%5 : bool = prim::Constant[value=0]()
144+
%6 : None = prim::Constant()
145+
%y0.1 : Tensor = aten::to(%y.1, %4, %5, %5, %6)
146+
return (%y0.1))IR";
147+
148+
auto g = std::make_shared<torch::jit::Graph>();
149+
150+
torch::jit::parseIR(graph, &*g);
151+
152+
auto in = at::randint(1, 10, {3}, {at::kCUDA});
153+
154+
auto jit_in = at::clone(in);
155+
auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
156+
auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});
157+
158+
auto trt_in = at::clone(in);
159+
params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
160+
auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {trt_in});
161+
162+
auto trt = trt_results[0].reshape(jit_results[0].sizes());
163+
ASSERT_TRUE(jit_results[0].scalar_type() == trt.scalar_type());
164+
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt, 2e-6));
165+
}
166+
167+
138168
TEST(Converters, ATenTypeAsConvertsCorrectly) {
139169
const auto graph = R"IR(
140170
graph(%0 : Tensor,

0 commit comments

Comments
 (0)