Commit 86b99ac

fix comments and fix bug
1 parent 9d98ca0 commit 86b99ac

5 files changed, +22 -9 lines

paddle/fluid/inference/tensorrt/convert/conv2d_op.cc

Lines changed: 2 additions & 2 deletions

@@ -18,7 +18,7 @@ namespace paddle {
 namespace inference {
 namespace tensorrt {

-bool if_skip_merging_optimize(TensorRTEngine* engine_,
+bool to_skip_merging_optimize(TensorRTEngine* engine_,
                               const std::vector<int>& filters,
                               const std::vector<int>& strides,
                               const std::vector<int>& paddings,
@@ -101,7 +101,7 @@ class Conv2dOpConverter : public OpConverter {
     engine_->SetITensor(output_name, layer->getOutput(0));

     if (test_mode ||
-        if_skip_merging_optimize(engine_, {filter_h, filter_w}, strides,
+        to_skip_merging_optimize(engine_, {filter_h, filter_w}, strides,
                                  paddings, op_desc.Input("Input").front())) {
       engine_->DeclareOutput(output_name);
     }

paddle/fluid/inference/tensorrt/engine.cc

Lines changed: 4 additions & 0 deletions

@@ -133,6 +133,10 @@ void TensorRTEngine::DeclareOutput(const nvinfer1::ILayer *layer, int offset,
   buffer_sizes_[name] = 0;
 }

+bool TensorRTEngine::HasDeclared(const std::string &name) {
+  return buffer_sizes_.count(name) > 0;
+}
+
 void TensorRTEngine::DeclareOutput(const std::string &name) {
   PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate output name %s",
                     name);

paddle/fluid/inference/tensorrt/engine.h

Lines changed: 2 additions & 0 deletions

@@ -91,6 +91,8 @@ class TensorRTEngine : public EngineBase {
                     const std::string& name);
   // Set the itensor_map_[name] as the network's output, and set its name.
   void DeclareOutput(const std::string& name);
+  // Check if the ITensor has been declared
+  bool HasDeclared(const std::string& name);

   // GPU memory address for an ITensor with specific name. One can operate on
   // these memory directly for acceleration, for example, output the converted
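
The new HasDeclared() accessor pairs with DeclareOutput(), which (per the PADDLE_ENFORCE_EQ in engine.cc above) refuses to register the same output name twice. A minimal sketch of the intended check-then-declare pattern, mirroring the tensorrt_engine_op.h change further below; engine and output_maps are assumed to be set up as in that kernel:

  // Declare each network output at most once: DeclareOutput() aborts on a
  // duplicate name, so skip names the engine has already registered.
  for (auto& output : output_maps) {
    if (!engine->HasDeclared(output)) {
      engine->DeclareOutput(output);
    }
  }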

paddle/fluid/inference/tests/api/trt_models_tester.cc

Lines changed: 11 additions & 6 deletions

@@ -96,11 +96,16 @@ void CompareTensorRTWithFluid(int batch_size, std::string model_dirname) {
   }
 }

-TEST(trt_models_test, main) {
-  std::vector<std::string> infer_models = {"mobilenet", "resnet50",
-                                           "resnext50"};
-  for (auto &model_dir : infer_models) {
-    CompareTensorRTWithFluid(1, FLAGS_dirname + "/" + model_dir);
-  }
+TEST(trt_models_test, mobilenet) {
+  CompareTensorRTWithFluid(1, FLAGS_dirname + "/mobilenet");
+}
+
+TEST(trt_models_test, resnet50) {
+  CompareTensorRTWithFluid(1, FLAGS_dirname + "/resnet50");
 }
+
+TEST(trt_models_test, resnext50) {
+  CompareTensorRTWithFluid(1, FLAGS_dirname + "/resnext50");
+}
+
 }  // namespace paddle

paddle/fluid/operators/tensorrt_engine_op.h

Lines changed: 3 additions & 1 deletion

@@ -223,7 +223,9 @@ class TensorRTEngineKernel : public framework::OpKernel<T> {

   // Add outputs
   for (auto& output : output_maps) {
-    engine->DeclareOutput(output);
+    if (!engine->HasDeclared(output)) {
+      engine->DeclareOutput(output);
+    }
   }

   engine->FreezeNetwork();
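
This guard appears to be the bug fix referred to in the commit message: a converter such as conv2d_op.cc may already have declared an output (via the to_skip_merging_optimize path above), so unconditionally calling DeclareOutput() for every name in output_maps could trip the duplicate-name check in engine.cc. Checking HasDeclared() first makes the declaration step safe to repeat.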
