
Commit 726f2ce

Fix bug of referencing a temporary variable. (#14614)
test=develop
1 parent db9284e commit 726f2ce
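The bug: NativePaddlePredictor::SetFeed built each feed tensor as a stack-local framework::LoDTensor, so its memory did not reliably outlive the call; the fix caches the tensors in a member vector (feed_tensors_). A minimal sketch of the pattern, using a hypothetical Tensor type instead of Paddle's real LoDTensor (names and ownership details are illustrative assumptions, not Paddle internals):

#include <cstring>
#include <vector>

// Hypothetical stand-in for framework::LoDTensor: owns a growable buffer.
struct Tensor {
  std::vector<float> data;
  float *mutable_data(size_t n) { data.resize(n); return data.data(); }
};

struct Predictor {
  // The commit's fix: cache feed tensors in a member so their buffers
  // live as long as the predictor, not just one SetFeed() call.
  std::vector<Tensor> feed_tensors_;

  // Buggy shape (pre-commit): a `Tensor input;` local to the call dies
  // when SetFeed() returns, leaving any retained pointer dangling.
  float *SetFeed(const std::vector<float> &src, size_t i) {
    if (feed_tensors_.size() <= i) feed_tensors_.resize(i + 1);
    Tensor &input = feed_tensors_[i];  // reference into member storage
    float *dst = input.mutable_data(src.size());
    std::memcpy(dst, src.data(), src.size() * sizeof(float));
    return dst;  // safe: points into the cached member tensor
  }
};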

File tree: 4 files changed, 11 additions, 2 deletions


paddle/fluid/inference/api/analysis_predictor.cc

Lines changed: 1 addition & 0 deletions
@@ -284,6 +284,7 @@ bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
         framework::GetFetchVariable(*scope, "fetch", idx);
     auto type = fetch.type();
     auto output = &(outputs->at(i));
+    output->name = fetchs_[idx]->Input("X")[0];
     if (type == typeid(float)) {
       GetFetchOne<float>(fetch, output);
       output->dtype = PaddleDType::FLOAT32;
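The one added line fills in each output tensor's name from the fetch op's "X" input, so callers can tell which variable each output corresponds to. A hedged usage sketch (Run signature and header path assumed from this API generation):

#include <iostream>
#include <vector>

#include "paddle/fluid/inference/api/paddle_inference_api.h"

void PrintOutputNames(paddle::PaddlePredictor *predictor,
                      const std::vector<paddle::PaddleTensor> &inputs) {
  std::vector<paddle::PaddleTensor> outputs;
  predictor->Run(inputs, &outputs);
  for (const auto &t : outputs) {
    // After this commit, t.name carries the fetched variable's name
    // (fetchs_[idx]->Input("X")[0]); before it, name was left unset.
    std::cout << "output: " << t.name << std::endl;
  }
}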

paddle/fluid/inference/api/analysis_predictor.h

Lines changed: 1 addition & 1 deletion
@@ -109,7 +109,7 @@ class AnalysisPredictor : public PaddlePredictor {
   std::map<std::string, size_t> feed_names_;
   std::vector<framework::OpDesc *> fetchs_;
   // Memory buffer for feed inputs. The temporary LoDTensor will cause serious
-  // concurrency problems, so cache them.
+  // concurrency problems, wrong results and memory leak, so cache them.
   std::vector<framework::LoDTensor> feed_tensors_;
   details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;

paddle/fluid/inference/api/api_impl.cc

Lines changed: 6 additions & 1 deletion
@@ -185,8 +185,12 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                << inputs.size();
     return false;
   }
+
+  // Cache the inputs memory for better concurrency performance.
+  feed_tensors_.resize(inputs.size());
+
   for (size_t i = 0; i < inputs.size(); ++i) {
-    framework::LoDTensor input;
+    auto &input = feed_tensors_[i];
     framework::DDim ddim = framework::make_ddim(inputs[i].shape);
     void *input_ptr;
     if (inputs[i].dtype == PaddleDType::INT64) {
@@ -261,6 +265,7 @@ bool NativePaddlePredictor::GetFetch(std::vector<PaddleTensor> *outputs,
         framework::GetFetchVariable(*scope, "fetch", idx);
     auto type = fetch.type();
     auto output = &(outputs->at(i));
+    output->name = fetchs_[idx]->Input("X")[0];
     if (type == typeid(float)) {
       GetFetchOne<float>(fetch, output);
       output->dtype = PaddleDType::FLOAT32;
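Note the design implication of the cache: feed_tensors_ is per-predictor mutable state, so one predictor instance still must not serve concurrent Run() calls; each thread should own its own predictor. A hedged sketch of that pattern (Clone() is part of the PaddlePredictor interface; error handling omitted):

#include <memory>
#include <thread>
#include <vector>

#include "paddle/fluid/inference/api/paddle_inference_api.h"

void RunConcurrently(paddle::PaddlePredictor *main_predictor,
                     const std::vector<paddle::PaddleTensor> &inputs) {
  auto worker = [&inputs](std::unique_ptr<paddle::PaddlePredictor> pred) {
    std::vector<paddle::PaddleTensor> outputs;
    pred->Run(inputs, &outputs);  // writes only this clone's feed_tensors_
  };
  std::thread t1(worker, main_predictor->Clone());
  std::thread t2(worker, main_predictor->Clone());
  t1.join();
  t2.join();
}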

paddle/fluid/inference/api/api_impl.h

Lines changed: 3 additions & 0 deletions
@@ -69,6 +69,9 @@ class NativePaddlePredictor : public PaddlePredictor {
   std::vector<framework::OpDesc *> feeds_;
   std::map<std::string, size_t> feed_names_;
   std::vector<framework::OpDesc *> fetchs_;
+  // Memory buffer for feed inputs. The temporary LoDTensor will cause serious
+  // concurrency problems, wrong results and memory leak, so cache them.
+  std::vector<framework::LoDTensor> feed_tensors_;
   // Do not use unique_ptr, use parent scope to delete
   framework::Scope *sub_scope_{nullptr};
   details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
