Skip to content

Commit 90bc14d

Browse files
authored
simple fix on inference tester helper (#13507)
1 parent a54c423 commit 90bc14d

File tree

2 files changed

+14
-13
lines changed

2 files changed

+14
-13
lines changed

paddle/fluid/inference/api/api_impl.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,10 +20,9 @@
2020
#include <string>
2121
#include <vector>
2222

23-
#include "paddle/fluid/inference/api/paddle_inference_api.h"
24-
2523
#include "paddle/fluid/framework/ddim.h"
2624
#include "paddle/fluid/framework/lod_tensor.h"
25+
#include "paddle/fluid/inference/api/paddle_inference_api.h"
2726
#include "paddle/fluid/inference/io.h"
2827
#include "paddle/fluid/platform/init.h"
2928
#include "paddle/fluid/platform/profiler.h"

paddle/fluid/inference/tests/api/tester_helper.h

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -74,8 +74,8 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
7474
}
7575
}
7676

77-
std::unique_ptr<PaddlePredictor> GetPrediction(AnalysisConfig config,
78-
bool use_analysis = true) {
77+
std::unique_ptr<PaddlePredictor> CreateTestPredictor(
78+
const AnalysisConfig &config, bool use_analysis = true) {
7979
if (use_analysis) {
8080
return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
8181
config);
@@ -92,7 +92,7 @@ size_t GetSize(const PaddleTensor &out) {
9292

9393
std::unordered_map<std::string, int> GetFuseStatis(AnalysisConfig config,
9494
int *num_ops) {
95-
auto predictor = GetPrediction(config);
95+
auto predictor = CreateTestPredictor(config);
9696
AnalysisPredictor *analysis_predictor =
9797
dynamic_cast<AnalysisPredictor *>(predictor.get());
9898
auto &fuse_statis = analysis_predictor->analysis_argument()
@@ -113,11 +113,12 @@ std::unordered_map<std::string, int> GetFuseStatis(AnalysisConfig config,
113113
}
114114

115115
void TestOneThreadPrediction(
116-
AnalysisConfig config, const std::vector<std::vector<PaddleTensor>> inputs,
116+
const AnalysisConfig &config,
117+
const std::vector<std::vector<PaddleTensor>> &inputs,
117118
std::vector<PaddleTensor> *outputs, bool use_analysis = true) {
118119
int batch_size = FLAGS_batch_size;
119120
int num_times = FLAGS_repeat;
120-
auto predictor = GetPrediction(config, use_analysis);
121+
auto predictor = CreateTestPredictor(config, use_analysis);
121122
Timer timer;
122123
timer.tic();
123124
for (int i = 0; i < num_times; i++) {
@@ -130,7 +131,8 @@ void TestOneThreadPrediction(
130131
}
131132

132133
void TestMultiThreadPrediction(
133-
AnalysisConfig config, const std::vector<std::vector<PaddleTensor>> inputs,
134+
const AnalysisConfig &config,
135+
const std::vector<std::vector<PaddleTensor>> &inputs,
134136
std::vector<PaddleTensor> *outputs, int num_threads,
135137
bool use_analysis = true) {
136138
int batch_size = FLAGS_batch_size;
@@ -140,7 +142,7 @@ void TestMultiThreadPrediction(
140142
// TODO(yanchunwei): Bug here, the analyzer phase can't be parallelized
141143
// because AttentionLSTM's hard-coded node id will be damaged.
142144
for (int tid = 0; tid < num_threads; ++tid) {
143-
predictors.emplace_back(GetPrediction(config, use_analysis));
145+
predictors.emplace_back(CreateTestPredictor(config, use_analysis));
144146
}
145147
for (int tid = 0; tid < num_threads; ++tid) {
146148
threads.emplace_back([&, tid]() {
@@ -164,8 +166,8 @@ void TestMultiThreadPrediction(
164166
}
165167
}
166168

167-
void TestPrediction(AnalysisConfig config,
168-
const std::vector<std::vector<PaddleTensor>> inputs,
169+
void TestPrediction(const AnalysisConfig &config,
170+
const std::vector<std::vector<PaddleTensor>> &inputs,
169171
std::vector<PaddleTensor> *outputs, int num_threads,
170172
bool use_analysis = FLAGS_use_analysis) {
171173
LOG(INFO) << "use_analysis: " << use_analysis;
@@ -178,8 +180,8 @@ void TestPrediction(AnalysisConfig config,
178180
}
179181

180182
void CompareNativeAndAnalysis(
181-
AnalysisConfig config,
182-
const std::vector<std::vector<PaddleTensor>> inputs) {
183+
const AnalysisConfig &config,
184+
const std::vector<std::vector<PaddleTensor>> &inputs) {
183185
std::vector<PaddleTensor> native_outputs, analysis_outputs;
184186
TestOneThreadPrediction(config, inputs, &native_outputs, false);
185187
TestOneThreadPrediction(config, inputs, &analysis_outputs, true);

0 commit comments

Comments (0)