Skip to content

Commit 6ea9d1b

Browse files
committed
add analysis_predictor in vis_demo
test=develop
1 parent f444a72 commit 6ea9d1b

File tree

4 files changed

+15
-14
lines changed

4 files changed

+15
-14
lines changed

paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -42,8 +42,7 @@ void Main(bool use_gpu) {
4242
config.use_gpu = use_gpu;
4343
config.fraction_of_gpu_memory = 0.15;
4444
config.device = 0;
45-
auto predictor =
46-
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
45+
auto predictor = CreatePaddlePredictor<NativeConfig>(config);
4746

4847
for (int batch_id = 0; batch_id < 3; batch_id++) {
4948
//# 2. Prepare input.
@@ -85,8 +84,7 @@ void MainThreads(int num_threads, bool use_gpu) {
8584
config.use_gpu = use_gpu;
8685
config.fraction_of_gpu_memory = 0.15;
8786
config.device = 0;
88-
auto main_predictor =
89-
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
87+
auto main_predictor = CreatePaddlePredictor<NativeConfig>(config);
9088

9189
std::vector<std::thread> threads;
9290
for (int tid = 0; tid < num_threads; ++tid) {

paddle/fluid/inference/api/demo_ci/vis_demo.cc

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -34,12 +34,13 @@ DEFINE_bool(use_gpu, false, "Whether use gpu.");
3434
namespace paddle {
3535
namespace demo {
3636

37+
using contrib::AnalysisConfig;
3738
/*
38-
* Use the native fluid engine to inference the demo.
39+
* Use both the native and the analysis fluid engines to run inference on the demo.
3940
*/
4041
void Main(bool use_gpu) {
41-
std::unique_ptr<PaddlePredictor> predictor;
42-
NativeConfig config;
42+
std::unique_ptr<PaddlePredictor> predictor, analysis_predictor;
43+
AnalysisConfig config;
4344
config.param_file = FLAGS_modeldir + "/__params__";
4445
config.prog_file = FLAGS_modeldir + "/__model__";
4546
config.use_gpu = use_gpu;
@@ -49,8 +50,8 @@ void Main(bool use_gpu) {
4950
}
5051

5152
VLOG(3) << "init predictor";
52-
predictor =
53-
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
53+
predictor = CreatePaddlePredictor<NativeConfig>(config);
54+
analysis_predictor = CreatePaddlePredictor<AnalysisConfig>(config);
5455

5556
VLOG(3) << "begin to process data";
5657
// Just a single batch of data.
@@ -68,7 +69,7 @@ void Main(bool use_gpu) {
6869
input.dtype = PaddleDType::FLOAT32;
6970

7071
VLOG(3) << "run executor";
71-
std::vector<PaddleTensor> output;
72+
std::vector<PaddleTensor> output, analysis_output;
7273
predictor->Run({input}, &output, 1);
7374

7475
VLOG(3) << "output.size " << output.size();
@@ -77,6 +78,10 @@ void Main(bool use_gpu) {
7778

7879
// compare with reference result
7980
CheckOutput(FLAGS_refer, tensor);
81+
82+
// the analysis_output differs slightly from the native_output,
83+
// TODO(luotao): add CheckOutput for analysis_output later.
84+
analysis_predictor->Run({input}, &analysis_output, 1);
8085
}
8186

8287
} // namespace demo

paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -311,8 +311,7 @@ TEST(Analyzer_rnn1, ZeroCopy) {
311311
auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
312312

313313
config.use_feed_fetch_ops = true;
314-
auto native_predictor =
315-
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
314+
auto native_predictor = CreatePaddlePredictor<NativeConfig>(config);
316315

317316
config.use_feed_fetch_ops = true; // the analysis predictor needs feed/fetch.
318317
auto analysis_predictor = CreatePaddlePredictor<AnalysisConfig>(config);

paddle/fluid/inference/tests/api/tester_helper.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -79,8 +79,7 @@ std::unique_ptr<PaddlePredictor> CreateTestPredictor(
7979
if (use_analysis) {
8080
return CreatePaddlePredictor<contrib::AnalysisConfig>(config);
8181
} else {
82-
return CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(
83-
config);
82+
return CreatePaddlePredictor<NativeConfig>(config);
8483
}
8584
}
8685

0 commit comments

Comments
 (0)