Skip to content

Commit acfdbf0

Browse files
committed
enable ner analysis test and refine lac
1 parent df0c695 commit acfdbf0

File tree

6 files changed

+74
-39
lines changed

6 files changed

+74
-39
lines changed

paddle/fluid/inference/analysis/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ if (NOT EXISTS ${CHINESE_NER_INSTALL_DIR} AND WITH_TESTING AND WITH_INFERENCE)
8181
endif()
8282

8383
inference_analysis_test(test_analyzer_ner SRCS analyzer_ner_tester.cc
84-
EXTRA_DEPS paddle_inference_api paddle_fluid_api
84+
EXTRA_DEPS paddle_inference_api paddle_fluid_api analysis_predictor
8585
ARGS --infer_model=${CHINESE_NER_INSTALL_DIR}/model
8686
--infer_data=${CHINESE_NER_INSTALL_DIR}/data.txt)
8787

paddle/fluid/inference/analysis/analyzer_lac_tester.cc

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -15,11 +15,9 @@
1515
#include "paddle/fluid/inference/analysis/analyzer.h"
1616
#include <gtest/gtest.h>
1717
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
18-
#include "paddle/fluid/framework/ir/pass.h"
19-
#include "paddle/fluid/inference/analysis/ut_helper.h"
2018
#include "paddle/fluid/inference/api/analysis_predictor.h"
2119
#include "paddle/fluid/inference/api/helper.h"
22-
#include "paddle/fluid/inference/api/paddle_inference_api.h"
20+
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
2321
#include "paddle/fluid/platform/profiler.h"
2422

2523
DEFINE_string(infer_model, "", "model path for LAC");
@@ -160,7 +158,7 @@ void TestLACPrediction(const std::string &model_path,
160158
config.use_gpu = false;
161159
config.device = 0;
162160
config.specify_input_name = true;
163-
std::vector<PaddleTensor> input_slots, outputs_slots, ref_outputs_slots;
161+
std::vector<PaddleTensor> input_slots, outputs_slots;
164162
DataRecord data(data_file, batch_size);
165163
GetOneBatch(&input_slots, &data, batch_size);
166164
std::unique_ptr<PaddlePredictor> predictor;
@@ -217,6 +215,7 @@ void TestLACPrediction(const std::string &model_path,
217215
// run once for comparison as reference
218216
auto ref_predictor =
219217
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
218+
std::vector<PaddleTensor> ref_outputs_slots;
220219
ref_predictor->Run(input_slots, &ref_outputs_slots);
221220
EXPECT_EQ(ref_outputs_slots.size(), outputs_slots.size());
222221
auto &ref_out = ref_outputs_slots[0];
@@ -246,9 +245,10 @@ void TestLACPrediction(const std::string &model_path,
246245
}
247246
LOG(INFO) << "has num ops: " << num_ops;
248247
ASSERT_TRUE(fuse_statis.count("fc_fuse"));
249-
// ASSERT_TRUE(fuse_statis.count("fc_gru_fuse"));
250-
LOG(INFO) << "fc fuse num:" << fuse_statis.at("fc_fuse");
251-
// LOG(INFO) << "fc gru fuse num:" << fuse_statis.at("fc_gru_fuse");
248+
ASSERT_TRUE(fuse_statis.count("fc_gru_fuse"));
249+
EXPECT_EQ(fuse_statis.at("fc_fuse"), 1);
250+
EXPECT_EQ(fuse_statis.at("fc_gru_fuse"), 4);
251+
EXPECT_EQ(num_ops, 11);
252252
}
253253
}
254254

paddle/fluid/inference/analysis/analyzer_ner_tester.cc

Lines changed: 63 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -13,12 +13,11 @@
1313
// limitations under the License.
1414

1515
#include "paddle/fluid/inference/analysis/analyzer.h"
16-
#include <google/protobuf/text_format.h>
1716
#include <gtest/gtest.h>
18-
#include "paddle/fluid/framework/ir/pass.h"
19-
#include "paddle/fluid/inference/analysis/ut_helper.h"
17+
#include "paddle/fluid/framework/ir/fuse_pass_base.h"
18+
#include "paddle/fluid/inference/api/analysis_predictor.h"
2019
#include "paddle/fluid/inference/api/helper.h"
21-
#include "paddle/fluid/inference/api/paddle_inference_api.h"
20+
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
2221
#include "paddle/fluid/platform/profiler.h"
2322

2423
DEFINE_string(infer_model, "", "model path");
@@ -112,19 +111,31 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
112111
const int chinese_ner_result_data[] = {30, 45, 41, 48, 17, 26,
113112
48, 39, 38, 16, 25};
114113

115-
void TestChineseNERPrediction() {
114+
void TestChineseNERPrediction(bool use_analysis) {
116115
NativeConfig config;
117116
config.prog_file = FLAGS_infer_model + "/__model__";
118117
config.param_file = FLAGS_infer_model + "/param";
119118
config.use_gpu = false;
120119
config.device = 0;
121120
config.specify_input_name = true;
122121

123-
auto predictor =
124-
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
125-
std::vector<PaddleTensor> input_slots;
126-
std::vector<PaddleTensor> outputs;
122+
std::vector<PaddleTensor> input_slots, outputs;
123+
std::unique_ptr<PaddlePredictor> predictor;
127124
Timer timer;
125+
if (use_analysis) {
126+
AnalysisConfig cfg;
127+
cfg.prog_file = FLAGS_infer_model + "/__model__";
128+
cfg.param_file = FLAGS_infer_model + "/param";
129+
cfg.use_gpu = false;
130+
cfg.device = 0;
131+
cfg.specify_input_name = true;
132+
cfg.enable_ir_optim = true;
133+
predictor =
134+
CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(cfg);
135+
} else {
136+
predictor =
137+
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
138+
}
128139

129140
if (FLAGS_test_all_data) {
130141
LOG(INFO) << "test all data";
@@ -165,10 +176,51 @@ void TestChineseNERPrediction() {
165176
for (size_t i = 0; i < std::min(11UL, size); i++) {
166177
PADDLE_ENFORCE(result[i], chinese_ner_result_data[i]);
167178
}
179+
180+
if (use_analysis) {
181+
// run once for comparison as reference
182+
auto ref_predictor =
183+
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
184+
std::vector<PaddleTensor> ref_outputs_slots;
185+
ref_predictor->Run(input_slots, &ref_outputs_slots);
186+
EXPECT_EQ(ref_outputs_slots.size(), outputs.size());
187+
auto &ref_out = ref_outputs_slots[0];
188+
size_t ref_size =
189+
std::accumulate(ref_out.shape.begin(), ref_out.shape.end(), 1,
190+
[](int a, int b) { return a * b; });
191+
EXPECT_EQ(size, ref_size);
192+
int64_t *pdata_ref = static_cast<int64_t *>(ref_out.data.data());
193+
for (size_t i = 0; i < size; ++i) {
194+
EXPECT_EQ(pdata_ref[i], result[i]);
195+
}
196+
197+
AnalysisPredictor *analysis_predictor =
198+
dynamic_cast<AnalysisPredictor *>(predictor.get());
199+
auto &fuse_statis = analysis_predictor->analysis_argument()
200+
.Get<std::unordered_map<std::string, int>>(
201+
framework::ir::kFuseStatisAttr);
202+
for (auto &item : fuse_statis) {
203+
LOG(INFO) << "fused " << item.first << " " << item.second;
204+
}
205+
int num_ops = 0;
206+
for (auto &node :
207+
analysis_predictor->analysis_argument().main_dfg->nodes.nodes()) {
208+
if (node->IsFunction()) {
209+
++num_ops;
210+
}
211+
}
212+
LOG(INFO) << "has num ops: " << num_ops;
213+
ASSERT_TRUE(fuse_statis.count("fc_fuse"));
214+
ASSERT_TRUE(fuse_statis.count("fc_gru_fuse"));
215+
EXPECT_EQ(fuse_statis.at("fc_fuse"), 1);
216+
EXPECT_EQ(fuse_statis.at("fc_gru_fuse"), 2);
217+
EXPECT_EQ(num_ops, 14);
218+
}
168219
}
169220

170-
// Directly infer with the original model.
171-
TEST(Analyzer, Chinese_ner) { TestChineseNERPrediction(); }
221+
TEST(Analyzer_Chinese_ner, native) { TestChineseNERPrediction(false); }
222+
223+
TEST(Analyzer_Chinese_ner, analysis) { TestChineseNERPrediction(true); }
172224

173225
} // namespace inference
174226
} // namespace paddle

paddle/fluid/inference/analysis/analyzer_tester.cc

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -283,7 +283,6 @@ void TestDituRNNPrediction(bool use_analysis, bool activate_ir,
283283

284284
base_predictor->Run(input_slots, &base_outputs);
285285

286-
LOG(INFO) << "===========profile result===========";
287286
if (num_threads == 1) {
288287
// Prepare inputs.
289288
Timer timer;
@@ -324,7 +323,6 @@ void TestDituRNNPrediction(bool use_analysis, bool activate_ir,
324323
threads[i].join();
325324
}
326325
}
327-
LOG(INFO) << "=====================================";
328326

329327
if (use_analysis && activate_ir) {
330328
AnalysisPredictor *analysis_predictor =

paddle/fluid/inference/api/CMakeLists.txt

Lines changed: 1 addition & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -44,20 +44,7 @@ function(inference_api_test TARGET_NAME)
4444
endfunction(inference_api_test)
4545

4646
cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS lod_tensor)
47-
cc_library(analysis_predictor SRCS analysis_predictor.cc DEPS paddle_inference_api
48-
analysis
49-
ir_pass_manager
50-
pass
51-
fc_fuse_pass
52-
fc_lstm_fuse_pass
53-
fc_gru_fuse_pass
54-
seq_concat_fc_fuse_pass
55-
graph_viz_pass
56-
infer_clean_graph_pass
57-
graph_pattern_detector
58-
infer_clean_graph_pass
59-
attention_lstm_fuse_pass)
60-
47+
cc_library(analysis_predictor SRCS analysis_predictor.cc DEPS paddle_inference_api analysis)
6148
cc_test(test_paddle_inference_api
6249
SRCS api_tester.cc
6350
DEPS paddle_inference_api)

paddle/fluid/inference/api/helper.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -124,11 +124,9 @@ std::string DescribeTensor(const PaddleTensor &tensor) {
124124

125125
void PrintTime(int batch_size, int repeat, int num_threads, int tid,
126126
double latency) {
127-
LOG(INFO) << "=====================================";
128-
LOG(INFO) << "batch_size: " << batch_size << ", repeat: " << repeat
127+
LOG(INFO) << "====== batch_size: " << batch_size << ", repeat: " << repeat
129128
<< ", threads: " << num_threads << ", thread id: " << tid
130-
<< ", latency: " << latency << "ms";
131-
LOG(INFO) << "=====================================";
129+
<< ", latency: " << latency << "ms ======";
132130
}
133131

134132
} // namespace inference

0 commit comments

Comments
 (0)