Skip to content

Commit 8013293

Browse files
authored
Merge pull request #14281 from luotao1/face
refine analysis_resnet50_tester
2 parents e0c8397 + eea3673 commit 8013293

File tree

6 files changed

+56
-54
lines changed

6 files changed

+56
-54
lines changed

paddle/fluid/inference/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
if(WITH_TESTING)
2-
include(test.cmake) # some generic cmake funtion for inference
2+
include(tests/test.cmake) # some generic cmake funtion for inference
33
endif()
44
# analysis and tensorrt must be added before creating static library,
55
# otherwise, there would be undefined reference to them in static library.

paddle/fluid/inference/tests/api/CMakeLists.txt

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,11 @@
11
set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_fluid_api ir_pass_manager analysis_predictor)
22

3+
function(download_model install_dir model_name)
4+
if (NOT EXISTS ${install_dir})
5+
inference_download_and_uncompress(${install_dir} ${INFERENCE_URL} ${model_name})
6+
endif()
7+
endfunction()
8+
39
function(download_model_and_data install_dir model_name data_name)
410
if (NOT EXISTS ${install_dir})
511
inference_download_and_uncompress(${install_dir} ${INFERENCE_URL} ${model_name})
@@ -13,6 +19,13 @@ function(inference_analysis_api_test target install_dir filename)
1319
ARGS --infer_model=${install_dir}/model --infer_data=${install_dir}/data.txt)
1420
endfunction()
1521

22+
function(inference_analysis_api_test_with_fake_data target install_dir filename model_name)
23+
download_model(${install_dir} ${model_name})
24+
inference_analysis_test(${target} SRCS ${filename}
25+
EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
26+
ARGS --infer_model=${install_dir}/model)
27+
endfunction()
28+
1629
# RNN1
1730
if(NOT APPLE)
1831
set(RNN1_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/rnn1")
@@ -61,17 +74,13 @@ inference_analysis_api_test(test_analyzer_seq_conv1 ${SEQ_CONV1_INSTALL_DIR} ana
6174
# ocr
6275
set(OCR_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/ocr")
6376
if (NOT EXISTS ${OCR_INSTALL_DIR})
64-
inference_download_and_uncompress(${OCR_INSTALL_DIR} "http://paddlemodels.cdn.bcebos.com/" "inference-vis-demos%2Focr.tar.gz")
77+
inference_download_and_uncompress(${OCR_INSTALL_DIR} "http://paddlemodels.cdn.bcebos.com/" "inference-vis-demos%2Focr.tar.gz")
6578
endif()
6679
inference_analysis_api_test(test_analyzer_ocr ${OCR_INSTALL_DIR} analyzer_vis_tester.cc)
6780

6881
# resnet50
69-
set(RESNET50_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/resnet50")
70-
if (NOT EXISTS ${RESNET50_INSTALL_DIR})
71-
inference_download_and_uncompress(${RESNET50_INSTALL_DIR} ${INFERENCE_URL} "resnet50_model.tar.gz")
72-
endif()
73-
inference_analysis_test(test_analyzer_resnet50 SRCS analyzer_resnet50_tester.cc
74-
EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} ARGS --infer_model=${RESNET50_INSTALL_DIR}/model)
82+
inference_analysis_api_test_with_fake_data(test_analyzer_resnet50
83+
"${INFERENCE_DEMO_INSTALL_DIR}/resnet50" analyzer_resnet50_tester.cc "resnet50_model.tar.gz")
7584

7685
# anakin
7786
if (WITH_ANAKIN AND WITH_MKL) # only needed in CI

paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc

Lines changed: 2 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -30,25 +30,7 @@ void SetConfig(AnalysisConfig *cfg) {
3030
}
3131

3232
void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
33-
PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data.");
34-
35-
PaddleTensor input;
36-
// channel=3, height/width=318
37-
std::vector<int> shape({FLAGS_batch_size, 3, 318, 318});
38-
input.shape = shape;
39-
input.dtype = PaddleDType::FLOAT32;
40-
41-
// fill input data, for profile easily, do not use random data here.
42-
size_t size = FLAGS_batch_size * 3 * 318 * 318;
43-
input.data.Resize(size * sizeof(float));
44-
float *input_data = static_cast<float *>(input.data.data());
45-
for (size_t i = 0; i < size; i++) {
46-
*(input_data + i) = static_cast<float>(i) / size;
47-
}
48-
49-
std::vector<PaddleTensor> input_slots;
50-
input_slots.assign({input});
51-
(*inputs).emplace_back(input_slots);
33+
SetFakeImageInput(inputs, FLAGS_infer_model);
5234
}
5335

5436
// Easy for profiling independently.
@@ -61,13 +43,6 @@ void profile(bool use_mkldnn = false) {
6143
std::vector<std::vector<PaddleTensor>> input_slots_all;
6244
SetInput(&input_slots_all);
6345
TestPrediction(cfg, input_slots_all, &outputs, FLAGS_num_threads);
64-
65-
if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
66-
PADDLE_ENFORCE_EQ(outputs.size(), 1UL);
67-
size_t size = GetSize(outputs[0]);
68-
// output is a 512-dimension feature
69-
EXPECT_EQ(size, 512 * FLAGS_batch_size);
70-
}
7146
}
7247

7348
TEST(Analyzer_resnet50, profile) { profile(); }
@@ -83,8 +58,7 @@ TEST(Analyzer_resnet50, fuse_statis) {
8358
auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
8459
auto fuse_statis = GetFuseStatis(
8560
static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
86-
ASSERT_TRUE(fuse_statis.count("fc_fuse"));
87-
EXPECT_EQ(fuse_statis.at("fc_fuse"), 1);
61+
LOG(INFO) << "num_ops: " << num_ops;
8862
}
8963

9064
// Compare result of NativeConfig and AnalysisConfig

paddle/fluid/inference/tests/api/tester_helper.h

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
#include "paddle/fluid/inference/api/analysis_predictor.h"
2626
#include "paddle/fluid/inference/api/helper.h"
2727
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
28+
#include "paddle/fluid/inference/tests/test_helper.h"
2829
#include "paddle/fluid/platform/profiler.h"
2930

3031
DEFINE_string(infer_model, "", "model path");
@@ -105,6 +106,34 @@ std::unordered_map<std::string, int> GetFuseStatis(PaddlePredictor *predictor,
105106
return fuse_statis;
106107
}
107108

109+
void SetFakeImageInput(std::vector<std::vector<PaddleTensor>> *inputs,
110+
const std::string &dirname) {
111+
// Set fake_image_data
112+
PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data.");
113+
std::vector<std::vector<int64_t>> feed_target_shapes =
114+
GetFeedTargetShapes(dirname, true, "model", "params");
115+
int dim1 = feed_target_shapes[0][1];
116+
int dim2 = feed_target_shapes[0][2];
117+
int dim3 = feed_target_shapes[0][3];
118+
119+
PaddleTensor input;
120+
std::vector<int> shape({FLAGS_batch_size, dim1, dim2, dim3});
121+
input.shape = shape;
122+
input.dtype = PaddleDType::FLOAT32;
123+
124+
// fill input data, for profile easily, do not use random data here.
125+
size_t size = FLAGS_batch_size * dim1 * dim2 * dim3;
126+
input.data.Resize(size * sizeof(float));
127+
float *input_data = static_cast<float *>(input.data.data());
128+
for (size_t i = 0; i < size; i++) {
129+
*(input_data + i) = static_cast<float>(i) / size;
130+
}
131+
132+
std::vector<PaddleTensor> input_slots;
133+
input_slots.assign({input});
134+
(*inputs).emplace_back(input_slots);
135+
}
136+
108137
void TestOneThreadPrediction(
109138
const AnalysisConfig &config,
110139
const std::vector<std::vector<PaddleTensor>> &inputs,

paddle/fluid/inference/tests/test_helper.h

Lines changed: 8 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@ limitations under the License. */
1818
#include <string>
1919
#include <vector>
2020

21-
#include "paddle/fluid/framework/ir/graph_to_program_pass.h"
2221
#include "paddle/fluid/framework/lod_tensor.h"
2322
#include "paddle/fluid/inference/io.h"
2423
#include "paddle/fluid/platform/profiler.h"
@@ -94,15 +93,15 @@ void CheckError(const paddle::framework::LoDTensor& output1,
9493

9594
std::unique_ptr<paddle::framework::ProgramDesc> InitProgram(
9695
paddle::framework::Executor* executor, paddle::framework::Scope* scope,
97-
const std::string& dirname, const bool is_combined = false) {
96+
const std::string& dirname, const bool is_combined = false,
97+
const std::string& prog_filename = "__model_combined__",
98+
const std::string& param_filename = "__params_combined__") {
9899
std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
99100
if (is_combined) {
100101
// All parameters are saved in a single file.
101102
// Hard-coding the file names of program and parameters in unittest.
102103
// The file names should be consistent with that used in Python API
103104
// `fluid.io.save_inference_model`.
104-
std::string prog_filename = "__model_combined__";
105-
std::string param_filename = "__params_combined__";
106105
inference_program =
107106
paddle::inference::Load(executor, scope, dirname + "/" + prog_filename,
108107
dirname + "/" + param_filename);
@@ -115,12 +114,15 @@ std::unique_ptr<paddle::framework::ProgramDesc> InitProgram(
115114
}
116115

117116
std::vector<std::vector<int64_t>> GetFeedTargetShapes(
118-
const std::string& dirname, const bool is_combined = false) {
117+
const std::string& dirname, const bool is_combined = false,
118+
const std::string& prog_filename = "__model_combined__",
119+
const std::string& param_filename = "__params_combined__") {
119120
auto place = paddle::platform::CPUPlace();
120121
auto executor = paddle::framework::Executor(place);
121122
auto* scope = new paddle::framework::Scope();
122123

123-
auto inference_program = InitProgram(&executor, scope, dirname, is_combined);
124+
auto inference_program = InitProgram(&executor, scope, dirname, is_combined,
125+
prog_filename, param_filename);
124126
auto& global_block = inference_program->Block(0);
125127

126128
const std::vector<std::string>& feed_target_names =
@@ -136,15 +138,6 @@ std::vector<std::vector<int64_t>> GetFeedTargetShapes(
136138
return feed_target_shapes;
137139
}
138140

139-
void Compile(paddle::framework::ProgramDesc* program) {
140-
std::unique_ptr<paddle::framework::ir::Graph> g(
141-
new paddle::framework::ir::Graph(*program));
142-
auto pass = paddle::framework::ir::PassRegistry::Instance().Get(
143-
"graph_to_program_pass");
144-
pass->SetNotOwned<paddle::framework::ProgramDesc>("program", program);
145-
pass->Apply(std::move(g));
146-
}
147-
148141
template <typename Place, bool CreateVars = true, bool PrepareContext = false>
149142
void TestInference(const std::string& dirname,
150143
const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
@@ -182,7 +175,6 @@ void TestInference(const std::string& dirname,
182175
paddle::platform::DeviceContextPool::Instance().Get(place));
183176
inference_program = InitProgram(&executor, scope, dirname, is_combined);
184177
}
185-
Compile(inference_program.get());
186178

187179
// Disable the profiler and print the timing information
188180
paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kDefault,
@@ -261,5 +253,3 @@ void TestInference(const std::string& dirname,
261253

262254
delete scope;
263255
}
264-
265-
USE_PASS(graph_to_program_pass);

0 commit comments

Comments (0)