Skip to content

Commit 6ae7cbe

Browse files
committed
follow comments
1 parent 99d00cc commit 6ae7cbe

File tree

2 files changed

+13
-11
lines changed

2 files changed

+13
-11
lines changed

paddle/fluid/inference/tests/book/CMakeLists.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,9 @@ inference_test(recommender_system)
4040
inference_test(word2vec)
4141

4242
# This is an ugly workaround to make this test run
43+
# TODO(TJ): clean me up
4344
cc_test(test_inference_nlp
4445
SRCS test_inference_nlp.cc
4546
DEPS paddle_fluid
4647
ARGS
47-
--modelpath=${PADDLE_BINARY_DIR}/python/paddle/fluid/tests/book/recognize_digits_mlp.inference.model)
48+
--model_path=${PADDLE_BINARY_DIR}/python/paddle/fluid/tests/book/recognize_digits_mlp.inference.model)

paddle/fluid/inference/tests/book/test_inference_nlp.cc

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@ limitations under the License. */
2424
#include <omp.h>
2525
#endif
2626

27-
DEFINE_string(modelpath, "", "Directory of the inference model.");
28-
DEFINE_string(datafile, "", "File of input index data.");
27+
DEFINE_string(model_path, "", "Directory of the inference model.");
28+
DEFINE_string(data_file, "", "File of input index data.");
2929
DEFINE_int32(repeat, 100, "Running the inference program repeat times");
3030
DEFINE_bool(use_mkldnn, false, "Use MKLDNN to run inference");
3131
DEFINE_bool(prepare_vars, true, "Prepare variables before executor");
@@ -65,6 +65,7 @@ size_t LoadData(std::vector<paddle::framework::LoDTensor>* out,
6565
ids.push_back(stoi(field));
6666
}
6767
if (ids.size() >= 1024) {
68+
// Synced with NLP guys, they will ignore input larger than 1024
6869
continue;
6970
}
7071

@@ -142,18 +143,18 @@ void ThreadRunInfer(
142143
}
143144

144145
TEST(inference, nlp) {
145-
if (FLAGS_modelpath.empty()) {
146-
LOG(FATAL) << "Usage: ./example --modelpath=path/to/your/model";
146+
if (FLAGS_model_path.empty()) {
147+
LOG(FATAL) << "Usage: ./example --model_path=path/to/your/model";
147148
}
148-
if (FLAGS_datafile.empty()) {
149-
LOG(WARNING) << " Not data file provided, will use dummy data!"
149+
if (FLAGS_data_file.empty()) {
150+
LOG(WARNING) << "No data file provided, will use dummy data!"
150151
<< "Note: if you use nlp model, please provide data file.";
151152
}
152-
LOG(INFO) << "Model Path: " << FLAGS_modelpath;
153-
LOG(INFO) << "Data File: " << FLAGS_datafile;
153+
LOG(INFO) << "Model Path: " << FLAGS_model_path;
154+
LOG(INFO) << "Data File: " << FLAGS_data_file;
154155

155156
std::vector<paddle::framework::LoDTensor> datasets;
156-
size_t num_total_words = LoadData(&datasets, FLAGS_datafile);
157+
size_t num_total_words = LoadData(&datasets, FLAGS_data_file);
157158
LOG(INFO) << "Number of samples (seq_len<1024): " << datasets.size();
158159
LOG(INFO) << "Total number of words: " << num_total_words;
159160

@@ -168,7 +169,7 @@ TEST(inference, nlp) {
168169
// 2. Initialize the inference_program and load parameters
169170
std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
170171
inference_program =
171-
InitProgram(&executor, scope.get(), FLAGS_modelpath, model_combined);
172+
InitProgram(&executor, scope.get(), FLAGS_model_path, model_combined);
172173
if (FLAGS_use_mkldnn) {
173174
EnableMKLDNN(inference_program);
174175
}

0 commit comments

Comments
 (0)