Skip to content

Commit eaeb76c

Browse files
committed
add some comments
1 parent 9c687a9 commit eaeb76c

File tree

1 file changed

+14
-11
lines changed

1 file changed

+14
-11
lines changed

paddle/fluid/inference/tests/book/test_inference_nlp.cc

Lines changed: 14 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,8 @@ inline double GetCurrentMs() {
3737
return 1e+3 * time.tv_sec + 1e-3 * time.tv_usec;
3838
}
3939

40-
// return size of total words
40+
// Load the input word index data from file and save it into a LoDTensor.
41+
// Return the number of words loaded.
4142
size_t LoadData(std::vector<paddle::framework::LoDTensor>* out,
4243
const std::string& filename) {
4344
size_t sz = 0;
@@ -67,6 +68,8 @@ size_t LoadData(std::vector<paddle::framework::LoDTensor>* out,
6768
return sz;
6869
}
6970

71+
// Split the input data samples into small jobs, as balanced as possible,
72+
// according to the number of threads.
7073
void SplitData(
7174
const std::vector<paddle::framework::LoDTensor>& datasets,
7275
std::vector<std::vector<const paddle::framework::LoDTensor*>>* jobs,
@@ -116,7 +119,8 @@ void ThreadRunInfer(
116119
for (size_t i = 0; i < inputs.size(); ++i) {
117120
feed_targets[feed_target_names[0]] = inputs[i];
118121
executor->Run(*copy_program, &sub_scope, &feed_targets, &fetch_targets,
119-
true, true, feed_holder_name, fetch_holder_name);
122+
true /*create_local_scope*/, true /*create_vars*/,
123+
feed_holder_name, fetch_holder_name);
120124
}
121125
auto stop_ms = GetCurrentMs();
122126
scope->DeleteScope(&sub_scope);
@@ -143,12 +147,13 @@ TEST(inference, nlp) {
143147
// 1. Define place, executor, scope
144148
auto place = paddle::platform::CPUPlace();
145149
auto executor = paddle::framework::Executor(place);
146-
auto* scope = new paddle::framework::Scope();
150+
std::unique_ptr<paddle::framework::Scope> scope(
151+
new paddle::framework::Scope());
147152

148153
// 2. Initialize the inference_program and load parameters
149154
std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
150155
inference_program =
151-
InitProgram(&executor, scope, FLAGS_modelpath, model_combined);
156+
InitProgram(&executor, scope.get(), FLAGS_modelpath, model_combined);
152157
if (FLAGS_use_mkldnn) {
153158
EnableMKLDNN(inference_program);
154159
}
@@ -166,9 +171,9 @@ TEST(inference, nlp) {
166171
SplitData(datasets, &jobs, FLAGS_num_threads);
167172
std::vector<std::unique_ptr<std::thread>> threads;
168173
for (int i = 0; i < FLAGS_num_threads; ++i) {
169-
threads.emplace_back(new std::thread(ThreadRunInfer, i, &executor, scope,
170-
std::ref(inference_program),
171-
std::ref(jobs)));
174+
threads.emplace_back(
175+
new std::thread(ThreadRunInfer, i, &executor, scope.get(),
176+
std::ref(inference_program), std::ref(jobs)));
172177
}
173178
start_ms = GetCurrentMs();
174179
for (int i = 0; i < FLAGS_num_threads; ++i) {
@@ -177,7 +182,7 @@ TEST(inference, nlp) {
177182
stop_ms = GetCurrentMs();
178183
} else {
179184
if (FLAGS_prepare_vars) {
180-
executor.CreateVariables(*inference_program, scope, 0);
185+
executor.CreateVariables(*inference_program, scope.get(), 0);
181186
}
182187
// always prepare context
183188
std::unique_ptr<paddle::framework::ExecutorPrepareContext> ctx;
@@ -201,17 +206,15 @@ TEST(inference, nlp) {
201206
start_ms = GetCurrentMs();
202207
for (size_t i = 0; i < datasets.size(); ++i) {
203208
feed_targets[feed_target_names[0]] = &(datasets[i]);
204-
executor.RunPreparedContext(ctx.get(), scope, &feed_targets,
209+
executor.RunPreparedContext(ctx.get(), scope.get(), &feed_targets,
205210
&fetch_targets, !FLAGS_prepare_vars);
206211
}
207212
stop_ms = GetCurrentMs();
208213
LOG(INFO) << "Tid: 0, process " << datasets.size()
209214
<< " samples, avg time per sample: "
210215
<< (stop_ms - start_ms) / datasets.size() << " ms";
211216
}
212-
213217
LOG(INFO) << "Total inference time with " << FLAGS_num_threads
214218
<< " threads : " << (stop_ms - start_ms) / 1000.0
215219
<< " sec, QPS: " << datasets.size() / ((stop_ms - start_ms) / 1000);
216-
delete scope;
217220
}

0 commit comments

Comments
 (0)