
Commit 1d9b2a4

Merge pull request #14508 from luotao1/warm_up_multi_thread
add warm up in TestMultiThreadPrediction
2 parents a685f30 + eb9b9be commit 1d9b2a4

File tree

1 file changed: +27 −10 lines


paddle/fluid/inference/tests/api/tester_helper.h

Lines changed: 27 additions & 10 deletions
@@ -222,19 +222,36 @@ void TestMultiThreadPrediction(
       // The inputs of each thread are all the same.
       std::vector<PaddleTensor> outputs_tid;
       auto &predictor = predictors[tid];
-      LOG(INFO) << "running thread " << tid;
-      Timer timer;
-      timer.tic();
-      for (int i = 0; i < num_times; i++) {
-        for (const auto &input : inputs) {
-          ASSERT_TRUE(predictor->Run(input, &outputs_tid));
+
+      // warmup run
+      LOG(INFO) << "Running thread " << tid << ", warm up run...";
+      {
+        Timer warmup_timer;
+        warmup_timer.tic();
+        predictor->Run(inputs[0], outputs, batch_size);
+        PrintTime(batch_size, 1, num_threads, tid, warmup_timer.toc(), 1);
+#if !defined(_WIN32)
+        if (FLAGS_profile) {
+          paddle::platform::ResetProfiler();
         }
+#endif
       }
 
-      auto time = timer.toc();
-      total_time += time;
-      PrintTime(batch_size, num_times, num_threads, tid, time / num_times,
-                inputs.size());
+      LOG(INFO) << "Thread " << tid << " run " << num_times << " times...";
+      {
+        Timer timer;
+        timer.tic();
+        for (int i = 0; i < num_times; i++) {
+          for (const auto &input : inputs) {
+            ASSERT_TRUE(predictor->Run(input, &outputs_tid));
+          }
+        }
+
+        auto time = timer.toc();
+        total_time += time;
+        PrintTime(batch_size, num_times, num_threads, tid, time / num_times,
+                  inputs.size());
+      }
     });
   }
   for (int i = 0; i < num_threads; ++i) {
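
Note on the pattern: the patch separates a single untimed-for-the-average warm-up run from the loop whose time feeds the reported average, and, when FLAGS_profile is set, resets the profiler after the warm-up so profiling output also excludes it. Below is a minimal, self-contained sketch of that warm-up-then-measure structure in plain C++ with std::chrono. It does not use the Paddle APIs (Timer, PrintTime, and the predictor are not reproduced here); RunOnce and every other name in the sketch are hypothetical stand-ins used only to illustrate the shape of the change.

#include <chrono>
#include <cmath>
#include <iostream>
#include <vector>

// Hypothetical stand-in for one inference call on one input batch.
double RunOnce(const std::vector<double> &input) {
  double acc = 0.0;
  for (double v : input) acc += std::sqrt(std::abs(v) + 1.0);
  return acc;
}

int main() {
  using Clock = std::chrono::steady_clock;
  const int num_times = 100;
  std::vector<std::vector<double>> inputs(4, std::vector<double>(1 << 16, 0.5));

  // Warm-up run: executed once and timed separately, so one-time costs
  // (lazy initialization, cache warm-up, first-touch allocation) do not
  // contaminate the averaged measurement below.
  {
    auto t0 = Clock::now();
    volatile double sink = RunOnce(inputs[0]);
    (void)sink;
    auto warmup_ms =
        std::chrono::duration<double, std::milli>(Clock::now() - t0).count();
    std::cout << "warm up run: " << warmup_ms << " ms\n";
  }

  // Timed runs: only these iterations contribute to the reported average.
  {
    auto t0 = Clock::now();
    double sink = 0.0;
    for (int i = 0; i < num_times; ++i) {
      for (const auto &input : inputs) sink += RunOnce(input);
    }
    auto total_ms =
        std::chrono::duration<double, std::milli>(Clock::now() - t0).count();
    std::cout << "avg latency over " << num_times
              << " repeats: " << total_ms / num_times << " ms (checksum "
              << sink << ")\n";
  }
  return 0;
}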
