File tree: 4 files changed, +17 −4 lines.
Diff view (original vs. changed line numbers) for @@ -159,6 +159,14 @@ bool AnalysisPredictor::PrepareExecutor() {
159
159
return true ;
160
160
}
161
161
162
+ void AnalysisPredictor::SetMKLDNNThreadId (int tid) {
163
+ #ifdef PADDLE_WITH_MKLDNN
164
+ platform::set_cur_thread_id (tid);
165
+ #else
166
+ LOG (ERROR) << " Please compile with MKLDNN first to use MKLDNN" ;
167
+ #endif
168
+ }
169
+
162
170
bool AnalysisPredictor::Run (const std::vector<PaddleTensor> &inputs,
163
171
std::vector<PaddleTensor> *output_data,
164
172
int batch_size) {
Original file line number Diff line number Diff line change @@ -69,6 +69,8 @@ class AnalysisPredictor : public PaddlePredictor {
69
69
framework::Scope *scope () { return scope_.get (); }
70
70
framework::ProgramDesc &program () { return *inference_program_; }
71
71
72
+ void SetMKLDNNThreadId (int tid);
73
+
72
74
protected:
73
75
bool PrepareProgram (const std::shared_ptr<framework::ProgramDesc> &program);
74
76
bool PrepareScope (const std::shared_ptr<framework::Scope> &parent_scope);
Original file line number Diff line number Diff line change @@ -51,9 +51,9 @@ struct AnalysisConfig : public NativeConfig {
51
51
int max_batch_size = 1 );
52
52
bool use_tensorrt () const { return use_tensorrt_; }
53
53
54
+ void EnableMKLDNN ();
54
55
// NOTE this is just for internal development, please not use it.
55
56
// NOT stable yet.
56
- void EnableMKLDNN ();
57
57
bool use_mkldnn () const { return use_mkldnn_; }
58
58
59
59
friend class ::paddle::AnalysisPredictor;
Original file line number Diff line number Diff line change @@ -216,13 +216,16 @@ void TestMultiThreadPrediction(
216
216
size_t total_time{0 };
217
217
for (int tid = 0 ; tid < num_threads; ++tid) {
218
218
threads.emplace_back ([&, tid]() {
219
- #ifdef PADDLE_WITH_MKLDNN
220
- platform::set_cur_thread_id (static_cast <int >(tid) + 1 );
221
- #endif
222
219
// Each thread should have local inputs and outputs.
223
220
// The inputs of each thread are all the same.
224
221
std::vector<PaddleTensor> outputs_tid;
225
222
auto &predictor = predictors[tid];
223
+ #ifdef PADDLE_WITH_MKLDNN
224
+ if (use_analysis) {
225
+ static_cast <AnalysisPredictor *>(predictor.get ())
226
+ ->SetMKLDNNThreadId (static_cast <int >(tid) + 1 );
227
+ }
228
+ #endif
226
229
227
230
// warmup run
228
231
LOG (INFO) << " Running thread " << tid << " , warm up run..." ;
(Page footer: action unavailable at this time — 0 commit comments.)