@@ -54,7 +54,7 @@ std::string num2str(T a) {
 }
 }  // namespace

-bool PaddlePredictorImpl::Init() {
+bool NativePaddlePredictor::Init() {
   VLOG(3) << "Predictor::init()";

   // TODO(panyx0718): Should CPU vs GPU device be decided by id?
@@ -96,8 +96,8 @@ bool PaddlePredictorImpl::Init() {
   return true;
 }

-bool PaddlePredictorImpl::Run(const std::vector<PaddleTensor> &inputs,
-                              std::vector<PaddleTensor> *output_data) {
+bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
+                                std::vector<PaddleTensor> *output_data) {
   VLOG(3) << "Predictor::predict";
   Timer timer;
   timer.tic();
@@ -133,59 +133,20 @@ bool PaddlePredictorImpl::Run(const std::vector<PaddleTensor> &inputs,
   return true;
 }

-std::unique_ptr<PaddlePredictor> PaddlePredictorImpl::Clone() {
+std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
   VLOG(3) << "Predictor::clone";
-  std::unique_ptr<PaddlePredictor> cls(new PaddlePredictorImpl(config_));
-  if (!cls->InitShared()) {
-    LOG(ERROR) << "fail to call InitShared";
+  std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
+
+  if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init()) {
+    LOG(ERROR) << "fail to call Init";
     return nullptr;
   }
   // fix manylinux compile error.
   return std::move(cls);
 }

-// TODO(panyx0718): Consider merge with Init()?
-bool PaddlePredictorImpl::InitShared() {
-  VLOG(3) << "Predictor::init_shared";
-  // 1. Define place, executor, scope
-  if (this->config_.device >= 0) {
-    place_ = platform::CUDAPlace();
-  } else {
-    place_ = platform::CPUPlace();
-  }
-  this->executor_.reset(new framework::Executor(this->place_));
-  this->scope_.reset(new framework::Scope());
-  // Initialize the inference program
-  if (!this->config_.model_dir.empty()) {
-    // Parameters are saved in separate files sited in
-    // the specified `dirname`.
-    this->inference_program_ = inference::Load(
-        this->executor_.get(), this->scope_.get(), this->config_.model_dir);
-  } else if (!this->config_.prog_file.empty() &&
-             !this->config_.param_file.empty()) {
-    // All parameters are saved in a single file.
-    // The file names should be consistent with that used
-    // in Python API `fluid.io.save_inference_model`.
-    this->inference_program_ = inference::Load(this->executor_.get(),
-                                               this->scope_.get(),
-                                               this->config_.prog_file,
-                                               this->config_.param_file);
-  }
-  this->ctx_ = this->executor_->Prepare(*this->inference_program_, 0);
-  // 3. create variables
-  // TODO(panyx0718): why test share_variables.
-  if (config_.share_variables) {
-    this->executor_->CreateVariables(
-        *this->inference_program_, this->scope_.get(), 0);
-  }
-  // 4. Get the feed_target_names and fetch_target_names
-  this->feed_target_names_ = this->inference_program_->GetFeedTargetNames();
-  this->fetch_target_names_ = this->inference_program_->GetFetchTargetNames();
-  return true;
-}
-
-bool PaddlePredictorImpl::SetFeed(const std::vector<PaddleTensor> &inputs,
-                                  std::vector<framework::LoDTensor> *feeds) {
+bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
+                                    std::vector<framework::LoDTensor> *feeds) {
   VLOG(3) << "Predictor::set_feed";
   if (inputs.size() != feed_target_names_.size()) {
     LOG(ERROR) << "wrong feed input size.";
@@ -213,7 +174,7 @@ bool PaddlePredictorImpl::SetFeed(const std::vector<PaddleTensor> &inputs,
   return true;
 }

-bool PaddlePredictorImpl::GetFetch(
+bool NativePaddlePredictor::GetFetch(
     const std::vector<framework::LoDTensor> &fetchs,
     std::vector<PaddleTensor> *outputs) {
   VLOG(3) << "Predictor::get_fetch";
@@ -280,23 +241,26 @@ bool PaddlePredictorImpl::GetFetch(
 }

 template <>
-std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(
-    const ConfigImpl &config) {
-  VLOG(3) << "create PaddlePredictorImpl";
-  // 1. GPU memeroy
-  std::vector<std::string> flags;
-  if (config.fraction_of_gpu_memory >= 0.0f ||
-      config.fraction_of_gpu_memory <= 0.95f) {
-    flags.push_back("dummpy");
-    std::string flag = "--fraction_of_gpu_memory_to_use=" +
-                       num2str<float>(config.fraction_of_gpu_memory);
-    flags.push_back(flag);
-    VLOG(3) << "set flag: " << flag;
-    framework::InitGflags(flags);
+std::unique_ptr<PaddlePredictor>
+CreatePaddlePredictor<NativeConfig, PaddlePredictor::EngineKind::kNative>(
+    const NativeConfig &config) {
+  VLOG(3) << "create NativePaddlePredictor";
+  if (config.use_gpu) {
+    // 1. GPU memeroy
+    std::vector<std::string> flags;
+    if (config.fraction_of_gpu_memory >= 0.0f ||
+        config.fraction_of_gpu_memory <= 0.95f) {
+      flags.push_back("dummpy");
+      std::string flag = "--fraction_of_gpu_memory_to_use=" +
+                         num2str<float>(config.fraction_of_gpu_memory);
+      flags.push_back(flag);
+      VLOG(3) << "set flag: " << flag;
+      framework::InitGflags(flags);
+    }
   }

-  std::unique_ptr<PaddlePredictor> predictor(new PaddlePredictorImpl(config));
-  if (!dynamic_cast<PaddlePredictorImpl *>(predictor.get())->Init()) {
+  std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
+  if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init()) {
     return nullptr;
   }
   return std::move(predictor);
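
For context, a minimal usage sketch of the renamed entry point. This is not part of the diff: the header path, the `paddle` namespace qualification, and any config/tensor fields beyond the identifiers visible above (use_gpu, fraction_of_gpu_memory, model_dir, prog_file, param_file) are assumptions.

// Illustrative sketch only; header path and namespace are assumed.
#include <vector>
#include "paddle/contrib/inference/paddle_inference_api.h"

int main() {
  paddle::NativeConfig config;
  config.use_gpu = false;                 // skips the GPU-memory flag setup shown in the diff
  config.fraction_of_gpu_memory = 0.15f;  // only consulted when use_gpu is true
  config.model_dir = "./my_model";        // hypothetical path; parameters saved as separate files

  // Dispatches to the CreatePaddlePredictor specialization added in this diff.
  auto predictor = paddle::CreatePaddlePredictor<
      paddle::NativeConfig, paddle::PaddlePredictor::EngineKind::kNative>(config);
  if (!predictor) return 1;  // Init() failed inside CreatePaddlePredictor

  std::vector<paddle::PaddleTensor> inputs, outputs;
  // ... fill `inputs` to match the model's feed targets ...
  if (!predictor->Run(inputs, &outputs)) return 1;

  // Clone() now builds a fresh NativePaddlePredictor from the same config
  // and calls Init() on it, instead of the removed InitShared().
  auto predictor2 = predictor->Clone();
  return 0;
}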