@@ -102,8 +102,8 @@ bool PaddlePredictorImpl::Run(const std::vector<PaddleTensor> &inputs,
   Timer timer;
   timer.tic();
   // set feed variable
-  std::map<std::string, const paddle::framework::LoDTensor *> feed_targets;
-  std::vector<paddle::framework::LoDTensor> feeds;
+  std::map<std::string, const framework::LoDTensor *> feed_targets;
+  std::vector<framework::LoDTensor> feeds;
   if (!SetFeed(inputs, &feeds)) {
     LOG(ERROR) << "fail to set feed";
     return false;
@@ -112,8 +112,8 @@ bool PaddlePredictorImpl::Run(const std::vector<PaddleTensor> &inputs,
     feed_targets[feed_target_names_[i]] = &feeds[i];
   }
   // get fetch variable
-  std::map<std::string, paddle::framework::LoDTensor *> fetch_targets;
-  std::vector<paddle::framework::LoDTensor> fetchs;
+  std::map<std::string, framework::LoDTensor *> fetch_targets;
+  std::vector<framework::LoDTensor> fetchs;
   fetchs.resize(fetch_target_names_.size());
   for (size_t i = 0; i < fetch_target_names_.size(); ++i) {
     fetch_targets[fetch_target_names_[i]] = &fetchs[i];
@@ -149,28 +149,27 @@ bool PaddlePredictorImpl::InitShared() {
   VLOG(3) << "Predictor::init_shared";
   // 1. Define place, executor, scope
   if (this->config_.device >= 0) {
-    place_ = paddle::platform::CUDAPlace();
+    place_ = platform::CUDAPlace();
   } else {
-    place_ = paddle::platform::CPUPlace();
+    place_ = platform::CPUPlace();
   }
-  this->executor_.reset(new paddle::framework::Executor(this->place_));
-  this->scope_.reset(new paddle::framework::Scope());
+  this->executor_.reset(new framework::Executor(this->place_));
+  this->scope_.reset(new framework::Scope());
   // Initialize the inference program
   if (!this->config_.model_dir.empty()) {
     // Parameters are saved in separate files sited in
     // the specified `dirname`.
-    this->inference_program_ = paddle::inference::Load(
+    this->inference_program_ = inference::Load(
         this->executor_.get(), this->scope_.get(), this->config_.model_dir);
   } else if (!this->config_.prog_file.empty() &&
              !this->config_.param_file.empty()) {
     // All parameters are saved in a single file.
     // The file names should be consistent with that used
     // in Python API `fluid.io.save_inference_model`.
-    this->inference_program_ =
-        paddle::inference::Load(this->executor_.get(),
-                                this->scope_.get(),
-                                this->config_.prog_file,
-                                this->config_.param_file);
+    this->inference_program_ = inference::Load(this->executor_.get(),
+                                               this->scope_.get(),
+                                               this->config_.prog_file,
+                                               this->config_.param_file);
   }
   this->ctx_ = this->executor_->Prepare(*this->inference_program_, 0);
   // 3. create variables
@@ -185,24 +184,21 @@ bool PaddlePredictorImpl::InitShared() {
   return true;
 }
 
-bool PaddlePredictorImpl::SetFeed(
-    const std::vector<PaddleTensor> &inputs,
-    std::vector<paddle::framework::LoDTensor> *feeds) {
+bool PaddlePredictorImpl::SetFeed(const std::vector<PaddleTensor> &inputs,
+                                  std::vector<framework::LoDTensor> *feeds) {
   VLOG(3) << "Predictor::set_feed";
   if (inputs.size() != feed_target_names_.size()) {
     LOG(ERROR) << "wrong feed input size.";
     return false;
   }
   for (size_t i = 0; i < feed_target_names_.size(); ++i) {
-    paddle::framework::LoDTensor input;
-    paddle::framework::DDim ddim =
-        paddle::framework::make_ddim(inputs[i].shape);
+    framework::LoDTensor input;
+    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
     void *input_ptr;
     if (inputs[i].dtype == PaddleDType::INT64) {
-      input_ptr =
-          input.mutable_data<int64_t>(ddim, paddle::platform::CPUPlace());
+      input_ptr = input.mutable_data<int64_t>(ddim, platform::CPUPlace());
     } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
-      input_ptr = input.mutable_data<float>(ddim, paddle::platform::CPUPlace());
+      input_ptr = input.mutable_data<float>(ddim, platform::CPUPlace());
     } else {
       LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
       return false;
@@ -213,13 +209,12 @@ bool PaddlePredictorImpl::SetFeed(
                 inputs[i].data.data,
                 inputs[i].data.length);
     feeds->push_back(input);
-    LOG(ERROR) << "Actual feed type " << feeds->back().type().name();
   }
   return true;
 }
 
 bool PaddlePredictorImpl::GetFetch(
-    const std::vector<paddle::framework::LoDTensor> &fetchs,
+    const std::vector<framework::LoDTensor> &fetchs,
     std::vector<PaddleTensor> *outputs) {
   VLOG(3) << "Predictor::get_fetch";
   outputs->resize(fetchs.size());
@@ -284,8 +279,9 @@ bool PaddlePredictorImpl::GetFetch(
   return true;
 }
 
-std::unique_ptr<PaddlePredictorImpl> CreatePaddlePredictorImpl(
-    const VisConfig &config) {
+template <>
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(
+    const ConfigImpl &config) {
   VLOG(3) << "create PaddlePredictorImpl";
   // 1. GPU memeroy
   std::vector<std::string> flags;
@@ -299,12 +295,11 @@ std::unique_ptr<PaddlePredictorImpl> CreatePaddlePredictorImpl(
     framework::InitGflags(flags);
   }
 
-  std::unique_ptr<PaddlePredictorImpl> predictor(
-      new PaddlePredictorImpl(config));
-  if (!predictor->Init()) {
+  std::unique_ptr<PaddlePredictor> predictor(new PaddlePredictorImpl(config));
+  if (!dynamic_cast<PaddlePredictorImpl *>(predictor.get())->Init()) {
     return nullptr;
   }
-  return predictor;
+  return std::move(predictor);
 }
 
 }  // namespace paddle
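With this change the factory becomes a specialization of the generic `CreatePaddlePredictor` template for `ConfigImpl`, returning the abstract `PaddlePredictor` interface instead of the concrete `PaddlePredictorImpl`. A minimal call-site sketch follows; the header path, the shape of the primary template (config type as its template parameter), and the output parameter of `Run` are assumptions not shown in this diff, while `model_dir` and `device` are the `ConfigImpl` fields the diff itself reads in `InitShared`.

```cpp
// Hypothetical usage sketch for the new CreatePaddlePredictor<ConfigImpl> factory.
#include <memory>
#include <vector>

#include "paddle/contrib/inference/paddle_inference_api.h"  // assumed header location

int main() {
  paddle::ConfigImpl config;
  config.model_dir = "./my_inference_model";  // dir written by fluid.io.save_inference_model
  config.device = -1;                         // < 0 -> CPUPlace, >= 0 -> CUDAPlace (see InitShared)

  // Template argument is deduced as ConfigImpl, selecting the specialization
  // added in this diff; Init() failure surfaces as a nullptr return.
  std::unique_ptr<paddle::PaddlePredictor> predictor =
      paddle::CreatePaddlePredictor(config);
  if (!predictor) return 1;

  std::vector<paddle::PaddleTensor> inputs, outputs;
  // ... fill `inputs` with FLOAT32/INT64 tensors matching the feed targets ...
  if (!predictor->Run(inputs, &outputs)) return 1;  // assumed output parameter
  return 0;
}
```

Returning the base-class `std::unique_ptr<PaddlePredictor>` is also why the function now ends with `return std::move(predictor);` and calls `Init()` through a `dynamic_cast` back to the implementation type.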