@@ -50,7 +50,7 @@ template <typename Target>
 bool PaddleInferenceAnakinPredictor<Target>::Init(
     const contrib::AnakinConfig &config) {
   if (!(graph_.load(config.model_file))) {
-    VLOG(30) << "fail to load graph from " << config.model_file;
+    VLOG(3) << "fail to load graph from " << config.model_file;
     return false;
   }
   auto inputs = graph_.get_ins();
@@ -76,15 +76,15 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
     std::vector<PaddleTensor> *output_data, int batch_size) {
   for (const auto &input : inputs) {
     if (input.dtype != PaddleDType::FLOAT32) {
-      VLOG(30) << "Only support float type inputs. " << input.name
-               << "'s type is not float";
+      VLOG(3) << "Only support float type inputs. " << input.name
+              << "'s type is not float";
       return false;
     }
     auto d_tensor_in_p = executor_p_->get_in(input.name);
     auto net_shape = d_tensor_in_p->shape();
     if (net_shape.size() != input.shape.size()) {
-      VLOG(30) << "input " << input.name
-               << "'s shape size should be equal to that of net";
+      VLOG(3) << "input " << input.name
+              << "'s shape size should be equal to that of net";
       return false;
     }
     int sum = 1;
@@ -105,15 +105,15 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
 
     if (input.lod.size() > 0) {
       if (input.lod.size() > 1) {
-        VLOG(30) << "input lod first dim should <=1, but you set "
-                 << input.lod.size();
+        VLOG(3) << "input lod first dim should <=1, but you set "
+                << input.lod.size();
         return false;
       }
       std::vector<int> offset(input.lod[0].begin(), input.lod[0].end());
       d_tensor_in_p->set_seq_offset(offset);
-      VLOG(30) << "offset.size(): " << offset.size();
+      VLOG(3) << "offset.size(): " << offset.size();
       for (int i = 0; i < offset.size(); i++) {
-        VLOG(30) << offset[i];
+        VLOG(3) << offset[i];
       }
     }
 
@@ -124,7 +124,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
     if (cudaMemcpy(d_data_p, static_cast<float *>(input.data.data()),
                    d_tensor_in_p->valid_size() * sizeof(float),
                    cudaMemcpyHostToDevice) != 0) {
-      VLOG(30) << "copy data from CPU to GPU error";
+      VLOG(3) << "copy data from CPU to GPU error";
       return false;
     }
   }
@@ -141,7 +141,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
 #endif
 
   if (output_data->empty()) {
-    VLOG(30) << "At least one output should be set with tensors' names.";
+    VLOG(3) << "At least one output should be set with tensors' names.";
     return false;
   }
   for (auto &output : *output_data) {
@@ -157,7 +157,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
     if (cudaMemcpy(output.data.data(), tensor->mutable_data(),
                    tensor->valid_size() * sizeof(float),
                    cudaMemcpyDeviceToHost) != 0) {
-      VLOG(30) << "copy data from GPU to CPU error";
+      VLOG(3) << "copy data from GPU to CPU error";
       return false;
     }
   }
@@ -181,14 +181,14 @@ anakin::Net<Target, anakin::saber::AK_FLOAT, anakin::Precision::FP32>
 template <typename Target>
 std::unique_ptr<PaddlePredictor>
 PaddleInferenceAnakinPredictor<Target>::Clone() {
-  VLOG(30) << "Anakin Predictor::clone";
+  VLOG(3) << "Anakin Predictor::clone";
   std::unique_ptr<PaddlePredictor> cls(
       new PaddleInferenceAnakinPredictor<Target>());
   // construct executer from other graph
   auto anakin_predictor_p =
       dynamic_cast<PaddleInferenceAnakinPredictor<Target> *>(cls.get());
   if (!anakin_predictor_p) {
-    VLOG(30) << "fail to call Init";
+    VLOG(3) << "fail to call Init";
     return nullptr;
   }
   anakin_predictor_p->get_executer().init(graph_);
@@ -206,10 +206,10 @@ template <>
 std::unique_ptr<PaddlePredictor>
 CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
     const contrib::AnakinConfig &config) {
-  VLOG(30) << "Anakin Predictor create.";
+  VLOG(3) << "Anakin Predictor create.";
   if (config.target_type == contrib::AnakinConfig::NVGPU) {
 #ifdef PADDLE_WITH_CUDA
-    VLOG(30) << "Anakin Predictor create on [ NVIDIA GPU ].";
+    VLOG(3) << "Anakin Predictor create on [ NVIDIA GPU ].";
     std::unique_ptr<PaddlePredictor> x(
         new PaddleInferenceAnakinPredictor<anakin::NV>(config));
     return x;
@@ -218,12 +218,12 @@ CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
     return nullptr;
 #endif
   } else if (config.target_type == contrib::AnakinConfig::X86) {
-    VLOG(30) << "Anakin Predictor create on [ Intel X86 ].";
+    VLOG(3) << "Anakin Predictor create on [ Intel X86 ].";
     std::unique_ptr<PaddlePredictor> x(
         new PaddleInferenceAnakinPredictor<anakin::X86>(config));
     return x;
   } else {
-    VLOG(30) << "Anakin Predictor create on unknown platform.";
+    VLOG(3) << "Anakin Predictor create on unknown platform.";
    return nullptr;
  }
 }
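
Context for the level change (not part of the diff): Paddle's `VLOG` here appears to be glog's, where `VLOG(n)` emits only when the runtime verbosity is at least `n`. At the usual `--v=3` / `GLOG_v=3` settings, `VLOG(30)` lines stay silent while `VLOG(3)` lines fire. A minimal standalone sketch of that gating, assuming plain glog; the file name and flag values below are illustrative, not from this patch:

```cpp
// vlog_demo.cc -- sketch of glog verbosity gating (illustrative only).
// Build: g++ vlog_demo.cc -lglog && ./a.out
#include <glog/logging.h>

int main(int, char *argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;  // log to stderr instead of files
  FLAGS_v = 3;               // runtime verbosity, same effect as GLOG_v=3

  VLOG(3) << "emitted: 3 <= FLAGS_v";            // visible after this patch
  VLOG(30) << "skipped: 30 > FLAGS_v";           // silent at normal settings
  return 0;
}
```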
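Separately, the `cudaMemcpy` checks above compare the returned `cudaError_t` against `0`, which works because `cudaSuccess` is `0`. A hedged sketch of the more self-documenting equivalent; the helper name and message are mine, not from the patch:

```cpp
// Sketch (illustrative): explicit cudaSuccess check with error string.
#include <cuda_runtime.h>
#include <cstdio>

bool CopyToDevice(float *dst, const float *src, size_t n) {
  cudaError_t err =
      cudaMemcpy(dst, src, n * sizeof(float), cudaMemcpyHostToDevice);
  if (err != cudaSuccess) {  // cudaSuccess == 0, matching the diff's `!= 0`
    fprintf(stderr, "copy data from CPU to GPU error: %s\n",
            cudaGetErrorString(err));
    return false;
  }
  return true;
}
```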