Skip to content

Commit 3da43dc

Browse files
committed
Because Anakin does NOT use glog, we revert the Anakin-related change
test=develop
1 parent 4971096 commit 3da43dc

File tree

2 files changed

+20
-20
lines changed

2 files changed

+20
-20
lines changed

paddle/fluid/inference/api/api_anakin_engine.cc

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ template <typename Target>
5050
bool PaddleInferenceAnakinPredictor<Target>::Init(
5151
const contrib::AnakinConfig &config) {
5252
if (!(graph_.load(config.model_file))) {
53-
VLOG(30) << "fail to load graph from " << config.model_file;
53+
VLOG(3) << "fail to load graph from " << config.model_file;
5454
return false;
5555
}
5656
auto inputs = graph_.get_ins();
@@ -76,15 +76,15 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
7676
std::vector<PaddleTensor> *output_data, int batch_size) {
7777
for (const auto &input : inputs) {
7878
if (input.dtype != PaddleDType::FLOAT32) {
79-
VLOG(30) << "Only support float type inputs. " << input.name
80-
<< "'s type is not float";
79+
VLOG(3) << "Only support float type inputs. " << input.name
80+
<< "'s type is not float";
8181
return false;
8282
}
8383
auto d_tensor_in_p = executor_p_->get_in(input.name);
8484
auto net_shape = d_tensor_in_p->shape();
8585
if (net_shape.size() != input.shape.size()) {
86-
VLOG(30) << " input " << input.name
87-
<< "'s shape size should be equal to that of net";
86+
VLOG(3) << " input " << input.name
87+
<< "'s shape size should be equal to that of net";
8888
return false;
8989
}
9090
int sum = 1;
@@ -105,15 +105,15 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
105105

106106
if (input.lod.size() > 0) {
107107
if (input.lod.size() > 1) {
108-
VLOG(30) << " input lod first dim should <=1, but you set "
109-
<< input.lod.size();
108+
VLOG(3) << " input lod first dim should <=1, but you set "
109+
<< input.lod.size();
110110
return false;
111111
}
112112
std::vector<int> offset(input.lod[0].begin(), input.lod[0].end());
113113
d_tensor_in_p->set_seq_offset(offset);
114-
VLOG(30) << "offset.size(): " << offset.size();
114+
VLOG(3) << "offset.size(): " << offset.size();
115115
for (int i = 0; i < offset.size(); i++) {
116-
VLOG(30) << offset[i];
116+
VLOG(3) << offset[i];
117117
}
118118
}
119119

@@ -124,7 +124,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
124124
if (cudaMemcpy(d_data_p, static_cast<float *>(input.data.data()),
125125
d_tensor_in_p->valid_size() * sizeof(float),
126126
cudaMemcpyHostToDevice) != 0) {
127-
VLOG(30) << "copy data from CPU to GPU error";
127+
VLOG(3) << "copy data from CPU to GPU error";
128128
return false;
129129
}
130130
}
@@ -141,7 +141,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
141141
#endif
142142

143143
if (output_data->empty()) {
144-
VLOG(30) << "At least one output should be set with tensors' names.";
144+
VLOG(3) << "At least one output should be set with tensors' names.";
145145
return false;
146146
}
147147
for (auto &output : *output_data) {
@@ -157,7 +157,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
157157
if (cudaMemcpy(output.data.data(), tensor->mutable_data(),
158158
tensor->valid_size() * sizeof(float),
159159
cudaMemcpyDeviceToHost) != 0) {
160-
VLOG(30) << "copy data from GPU to CPU error";
160+
VLOG(3) << "copy data from GPU to CPU error";
161161
return false;
162162
}
163163
}
@@ -181,14 +181,14 @@ anakin::Net<Target, anakin::saber::AK_FLOAT, anakin::Precision::FP32>
181181
template <typename Target>
182182
std::unique_ptr<PaddlePredictor>
183183
PaddleInferenceAnakinPredictor<Target>::Clone() {
184-
VLOG(30) << "Anakin Predictor::clone";
184+
VLOG(3) << "Anakin Predictor::clone";
185185
std::unique_ptr<PaddlePredictor> cls(
186186
new PaddleInferenceAnakinPredictor<Target>());
187187
// construct executer from other graph
188188
auto anakin_predictor_p =
189189
dynamic_cast<PaddleInferenceAnakinPredictor<Target> *>(cls.get());
190190
if (!anakin_predictor_p) {
191-
VLOG(30) << "fail to call Init";
191+
VLOG(3) << "fail to call Init";
192192
return nullptr;
193193
}
194194
anakin_predictor_p->get_executer().init(graph_);
@@ -206,10 +206,10 @@ template <>
206206
std::unique_ptr<PaddlePredictor>
207207
CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
208208
const contrib::AnakinConfig &config) {
209-
VLOG(30) << "Anakin Predictor create.";
209+
VLOG(3) << "Anakin Predictor create.";
210210
if (config.target_type == contrib::AnakinConfig::NVGPU) {
211211
#ifdef PADDLE_WITH_CUDA
212-
VLOG(30) << "Anakin Predictor create on [ NVIDIA GPU ].";
212+
VLOG(3) << "Anakin Predictor create on [ NVIDIA GPU ].";
213213
std::unique_ptr<PaddlePredictor> x(
214214
new PaddleInferenceAnakinPredictor<anakin::NV>(config));
215215
return x;
@@ -218,12 +218,12 @@ CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
218218
return nullptr;
219219
#endif
220220
} else if (config.target_type == contrib::AnakinConfig::X86) {
221-
VLOG(30) << "Anakin Predictor create on [ Intel X86 ].";
221+
VLOG(3) << "Anakin Predictor create on [ Intel X86 ].";
222222
std::unique_ptr<PaddlePredictor> x(
223223
new PaddleInferenceAnakinPredictor<anakin::X86>(config));
224224
return x;
225225
} else {
226-
VLOG(30) << "Anakin Predictor create on unknown platform.";
226+
VLOG(3) << "Anakin Predictor create on unknown platform.";
227227
return nullptr;
228228
}
229229
}

paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -217,9 +217,9 @@ void single_test() {
217217
LOG(INFO) << "sequence_length = " << seq_offset[seq_offset.size() - 1];
218218

219219
float* data_o = static_cast<float*>(outputs[0].data.data());
220-
VLOG(30) << "outputs[0].data.length() = " << outputs[0].data.length();
220+
VLOG(3) << "outputs[0].data.length() = " << outputs[0].data.length();
221221
for (size_t j = 0; j < outputs[0].data.length(); ++j) {
222-
VLOG(30) << "output[" << j << "]: " << data_o[j];
222+
VLOG(3) << "output[" << j << "]: " << data_o[j];
223223
}
224224
}
225225
}

0 commit comments

Comments
 (0)