
Commit efa5bac

fix demo_ci bug in vis_demo.cc
test=develop
1 parent 5428cb9

4 files changed: +164 -96 lines

paddle/fluid/inference/api/demo_ci/run.sh

Lines changed: 3 additions & 5 deletions
@@ -100,19 +100,17 @@ for WITH_STATIC_LIB in ON OFF; do
     rm -rf *
     cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
       -DWITH_MKL=$TURN_ON_MKL \
-      -DDEMO_NAME=vis_demo \
+      -DDEMO_NAME=trt_mobilenet_demo \
       -DWITH_GPU=$TEST_GPU_CPU \
       -DWITH_STATIC_LIB=$WITH_STATIC_LIB \
       -DUSE_TENSORRT=$USE_TENSORRT \
       -DTENSORRT_INCLUDE_DIR=$TENSORRT_INCLUDE_DIR \
       -DTENSORRT_LIB_DIR=$TENSORRT_LIB_DIR
     make -j
-    ./vis_demo \
+    ./trt_mobilenet_demo \
       --modeldir=$DATA_DIR/mobilenet/model \
       --data=$DATA_DIR/mobilenet/data.txt \
-      --refer=$DATA_DIR/mobilenet/result.txt \
-      --use_gpu=true \
-      --use_trt=true
+      --refer=$DATA_DIR/mobilenet/result.txt
   fi
 done
 set +x
paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc

Lines changed: 88 additions & 0 deletions
@@ -0,0 +1,88 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+/*
+ * This file contains a demo of mobilenet for TensorRT.
+ */
+
+#include <gflags/gflags.h>
+#include <glog/logging.h>  // use glog instead of CHECK to avoid importing other paddle header files.
+#include <fstream>
+#include <iostream>
+
+// #include "paddle/fluid/platform/enforce.h"
+#include "paddle/fluid/inference/demo_ci/utils.h"
+
+#ifdef PADDLE_WITH_CUDA
+DECLARE_double(fraction_of_gpu_memory_to_use);
+#endif
+DEFINE_string(modeldir, "", "Directory of the inference model.");
+DEFINE_string(refer, "", "path to reference result for comparison.");
+DEFINE_string(
+    data, "",
+    "path of data; each line is a record, format is "
+    "'<space separated floats as data>\t<space separated ints as shape>'");
+
+namespace paddle {
+namespace demo {
+
+/*
+ * Use the TensorRT engine to run the demo.
+ */
+void Main() {
+  std::unique_ptr<PaddlePredictor> predictor;
+  paddle::contrib::MixedRTConfig config;
+  config.param_file = FLAGS_modeldir + "/__params__";
+  config.prog_file = FLAGS_modeldir + "/__model__";
+  config.use_gpu = true;
+  config.device = 0;
+  config.max_batch_size = 1;
+  config.fraction_of_gpu_memory = 0.1;  // set by yourself
+  predictor = CreatePaddlePredictor<paddle::contrib::MixedRTConfig>(config);
+
+  VLOG(3) << "begin to process data";
+  // Just a single batch of data.
+  std::string line;
+  std::ifstream file(FLAGS_data);
+  std::getline(file, line);
+  auto record = ProcessALine(line);
+  file.close();
+
+  // Inference.
+  PaddleTensor input;
+  input.shape = record.shape;
+  input.data =
+      PaddleBuf(record.data.data(), record.data.size() * sizeof(float));
+  input.dtype = PaddleDType::FLOAT32;
+
+  VLOG(3) << "run executor";
+  std::vector<PaddleTensor> output;
+  predictor->Run({input}, &output, 1);
+
+  VLOG(3) << "output.size " << output.size();
+  auto& tensor = output.front();
+  VLOG(3) << "output: " << SummaryTensor(tensor);
+
+  // Compare with the reference result.
+  CheckOutput(FLAGS_refer, tensor);
+}
+
+}  // namespace demo
+}  // namespace paddle
+
+int main(int argc, char** argv) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  paddle::demo::Main();
+  return 0;
+}
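
Note: the demo reads a single record from the file passed via --data. The sketch below is not part of the commit; it is one way to generate such a file in the format the --data help string documents (space separated floats, a tab, then space separated shape ints). The 1x3x224x224 input shape and the constant 0.5 fill value are illustrative assumptions.

// make_record.cc -- illustrative only; writes one record in the
// "<space separated floats>\t<space separated ints as shape>" format.
#include <fstream>
#include <vector>

int main() {
  const std::vector<int> shape = {1, 3, 224, 224};  // assumed mobilenet input
  long long numel = 1;
  for (int d : shape) numel *= d;

  std::ofstream out("data.txt");
  for (long long i = 0; i < numel; ++i) out << (i ? " " : "") << 0.5f;  // dummy values
  out << '\t';
  bool first = true;
  for (int d : shape) {
    out << (first ? "" : " ") << d;
    first = false;
  }
  out << '\n';
  return 0;
}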

paddle/fluid/inference/api/demo_ci/utils.h

Lines changed: 59 additions & 0 deletions
@@ -14,13 +14,20 @@

 #pragma once
 #include <algorithm>
+#include <fstream>
+#include <iostream>
 #include <string>
 #include <vector>
 #include "paddle/fluid/inference/paddle_inference_api.h"

 namespace paddle {
 namespace demo {

+struct Record {
+  std::vector<float> data;
+  std::vector<int32_t> shape;
+};
+
 static void split(const std::string& str, char sep,
                   std::vector<std::string>* pieces) {
   pieces->clear();
@@ -39,6 +46,58 @@ static void split(const std::string& str, char sep,
   }
 }

+Record ProcessALine(const std::string& line) {
+  VLOG(3) << "process a line";
+  std::vector<std::string> columns;
+  split(line, '\t', &columns);
+  CHECK_EQ(columns.size(), 2UL)
+      << "data format error, should be <data>\t<shape>";
+
+  Record record;
+  std::vector<std::string> data_strs;
+  split(columns[0], ' ', &data_strs);
+  for (auto& d : data_strs) {
+    record.data.push_back(std::stof(d));
+  }
+
+  std::vector<std::string> shape_strs;
+  split(columns[1], ' ', &shape_strs);
+  for (auto& s : shape_strs) {
+    record.shape.push_back(std::stoi(s));
+  }
+  VLOG(3) << "data size " << record.data.size();
+  VLOG(3) << "data shape size " << record.shape.size();
+  return record;
+}
+
+void CheckOutput(const std::string& referfile, const PaddleTensor& output) {
+  std::string line;
+  std::ifstream file(referfile);
+  std::getline(file, line);
+  auto refer = ProcessALine(line);
+  file.close();
+
+  size_t numel = output.data.length() / PaddleDtypeSize(output.dtype);
+  VLOG(3) << "predictor output numel " << numel;
+  VLOG(3) << "reference output numel " << refer.data.size();
+  CHECK_EQ(numel, refer.data.size());
+  switch (output.dtype) {
+    case PaddleDType::INT64: {
+      for (size_t i = 0; i < numel; ++i) {
+        CHECK_EQ(static_cast<int64_t*>(output.data.data())[i], refer.data[i]);
+      }
+      break;
+    }
+    case PaddleDType::FLOAT32:
+      for (size_t i = 0; i < numel; ++i) {
+        CHECK_LT(
+            fabs(static_cast<float*>(output.data.data())[i] - refer.data[i]),
+            1e-5);
+      }
+      break;
+  }
+}
+
 /*
  * Get a summary of a PaddleTensor content.
  */
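
For reference, the parsing contract enforced by the new ProcessALine can be exercised in isolation. The following is a minimal, dependency-free sketch, not from the commit: plain asserts stand in for glog's CHECK_EQ, VLOG tracing is dropped, and the names record_parse_sketch.cc and ParseRecord are invented for illustration.

// record_parse_sketch.cc -- simplified stand-in for utils.h's ProcessALine.
#include <cassert>
#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

struct Record {
  std::vector<float> data;
  std::vector<int32_t> shape;
};

// Same contract as ProcessALine: "<floats>\t<shape ints>", exactly one tab.
Record ParseRecord(const std::string& line) {
  const auto tab = line.find('\t');
  assert(tab != std::string::npos && "data format error, should be <data>\\t<shape>");
  Record r;
  std::istringstream data_ss(line.substr(0, tab));
  for (float f; data_ss >> f;) r.data.push_back(f);
  std::istringstream shape_ss(line.substr(tab + 1));
  for (int32_t v; shape_ss >> v;) r.shape.push_back(v);
  return r;
}

int main() {
  Record r = ParseRecord("0.1 0.2 0.3 0.4 0.5 0.6\t1 2 3");
  assert(r.data.size() == 6 && r.shape.size() == 3);
  return 0;
}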

paddle/fluid/inference/api/demo_ci/vis_demo.cc

Lines changed: 14 additions & 91 deletions
@@ -18,10 +18,6 @@ limitations under the License. */

 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of CHECK to avoid importing other paddle header files.
-#include <fstream>
-#include <iostream>
-
-// #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/inference/demo_ci/utils.h"

 #ifdef PADDLE_WITH_CUDA
@@ -34,99 +30,28 @@ DEFINE_string(
     "path of data; each line is a record, format is "
     "'<space separated floats as data>\t<space separated ints as shape>'");
 DEFINE_bool(use_gpu, false, "Whether use gpu.");
-DEFINE_bool(use_trt, false, "Whether use trt.");

 namespace paddle {
 namespace demo {

-struct Record {
-  std::vector<float> data;
-  std::vector<int32_t> shape;
-};
-
-void split(const std::string& str, char sep, std::vector<std::string>* pieces);
-
-Record ProcessALine(const std::string& line) {
-  VLOG(3) << "process a line";
-  std::vector<std::string> columns;
-  split(line, '\t', &columns);
-  CHECK_EQ(columns.size(), 2UL)
-      << "data format error, should be <data>\t<shape>";
-
-  Record record;
-  std::vector<std::string> data_strs;
-  split(columns[0], ' ', &data_strs);
-  for (auto& d : data_strs) {
-    record.data.push_back(std::stof(d));
-  }
-
-  std::vector<std::string> shape_strs;
-  split(columns[1], ' ', &shape_strs);
-  for (auto& s : shape_strs) {
-    record.shape.push_back(std::stoi(s));
-  }
-  VLOG(3) << "data size " << record.data.size();
-  VLOG(3) << "data shape size " << record.shape.size();
-  return record;
-}
-
-void CheckOutput(const std::string& referfile, const PaddleTensor& output) {
-  std::string line;
-  std::ifstream file(referfile);
-  std::getline(file, line);
-  auto refer = ProcessALine(line);
-  file.close();
-
-  size_t numel = output.data.length() / PaddleDtypeSize(output.dtype);
-  VLOG(3) << "predictor output numel " << numel;
-  VLOG(3) << "reference output numel " << refer.data.size();
-  CHECK_EQ(numel, refer.data.size());
-  switch (output.dtype) {
-    case PaddleDType::INT64: {
-      for (size_t i = 0; i < numel; ++i) {
-        CHECK_EQ(static_cast<int64_t*>(output.data.data())[i], refer.data[i]);
-      }
-      break;
-    }
-    case PaddleDType::FLOAT32:
-      for (size_t i = 0; i < numel; ++i) {
-        CHECK_LT(
-            fabs(static_cast<float*>(output.data.data())[i] - refer.data[i]),
-            1e-5);
-      }
-      break;
-  }
-}
-
 /*
  * Use the native fluid engine to run the demo.
  */
-void Main(bool use_gpu, bool use_trt) {
+void Main(bool use_gpu) {
   std::unique_ptr<PaddlePredictor> predictor;
-  if (!use_trt) {
-    NativeConfig config;
-    config.param_file = FLAGS_modeldir + "/__params__";
-    config.prog_file = FLAGS_modeldir + "/__model__";
-    config.use_gpu = use_gpu;
-    config.device = 0;
-    if (FLAGS_use_gpu) {
-      config.fraction_of_gpu_memory = 0.1;  // set by yourself
-    }
-
-    VLOG(3) << "init predictor";
-    predictor =
-        CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
-  } else {
-    paddle::contrib::MixedRTConfig config;
-    config.param_file = FLAGS_modeldir + "/__params__";
-    config.prog_file = FLAGS_modeldir + "/__model__";
-    config.use_gpu = true;
-    config.device = 0;
-    config.max_batch_size = 1;
+  NativeConfig config;
+  config.param_file = FLAGS_modeldir + "/__params__";
+  config.prog_file = FLAGS_modeldir + "/__model__";
+  config.use_gpu = use_gpu;
+  config.device = 0;
+  if (FLAGS_use_gpu) {
     config.fraction_of_gpu_memory = 0.1;  // set by yourself
-    predictor = CreatePaddlePredictor<paddle::contrib::MixedRTConfig>(config);
   }

+  VLOG(3) << "init predictor";
+  predictor =
+      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+
   VLOG(3) << "begin to process data";
   // Just a single batch of data.
   std::string line;
@@ -159,12 +84,10 @@ void Main(bool use_gpu, bool use_trt) {

 int main(int argc, char** argv) {
   google::ParseCommandLineFlags(&argc, &argv, true);
-  if (FLAGS_use_gpu && FLAGS_use_trt) {
-    paddle::demo::Main(true /*use_gpu*/, true);
-  } else if (FLAGS_use_gpu) {
-    paddle::demo::Main(true /*use_gpu*/, false);
+  if (FLAGS_use_gpu) {
+    paddle::demo::Main(true /*use_gpu*/);
   } else {
-    paddle::demo::Main(false /*use_gpu*/, false /*use_tensorrt*/);
+    paddle::demo::Main(false /*use_gpu*/);
   }
   return 0;
 }
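
Taken together, the change moves engine selection from a runtime flag (--use_trt) to a choice of binary: vis_demo keeps only the NativeConfig path, while trt_mobilenet_demo owns the TensorRT path. Below is a hedged side-by-side sketch of that contrast, using only config fields that appear in the diffs above; the helper name MakePredictor is invented for illustration and does not exist in the repository.

// Hypothetical helper, shown only to contrast the two configs; the real code
// keeps these paths in separate binaries (vis_demo vs. trt_mobilenet_demo).
#include <memory>
#include <string>
#include "paddle/fluid/inference/paddle_inference_api.h"

std::unique_ptr<paddle::PaddlePredictor> MakePredictor(bool use_trt,
                                                       const std::string& dir) {
  if (use_trt) {
    // TensorRT-backed engine, as in trt_mobilenet_demo.cc.
    paddle::contrib::MixedRTConfig config;
    config.param_file = dir + "/__params__";
    config.prog_file = dir + "/__model__";
    config.use_gpu = true;
    config.device = 0;
    config.max_batch_size = 1;
    config.fraction_of_gpu_memory = 0.1;
    return paddle::CreatePaddlePredictor<paddle::contrib::MixedRTConfig>(config);
  }
  // Native fluid engine, as in the simplified vis_demo.cc.
  paddle::NativeConfig config;
  config.param_file = dir + "/__params__";
  config.prog_file = dir + "/__model__";
  config.use_gpu = false;  // or true, with fraction_of_gpu_memory set
  config.device = 0;
  return paddle::CreatePaddlePredictor<paddle::NativeConfig,
                                       paddle::PaddleEngineKind::kNative>(config);
}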
