@@ -18,26 +18,25 @@ limitations under the License. */
 
 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of PADDLE_ENFORCE to avoid importing other paddle header files.
-#include <gtest/gtest.h>
 #include <fstream>
 #include <iostream>
-#include "paddle/contrib/inference/demo/utils.h"
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/platform/enforce.h"
+#include "utils.h"
 
 #ifdef PADDLE_WITH_CUDA
 DECLARE_double(fraction_of_gpu_memory_to_use);
 #endif
-
-namespace paddle {
-namespace demo {
-
 DEFINE_string(modeldir, "", "Directory of the inference model.");
 DEFINE_string(refer, "", "path to reference result for comparison.");
 DEFINE_string(
     data,
     "",
     "path of data; each line is a record, format is "
     "'<space splitted floats as data>\t<space splitted ints as shape'");
+DEFINE_bool(use_gpu, false, "Whether use gpu.");
+
+namespace paddle {
+namespace demo {
 
 struct Record {
   std::vector<float> data;
@@ -47,7 +46,7 @@ struct Record {
 void split(const std::string& str, char sep, std::vector<std::string>* pieces);
 
 Record ProcessALine(const std::string& line) {
-  LOG(INFO) << "process a line";
+  VLOG(3) << "process a line";
   std::vector<std::string> columns;
   split(line, '\t', &columns);
   CHECK_EQ(columns.size(), 2UL)
@@ -65,8 +64,8 @@ Record ProcessALine(const std::string& line) {
   for (auto& s : shape_strs) {
     record.shape.push_back(std::stoi(s));
   }
-  LOG(INFO) << "data size " << record.data.size();
-  LOG(INFO) << "data shape size " << record.shape.size();
+  VLOG(3) << "data size " << record.data.size();
+  VLOG(3) << "data shape size " << record.shape.size();
   return record;
 }
 
@@ -78,20 +77,22 @@ void CheckOutput(const std::string& referfile, const PaddleTensor& output) {
   file.close();
 
   size_t numel = output.data.length() / PaddleDtypeSize(output.dtype);
-  LOG(INFO) << "predictor output numel " << numel;
-  LOG(INFO) << "reference output numel " << refer.data.size();
-  EXPECT_EQ(numel, refer.data.size());
+  VLOG(3) << "predictor output numel " << numel;
+  VLOG(3) << "reference output numel " << refer.data.size();
+  PADDLE_ENFORCE_EQ(numel, refer.data.size());
   switch (output.dtype) {
     case PaddleDType::INT64: {
       for (size_t i = 0; i < numel; ++i) {
-        EXPECT_EQ(static_cast<int64_t*>(output.data.data())[i], refer.data[i]);
+        PADDLE_ENFORCE_EQ(static_cast<int64_t*>(output.data.data())[i],
+                          refer.data[i]);
       }
       break;
     }
     case PaddleDType::FLOAT32:
       for (size_t i = 0; i < numel; ++i) {
-        EXPECT_NEAR(
-            static_cast<float*>(output.data.data())[i], refer.data[i], 1e-5);
+        PADDLE_ENFORCE_LT(
+            fabs(static_cast<float*>(output.data.data())[i] - refer.data[i]),
+            1e-5);
       }
       break;
   }
@@ -106,15 +107,15 @@ void Main(bool use_gpu) {
   config.prog_file = FLAGS_modeldir + "/__model__";
   config.use_gpu = use_gpu;
   config.device = 0;
-#ifdef PADDLE_WITH_CUDA
-  config.fraction_of_gpu_memory = FLAGS_fraction_of_gpu_memory_to_use;
-#endif
+  if (FLAGS_use_gpu) {
+    config.fraction_of_gpu_memory = 0.1;  // set by yourself
+  }
 
-  LOG(INFO) << "init predictor";
+  VLOG(3) << "init predictor";
   auto predictor =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
 
-  LOG(INFO) << "begin to process data";
+  VLOG(3) << "begin to process data";
   // Just a single batch of data.
   std::string line;
   std::ifstream file(FLAGS_data);
@@ -129,21 +130,26 @@ void Main(bool use_gpu) {
       .data = PaddleBuf(record.data.data(), record.data.size() * sizeof(float)),
       .dtype = PaddleDType::FLOAT32};
 
-  LOG(INFO) << "run executor";
+  VLOG(3) << "run executor";
   std::vector<PaddleTensor> output;
   predictor->Run({input}, &output);
 
-  LOG(INFO) << "output.size " << output.size();
+  VLOG(3) << "output.size " << output.size();
   auto& tensor = output.front();
-  LOG(INFO) << "output: " << SummaryTensor(tensor);
+  VLOG(3) << "output: " << SummaryTensor(tensor);
 
   // compare with reference result
   CheckOutput(FLAGS_refer, tensor);
 }
 
-TEST(demo, vis_demo_cpu) { Main(false /*use_gpu*/); }
-#ifdef PADDLE_WITH_CUDA
-TEST(demo, vis_demo_gpu) { Main(true /*use_gpu*/); }
-#endif
 }  // namespace demo
 }  // namespace paddle
+
+int main(int argc, char** argv) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  paddle::demo::Main(false /* use_gpu */);
+  if (FLAGS_use_gpu) {
+    paddle::demo::Main(true /* use_gpu */);
+  }
+  return 0;
+}
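Note: replacing LOG(INFO) with VLOG(3) makes the demo quiet by default, since glog emits VLOG(n) messages only when the runtime verbosity is at least n (for example via the GLOG_v environment variable). A minimal standalone sketch of that gating behavior, assuming only a stock glog install (this snippet is illustrative, not part of the patch):

 #include <glog/logging.h>

 int main(int argc, char** argv) {
   google::InitGoogleLogging(argv[0]);
   LOG(INFO) << "always logged at INFO severity";
   // Silent unless verbosity is raised, e.g. by running with GLOG_v=3.
   VLOG(3) << "logged only when verbosity >= 3";
   return 0;
 }

With the gtest TEST cases removed, the demo becomes a standalone binary whose own main parses the --modeldir, --refer, --data, and --use_gpu flags via google::ParseCommandLineFlags, and the PADDLE_ENFORCE_* checks fail hard at runtime instead of recording a gtest failure.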