@@ -56,9 +56,10 @@ NativeConfig GetConfig() {
56
56
return config;
57
57
}
58
58
59
- TEST (paddle_inference_api_impl, word2vec ) {
59
+ void MainWord2Vec ( bool use_gpu ) {
60
60
NativeConfig config = GetConfig ();
61
61
auto predictor = CreatePaddlePredictor<NativeConfig>(config);
62
+ config.use_gpu = use_gpu;
62
63
63
64
framework::LoDTensor first_word, second_word, third_word, fourth_word;
64
65
framework::LoD lod{{0 , 1 }};
@@ -106,11 +107,12 @@ TEST(paddle_inference_api_impl, word2vec) {
106
107
free (outputs[0 ].data .data );
107
108
}
108
109
109
- TEST (paddle_inference_api_impl, image_classification ) {
110
+ void MainImageClassification ( bool use_gpu ) {
110
111
int batch_size = 2 ;
111
112
bool use_mkldnn = false ;
112
113
bool repeat = false ;
113
114
NativeConfig config = GetConfig ();
115
+ config.use_gpu = use_gpu;
114
116
config.model_dir =
115
117
FLAGS_dirname + " image_classification_resnet.inference.model" ;
116
118
@@ -155,9 +157,9 @@ TEST(paddle_inference_api_impl, image_classification) {
155
157
free (data);
156
158
}
157
159
158
- TEST (paddle_inference_api_native_multithreads, word2vec ) {
160
+ void MainThreadsWord2Vec ( bool use_gpu ) {
159
161
NativeConfig config = GetConfig ();
160
- config.use_gpu = false ;
162
+ config.use_gpu = use_gpu ;
161
163
auto main_predictor = CreatePaddlePredictor<NativeConfig>(config);
162
164
163
165
// prepare inputs data and reference results
@@ -216,11 +218,11 @@ TEST(paddle_inference_api_native_multithreads, word2vec) {
216
218
}
217
219
}
218
220
219
- TEST (paddle_inference_api_native_multithreads, image_classification ) {
221
+ void MainThreadsImageClassification ( bool use_gpu ) {
220
222
constexpr int num_jobs = 4 ; // each job run 1 batch
221
223
constexpr int batch_size = 1 ;
222
224
NativeConfig config = GetConfig ();
223
- config.use_gpu = false ;
225
+ config.use_gpu = use_gpu ;
224
226
config.model_dir =
225
227
FLAGS_dirname + " image_classification_resnet.inference.model" ;
226
228
@@ -269,4 +271,29 @@ TEST(paddle_inference_api_native_multithreads, image_classification) {
269
271
}
270
272
}
271
273
274
+ TEST (inference_api_native, word2vec_cpu) { MainWord2Vec (false /* use_gpu*/ ); }
275
+ TEST (inference_api_native, word2vec_cpu_threads) {
276
+ MainThreadsWord2Vec (false /* use_gpu*/ );
277
+ }
278
+ TEST (inference_api_native, image_classification_cpu) {
279
+ MainThreadsImageClassification (false /* use_gpu*/ );
280
+ }
281
+ TEST (inference_api_native, image_classification_cpu_threads) {
282
+ MainThreadsImageClassification (false /* use_gpu*/ );
283
+ }
284
+
285
// GPU test entry points, compiled only when CUDA support is available.
// Mirrors the CPU tests above with use_gpu=true.
#ifdef PADDLE_WITH_CUDA
TEST(inference_api_native, word2vec_gpu) { MainWord2Vec(true /*use_gpu*/); }
TEST(inference_api_native, word2vec_gpu_threads) {
  MainThreadsWord2Vec(true /*use_gpu*/);
}
TEST(inference_api_native, image_classification_gpu) {
  // Fixed: previously called MainThreadsImageClassification (copy-paste bug),
  // so the single-threaded GPU image-classification path was never exercised.
  MainImageClassification(true /*use_gpu*/);
}
TEST(inference_api_native, image_classification_gpu_threads) {
  MainThreadsImageClassification(true /*use_gpu*/);
}
#endif
298
+
272
299
} // namespace paddle
0 commit comments