
Commit 746a62e

add gpu tests

1 parent 35e820d

2 files changed, 39 insertions(+), 10 deletions(-)

paddle/contrib/inference/demo/simple_on_word2vec.cc (6 additions, 4 deletions)
@@ -66,12 +66,12 @@ void Main(bool use_gpu) {
   }
 }
 
-void MainThreads(int num_threads) {
+void MainThreads(int num_threads, bool use_gpu) {
   // Multi-threads only support on CPU
   // 0. Create PaddlePredictor with a config.
   NativeConfig config;
   config.model_dir = FLAGS_dirname + "word2vec.inference.model";
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   auto main_predictor =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
 
@@ -113,11 +113,13 @@ void MainThreads(int num_threads) {
 }
 
 TEST(demo, word2vec_cpu) { Main(false /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_cpu_1) { MainThreads(1); }
-TEST(demo_multi_threads, word2vec_cpu_4) { MainThreads(4); }
+TEST(demo_multi_threads, word2vec_cpu_1) { MainThreads(1, false /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_cpu_4) { MainThreads(4, false /*use_gpu*/); }
 
 #ifdef PADDLE_WITH_CUDA
 TEST(demo, word2vec_gpu) { Main(true /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_gpu_1) { MainThreads(1, true /*use_gpu*/); }
+TEST(demo_multi_threads, word2vec_gpu_4) { MainThreads(4, true /*use_gpu*/); }
 #endif
 
 }  // namespace demo
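
For orientation, here is a minimal sketch (not the commit's code) of the shape MainThreads takes after this change: the new use_gpu argument only flips a field on NativeConfig, and each worker thread runs its own predictor. The header path and the per-thread Clone() call are assumptions based on the API names visible in the diff.

// Hypothetical sketch; header path and Clone() are assumptions.
#include <thread>
#include <vector>

#include "paddle/contrib/inference/paddle_inference_api.h"

void RunThreads(int num_threads, bool use_gpu) {
  paddle::NativeConfig config;
  config.model_dir = "word2vec.inference.model";  // placeholder path
  config.use_gpu = use_gpu;  // the knob this commit threads through
  auto main_predictor = paddle::CreatePaddlePredictor<
      paddle::NativeConfig, paddle::PaddleEngineKind::kNative>(config);

  std::vector<std::thread> threads;
  for (int tid = 0; tid < num_threads; ++tid) {
    threads.emplace_back([&main_predictor] {
      auto predictor = main_predictor->Clone();  // one predictor per thread
      // ... build PaddleTensor inputs, call predictor->Run(...), verify ...
    });
  }
  for (auto& t : threads) t.join();
}

With this shape, the CPU and GPU test cases differ only in the boolean they pass, which is exactly what the new TEST entries above do.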

paddle/contrib/inference/test_paddle_inference_api_impl.cc (33 additions, 6 deletions)
@@ -56,9 +56,10 @@ NativeConfig GetConfig() {
   return config;
 }
 
-TEST(paddle_inference_api_impl, word2vec) {
+void MainWord2Vec(bool use_gpu) {
   NativeConfig config = GetConfig();
   auto predictor = CreatePaddlePredictor<NativeConfig>(config);
+  config.use_gpu = use_gpu;
 
   framework::LoDTensor first_word, second_word, third_word, fourth_word;
   framework::LoD lod{{0, 1}};
@@ -106,11 +107,12 @@ TEST(paddle_inference_api_impl, word2vec) {
   free(outputs[0].data.data);
 }
 
-TEST(paddle_inference_api_impl, image_classification) {
+void MainImageClassification(bool use_gpu) {
   int batch_size = 2;
   bool use_mkldnn = false;
   bool repeat = false;
   NativeConfig config = GetConfig();
+  config.use_gpu = use_gpu;
   config.model_dir =
       FLAGS_dirname + "image_classification_resnet.inference.model";
 
@@ -155,9 +157,9 @@ TEST(paddle_inference_api_impl, image_classification) {
   free(data);
 }
 
-TEST(paddle_inference_api_native_multithreads, word2vec) {
+void MainThreadsWord2Vec(bool use_gpu) {
   NativeConfig config = GetConfig();
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   auto main_predictor = CreatePaddlePredictor<NativeConfig>(config);
 
   // prepare inputs data and reference results
@@ -216,11 +218,11 @@ TEST(paddle_inference_api_native_multithreads, word2vec) {
   }
 }
 
-TEST(paddle_inference_api_native_multithreads, image_classification) {
+void MainThreadsImageClassification(bool use_gpu) {
   constexpr int num_jobs = 4;  // each job run 1 batch
   constexpr int batch_size = 1;
   NativeConfig config = GetConfig();
-  config.use_gpu = false;
+  config.use_gpu = use_gpu;
   config.model_dir =
       FLAGS_dirname + "image_classification_resnet.inference.model";
 
@@ -269,4 +271,29 @@ TEST(paddle_inference_api_native_multithreads, image_classification) {
   }
 }
 
+TEST(inference_api_native, word2vec_cpu) { MainWord2Vec(false /*use_gpu*/); }
+TEST(inference_api_native, word2vec_cpu_threads) {
+  MainThreadsWord2Vec(false /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_cpu) {
+  MainThreadsImageClassification(false /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_cpu_threads) {
+  MainThreadsImageClassification(false /*use_gpu*/);
+}
+
+#ifdef PADDLE_WITH_CUDA
+TEST(inference_api_native, word2vec_gpu) { MainWord2Vec(true /*use_gpu*/); }
+TEST(inference_api_native, word2vec_gpu_threads) {
+  MainThreadsWord2Vec(true /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_gpu) {
+  MainThreadsImageClassification(true /*use_gpu*/);
+}
+TEST(inference_api_native, image_classification_gpu_threads) {
+  MainThreadsImageClassification(true /*use_gpu*/);
+}
+
+#endif
+
 }  // namespace paddle
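
The GPU variants are guarded by PADDLE_WITH_CUDA, so a CPU-only build still compiles and runs the four CPU tests. Below is a hypothetical entry point for such a test binary, sketched only to show how the FLAGS_dirname referenced in the diff would be populated; the real main may be supplied elsewhere in the build, and the gflags namespace can differ by version.

// Hypothetical test entry point; not part of this commit.
#include <gflags/gflags.h>
#include <gtest/gtest.h>

DEFINE_string(dirname, "", "Directory holding the inference models.");

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  google::ParseCommandLineFlags(&argc, &argv, true);  // fills FLAGS_dirname
  return RUN_ALL_TESTS();
}

A single new case can then be selected with googletest's standard filter, e.g. --gtest_filter=inference_api_native.word2vec_gpu, with --dirname pointing at the downloaded models.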
