@@ -117,34 +117,6 @@ void GetOneBatch(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   input_slots->assign({input_tensor});
 }
 
-void BenchAllData(const std::string &model_path, const std::string &data_file,
-                  const int batch_size, const int repeat) {
-  NativeConfig config;
-  config.model_dir = model_path;
-  config.use_gpu = false;
-  config.device = 0;
-  config.specify_input_name = true;
-  std::vector<PaddleTensor> input_slots, outputs_slots;
-  DataRecord data(data_file, batch_size);
-  auto predictor =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
-  GetOneBatch(&input_slots, &data, batch_size);
-  for (int i = 0; i < FLAGS_burning; i++) {
-    predictor->Run(input_slots, &outputs_slots);
-  }
-  Timer timer;
-  double sum = 0;
-  for (int i = 0; i < repeat; i++) {
-    for (size_t bid = 0; bid < data.batched_datas.size(); ++bid) {
-      GetOneBatch(&input_slots, &data, batch_size);
-      timer.tic();
-      predictor->Run(input_slots, &outputs_slots);
-      sum += timer.toc();
-    }
-  }
-  PrintTime(batch_size, repeat, 1, 0, sum / repeat);
-}
-
 const int64_t lac_ref_data[] = {24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25,
                                 25, 25, 25, 25, 44, 24, 25, 25, 25, 36, 42, 43,
                                 44, 14, 15, 44, 14, 15, 44, 14, 15, 44, 38, 39,