@@ -56,13 +56,6 @@ DECLARE_int32(paddle_num_threads);
 namespace paddle {
 namespace inference {
 
-float Random(float low, float high) {
-  static std::random_device rd;
-  static std::mt19937 mt(rd());
-  std::uniform_real_distribution<double> dist(low, high);
-  return dist(mt);
-}
-
 void PrintConfig(const PaddlePredictor::Config *config, bool use_analysis) {
   const auto *analysis_config =
       reinterpret_cast<const AnalysisConfig *>(config);
@@ -146,7 +139,8 @@ void SetFakeImageInput(std::vector<std::vector<PaddleTensor>> *inputs,
                        const std::string &dirname, bool is_combined = true,
                        std::string model_filename = "model",
                        std::string params_filename = "params",
-                       const std::vector<std::string> *feed_names = nullptr) {
+                       const std::vector<std::string> *feed_names = nullptr,
+                       const int continuous_inuput_index = 0) {
   // Set fake_image_data
   PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data.");
   std::vector<std::vector<int64_t>> feed_target_shapes = GetFeedTargetShapes(
@@ -183,7 +177,8 @@ void SetFakeImageInput(std::vector<std::vector<PaddleTensor>> *inputs,
       float *input_data = static_cast<float *>(input.data.data());
      // fill input data, for profile easily, do not use random data here.
       for (size_t j = 0; j < len; ++j) {
-        *(input_data + j) = Random(0.0, 1.0) / 10.;
+        *(input_data + j) =
+            static_cast<float>((j + continuous_inuput_index) % len) / len;
       }
     }
     (*inputs).emplace_back(input_slots);
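
A minimal standalone sketch of the new fill logic (not part of the patch; the FillDeterministic helper and the main driver below are hypothetical, added only for illustration). Each element becomes ((j + continuous_inuput_index) % len) / len, so values stay in [0, 1) and are identical across runs, which keeps profiling results comparable; continuous_inuput_index only rotates the ramp so successive calls can still feed different-looking batches.

#include <cstdio>
#include <vector>

// Hypothetical helper mirroring the deterministic fill used in SetFakeImageInput:
// values depend only on the element index and continuous_inuput_index,
// never on a random generator, so repeated runs see identical inputs.
void FillDeterministic(float *data, size_t len, int continuous_inuput_index) {
  for (size_t j = 0; j < len; ++j) {
    data[j] = static_cast<float>((j + continuous_inuput_index) % len) / len;
  }
}

int main() {
  std::vector<float> buf(8);
  // continuous_inuput_index = 2 rotates the ramp by two positions.
  FillDeterministic(buf.data(), buf.size(), 2);
  for (float v : buf) std::printf("%.3f ", v);  // 0.250 0.375 ... 0.000 0.125
  std::printf("\n");
  return 0;
}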