Skip to content

Commit f444a72

Browse files
committed
Merge branch 'develop' into clean_inference_lib
2 parents 41eeb77 + 3598500 commit f444a72

File tree

6 files changed

+9
-24
lines changed

6 files changed

+9
-24
lines changed

paddle/fluid/inference/analysis/analyzer_tester.cc

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -51,9 +51,7 @@ void TestWord2vecPrediction(const std::string& model_path) {
   config.model_dir = model_path;
   config.use_gpu = false;
   config.device = 0;
-  auto predictor =
-      ::paddle::CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(
-          config);
+  auto predictor = ::paddle::CreatePaddlePredictor<NativeConfig>(config);
 
   // One single batch

paddle/fluid/inference/api/analysis_predictor_tester.cc

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -27,9 +27,7 @@ TEST(AnalysisPredictor, ZeroCopy) {
   config.model_dir = FLAGS_dirname + "/word2vec.inference.model";
   config.use_feed_fetch_ops = false;
 
-  auto predictor =
-      CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
-          config);
+  auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
 
   auto w0 = predictor->GetInputTensor("firstw");
   auto w1 = predictor->GetInputTensor("secondw");

paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -41,11 +41,8 @@ void CompareTensorRTWithFluid(bool enable_tensorrt) {
   config1.device = 0;
   config1.max_batch_size = 10;
 
-  auto predictor0 =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config0);
-  auto predictor1 =
-      CreatePaddlePredictor<MixedRTConfig,
-                            PaddleEngineKind::kAutoMixedTensorRT>(config1);
+  auto predictor0 = CreatePaddlePredictor<NativeConfig>(config0);
+  auto predictor1 = CreatePaddlePredictor<MixedRTConfig>(config1);
 
   for (int batch_id = 0; batch_id < 1; batch_id++) {
     //# 2. Prepare input.

paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -308,18 +308,14 @@ TEST(Analyzer_rnn1, ZeroCopy) {
   PaddlePlace place;
   int output_size{0};
 
-  auto predictor =
-      CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
-          config);
+  auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
 
   config.use_feed_fetch_ops = true;
   auto native_predictor =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
 
   config.use_feed_fetch_ops = true;  // the analysis predictor needs feed/fetch.
-  auto analysis_predictor =
-      CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
-          config);
+  auto analysis_predictor = CreatePaddlePredictor<AnalysisConfig>(config);
 
 #define NEW_TENSOR(name__) \
   auto name__##_tensor = predictor->GetInputTensor(#name__);

paddle/fluid/inference/tests/api/tester_helper.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -77,8 +77,7 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
 std::unique_ptr<PaddlePredictor> CreateTestPredictor(
     const AnalysisConfig &config, bool use_analysis = true) {
   if (use_analysis) {
-    return CreatePaddlePredictor<contrib::AnalysisConfig,
-                                 PaddleEngineKind::kAnalysis>(config);
+    return CreatePaddlePredictor<contrib::AnalysisConfig>(config);
   } else {
     return CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(
         config);

paddle/fluid/inference/tests/api/trt_models_tester.cc

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -51,11 +51,8 @@ void CompareTensorRTWithFluid(int batch_size, std::string model_dirname) {
   config1.model_dir = model_dirname;
   config1.max_batch_size = batch_size;
 
-  auto predictor0 =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config0);
-  auto predictor1 =
-      CreatePaddlePredictor<MixedRTConfig,
-                            PaddleEngineKind::kAutoMixedTensorRT>(config1);
+  auto predictor0 = CreatePaddlePredictor<NativeConfig>(config0);
+  auto predictor1 = CreatePaddlePredictor<MixedRTConfig>(config1);
   // Prepare inputs
   int height = 224;
   int width = 224;

0 commit comments

Comments (0)