
Commit 6551790

AnalysisConfig remove contrib namespace (#15540)
1 parent 7bc8481 commit 6551790

24 files changed: +78 −97 lines
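
In short, callers that previously spelled the config type as paddle::contrib::AnalysisConfig now use paddle::AnalysisConfig directly. A minimal before/after sketch of the caller-side change (the header path and model path are illustrative, not taken from this commit):

#include "paddle/fluid/inference/api/paddle_inference_api.h"

// Before this commit:
//   paddle::contrib::AnalysisConfig config("/path/to/model_dir");
// After this commit (contrib namespace removed):
paddle::AnalysisConfig config("/path/to/model_dir");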

paddle/fluid/inference/analysis/argument.h

Lines changed: 1 addition & 1 deletion
@@ -132,7 +132,7 @@ struct Argument {
   DECL_ARGUMENT_FIELD(tensorrt_workspace_size, TensorRtWorkspaceSize, int);
   DECL_ARGUMENT_FIELD(tensorrt_min_subgraph_size, TensorRtMinSubgraphSize, int);
   DECL_ARGUMENT_FIELD(tensorrt_precision_mode, TensorRtPrecisionMode,
-                      contrib::AnalysisConfig::Precision);
+                      AnalysisConfig::Precision);

   // Memory optimized related.
   DECL_ARGUMENT_FIELD(enable_memory_optim, EnableMemoryOptim, bool);

paddle/fluid/inference/analysis/helper.h

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ limitations under the License. */
 #ifdef _WIN32
 #include <direct.h>
 #include <io.h>
-#define GCC_ATTRIBUTE(attr__) ;
+#define GCC_ATTRIBUTE(attr__)
 #define MKDIR(path) _mkdir(path)
 #else
 #include <unistd.h>

paddle/fluid/inference/analysis/ir_pass_manager.cc

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@ void IRPassManager::CreatePasses(Argument *argument,
           new framework::ProgramDesc *(&argument->main_program()));

       bool enable_int8 = argument->tensorrt_precision_mode() ==
-                         contrib::AnalysisConfig::Precision::kInt8;
+                         AnalysisConfig::Precision::kInt8;

       pass->Set("enable_int8", new bool(enable_int8));
       std::string model_opt_cache_dir =

paddle/fluid/inference/api/analysis_config.cc

Lines changed: 25 additions & 25 deletions
@@ -22,7 +22,7 @@

 namespace paddle {

-PassStrategy *contrib::AnalysisConfig::pass_builder() const {
+PassStrategy *AnalysisConfig::pass_builder() const {
   if (!pass_builder_.get()) {
     if (use_gpu_) {
       LOG(INFO) << "Create GPU IR passes";
@@ -42,27 +42,27 @@ PassStrategy *contrib::AnalysisConfig::pass_builder() const {
   return pass_builder_.get();
 }

-contrib::AnalysisConfig::AnalysisConfig(const std::string &model_dir) {
+AnalysisConfig::AnalysisConfig(const std::string &model_dir) {
   model_dir_ = model_dir;

   Update();
 }
-contrib::AnalysisConfig::AnalysisConfig(const std::string &prog_file,
-                                        const std::string &params_file) {
+AnalysisConfig::AnalysisConfig(const std::string &prog_file,
+                               const std::string &params_file) {
   prog_file_ = prog_file;
   params_file_ = params_file;

   Update();
 }
-void contrib::AnalysisConfig::SetModel(const std::string &prog_file_path,
-                                       const std::string &params_file_path) {
+void AnalysisConfig::SetModel(const std::string &prog_file_path,
+                              const std::string &params_file_path) {
   prog_file_ = prog_file_path;
   params_file_ = params_file_path;

   Update();
 }
-void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
-                                           int device_id) {
+void AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
+                                  int device_id) {
 #ifdef PADDLE_WITH_CUDA
   use_gpu_ = true;
   memory_pool_init_size_mb_ = memory_pool_init_size_mb;
@@ -74,13 +74,13 @@ void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,

   Update();
 }
-void contrib::AnalysisConfig::DisableGpu() {
+void AnalysisConfig::DisableGpu() {
   use_gpu_ = false;

   Update();
 }

-contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
+AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
 #define CP_MEMBER(member__) member__ = other.member__;

   // Model related.
@@ -130,7 +130,7 @@ contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
   Update();
 }

-void contrib::AnalysisConfig::EnableMKLDNN() {
+void AnalysisConfig::EnableMKLDNN() {
 #ifdef PADDLE_WITH_MKLDNN
   pass_builder()->EnableMKLDNN();
   use_mkldnn_ = true;
@@ -142,9 +142,9 @@ void contrib::AnalysisConfig::EnableMKLDNN() {
   Update();
 }

-void contrib::AnalysisConfig::EnableTensorRtEngine(
+void AnalysisConfig::EnableTensorRtEngine(
     int workspace_size, int max_batch_size, int min_subgraph_size,
-    contrib::AnalysisConfig::Precision precision_mode) {
+    AnalysisConfig::Precision precision_mode) {
 #ifdef PADDLE_WITH_CUDA
   if (!use_gpu()) {
     LOG(ERROR) << "To use TensorRT engine, please call EnableGpu() first";
@@ -165,7 +165,7 @@ void contrib::AnalysisConfig::EnableTensorRtEngine(
 }

 // TODO(Superjomn) refactor this, buggy.
-void contrib::AnalysisConfig::Update() {
+void AnalysisConfig::Update() {
   auto info = SerializeInfoCache();
   if (info == serialized_info_cache_) return;

@@ -225,7 +225,7 @@ void contrib::AnalysisConfig::Update() {
   }
 }

-std::string contrib::AnalysisConfig::SerializeInfoCache() {
+std::string AnalysisConfig::SerializeInfoCache() {
   std::stringstream ss;
   ss << model_dir_;
   ss << prog_file_;
@@ -260,14 +260,14 @@ std::string contrib::AnalysisConfig::SerializeInfoCache() {
   return ss.str();
 }

-void contrib::AnalysisConfig::SetCpuMathLibraryNumThreads(
+void AnalysisConfig::SetCpuMathLibraryNumThreads(
     int cpu_math_library_num_threads) {
   cpu_math_library_num_threads_ = cpu_math_library_num_threads;

   Update();
 }

-float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
+float AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
 #ifdef PADDLE_WITH_CUDA
   // Get the GPU memory details and calculate the fraction of memory for the
   // GPU memory pool.
@@ -282,31 +282,31 @@ float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
 #endif
 }

-void contrib::AnalysisConfig::EnableMemoryOptim(
-    bool static_optim, bool force_update_static_cache) {
+void AnalysisConfig::EnableMemoryOptim(bool static_optim,
+                                       bool force_update_static_cache) {
   enable_memory_optim_ = true;
   static_memory_optim_ = static_optim;
   static_memory_optim_force_update_ = force_update_static_cache;

   Update();
 }

-bool contrib::AnalysisConfig::enable_memory_optim() const {
+bool AnalysisConfig::enable_memory_optim() const {
   return enable_memory_optim_;
 }

-void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer,
-                                             size_t prog_buffer_size,
-                                             const char *param_buffer,
-                                             size_t param_buffer_size) {
+void AnalysisConfig::SetModelBuffer(const char *prog_buffer,
+                                    size_t prog_buffer_size,
+                                    const char *param_buffer,
+                                    size_t param_buffer_size) {
   prog_file_ = std::string(prog_buffer, prog_buffer + prog_buffer_size);
   params_file_ = std::string(param_buffer, param_buffer + param_buffer_size);
   model_from_memory_ = true;

   Update();
 }

-NativeConfig contrib::AnalysisConfig::ToNativeConfig() const {
+NativeConfig AnalysisConfig::ToNativeConfig() const {
   NativeConfig config;
   config.model_dir = model_dir_;
   config.prog_file = prog_file_;
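
Taken together, the renamed definitions above are used as in this minimal sketch; the paths and sizes are illustrative placeholders, not taken from the commit:

paddle::AnalysisConfig config;
config.SetModel("/path/to/__model__", "/path/to/__params__");
config.EnableUseGpu(100 /* memory_pool_init_size_mb */, 0 /* device_id */);
// Precision::kInt8 is the value ir_pass_manager.cc checks above.
config.EnableTensorRtEngine(1 << 20 /* workspace_size */,
                            1 /* max_batch_size */, 3 /* min_subgraph_size */,
                            paddle::AnalysisConfig::Precision::kInt8);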

paddle/fluid/inference/api/analysis_predictor.cc

Lines changed: 4 additions & 5 deletions
@@ -47,7 +47,6 @@ DECLARE_bool(profile);

 namespace paddle {

-using contrib::AnalysisConfig;
 using inference::Singleton;
 #if PADDLE_WITH_TENSORRT
 using inference::tensorrt::TRTInt8Calibrator;
@@ -731,10 +730,10 @@ std::string AnalysisPredictor::GetSeriazlizedProgram() const {
 }

 template <>
-std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<contrib::AnalysisConfig>(
-    const contrib::AnalysisConfig &config) {
-  return CreatePaddlePredictor<contrib::AnalysisConfig,
-                               PaddleEngineKind::kAnalysis>(config);
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
+    const AnalysisConfig &config) {
+  return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
+      config);
 }

 }  // namespace paddle
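
With the specialization above, predictor creation takes the un-namespaced config type. A minimal sketch (the model path is a placeholder):

paddle::AnalysisConfig config("/path/to/model_dir");
auto predictor =
    paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(config);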

paddle/fluid/inference/api/analysis_predictor.h

Lines changed: 1 addition & 2 deletions
@@ -33,7 +33,6 @@ using inference::analysis::Argument;
 using inference::analysis::Analyzer;
 using framework::proto::ProgramDesc;
 using framework::NaiveExecutor;
-using contrib::AnalysisConfig;

 /** \brief This predictor is based on the original native predictor with IR and
  * Analysis support.
@@ -123,7 +122,7 @@ class AnalysisPredictor : public PaddlePredictor {
 #endif

  private:
-  contrib::AnalysisConfig config_;
+  AnalysisConfig config_;
   Argument argument_;
   std::unique_ptr<NaiveExecutor> executor_;
   platform::Place place_;

paddle/fluid/inference/api/analysis_predictor_tester.cc

Lines changed: 0 additions & 1 deletion
@@ -24,7 +24,6 @@
 DEFINE_string(dirname, "", "dirname to tests.");

 namespace paddle {
-using contrib::AnalysisConfig;

 TEST(AnalysisPredictor, analysis_off) {
   AnalysisConfig config;

paddle/fluid/inference/api/api_impl_tester.cc

Lines changed: 1 addition & 1 deletion
@@ -295,7 +295,7 @@ TEST(inference_api_native, image_classification_gpu) {
 #endif

 TEST(PassBuilder, Delete) {
-  contrib::AnalysisConfig config;
+  AnalysisConfig config;
   config.DisableGpu();
   config.pass_builder()->DeletePass("attention_lstm_fuse_pass");
   const auto& passes = config.pass_builder()->AllPasses();

paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ namespace demo {
 */
 void Main() {
   std::unique_ptr<PaddlePredictor> predictor;
-  paddle::contrib::AnalysisConfig config;
+  paddle::AnalysisConfig config;
   config.EnableUseGpu(100, 0);
   config.SetModel(FLAGS_modeldir + "/__model__",
                   FLAGS_modeldir + "/__params__");

paddle/fluid/inference/api/demo_ci/vis_demo.cc

Lines changed: 0 additions & 1 deletion
@@ -34,7 +34,6 @@ DEFINE_bool(use_gpu, false, "Whether use gpu.");
 namespace paddle {
 namespace demo {

-using contrib::AnalysisConfig;
 /*
 * Use the native and analysis fluid engine to inference the demo.
 */
