 
 namespace paddle {
 
-PassStrategy *contrib::AnalysisConfig::pass_builder() const {
+PassStrategy *AnalysisConfig::pass_builder() const {
   if (!pass_builder_.get()) {
     if (use_gpu_) {
       LOG(INFO) << "Create GPU IR passes";
@@ -42,27 +42,27 @@ PassStrategy *contrib::AnalysisConfig::pass_builder() const {
   return pass_builder_.get();
 }
 
-contrib::AnalysisConfig::AnalysisConfig(const std::string &model_dir) {
+AnalysisConfig::AnalysisConfig(const std::string &model_dir) {
   model_dir_ = model_dir;
 
   Update();
 }
-contrib::AnalysisConfig::AnalysisConfig(const std::string &prog_file,
-                                        const std::string &params_file) {
+AnalysisConfig::AnalysisConfig(const std::string &prog_file,
+                               const std::string &params_file) {
   prog_file_ = prog_file;
   params_file_ = params_file;
 
   Update();
 }
-void contrib::AnalysisConfig::SetModel(const std::string &prog_file_path,
-                                       const std::string &params_file_path) {
+void AnalysisConfig::SetModel(const std::string &prog_file_path,
+                              const std::string &params_file_path) {
   prog_file_ = prog_file_path;
   params_file_ = params_file_path;
 
   Update();
 }
-void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
-                                           int device_id) {
+void AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
+                                  int device_id) {
 #ifdef PADDLE_WITH_CUDA
   use_gpu_ = true;
   memory_pool_init_size_mb_ = memory_pool_init_size_mb;
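
Reviewer note: for orientation, a minimal sketch of what caller code looks like once the contrib:: prefix is gone. The header path, model paths, and the CreatePaddlePredictor call are illustrative assumptions, not part of this diff:

#include "paddle/fluid/inference/api/paddle_inference_api.h"  // assumed umbrella header

int main() {
  // Previously spelled paddle::contrib::AnalysisConfig.
  paddle::AnalysisConfig config("./model/__model__", "./model/params");
  config.EnableUseGpu(100 /* memory_pool_init_size_mb */, 0 /* device_id */);
  auto predictor = paddle::CreatePaddlePredictor(config);
  return 0;
}
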
@@ -74,13 +74,13 @@ void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb,
 
   Update();
 }
-void contrib::AnalysisConfig::DisableGpu() {
+void AnalysisConfig::DisableGpu() {
   use_gpu_ = false;
 
   Update();
 }
 
-contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
+AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
 #define CP_MEMBER(member__) member__ = other.member__;
 
   // Model related.
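
Reviewer note: the copy constructor above copies each field through a local CP_MEMBER macro rather than relying on the implicit copy, so every copied member is listed explicitly. A standalone sketch of the same idiom, with a hypothetical struct:

// Widget and its members are hypothetical; only the macro idiom
// mirrors the real code.
struct Widget {
  int id_{0};
  bool enabled_{false};
  Widget() = default;
  Widget(const Widget &other) {
#define CP_MEMBER(member__) member__ = other.member__;
    CP_MEMBER(id_);
    CP_MEMBER(enabled_);
#undef CP_MEMBER
  }
};
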
@@ -130,7 +130,7 @@ contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
   Update();
 }
 
-void contrib::AnalysisConfig::EnableMKLDNN() {
+void AnalysisConfig::EnableMKLDNN() {
 #ifdef PADDLE_WITH_MKLDNN
   pass_builder()->EnableMKLDNN();
   use_mkldnn_ = true;
@@ -142,9 +142,9 @@ void contrib::AnalysisConfig::EnableMKLDNN() {
   Update();
 }
 
-void contrib::AnalysisConfig::EnableTensorRtEngine(
+void AnalysisConfig::EnableTensorRtEngine(
     int workspace_size, int max_batch_size, int min_subgraph_size,
-    contrib::AnalysisConfig::Precision precision_mode) {
+    AnalysisConfig::Precision precision_mode) {
 #ifdef PADDLE_WITH_CUDA
   if (!use_gpu()) {
     LOG(ERROR) << "To use TensorRT engine, please call EnableGpu() first";
@@ -165,7 +165,7 @@ void contrib::AnalysisConfig::EnableTensorRtEngine(
 }
 
 // TODO(Superjomn) refactor this, buggy.
-void contrib::AnalysisConfig::Update() {
+void AnalysisConfig::Update() {
   auto info = SerializeInfoCache();
   if (info == serialized_info_cache_) return;
@@ -225,7 +225,7 @@ void contrib::AnalysisConfig::Update() {
   }
 }
 
-std::string contrib::AnalysisConfig::SerializeInfoCache() {
+std::string AnalysisConfig::SerializeInfoCache() {
   std::stringstream ss;
   ss << model_dir_;
   ss << prog_file_;
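
Reviewer note: SerializeInfoCache() streams every option into a string so that Update() can cheaply detect whether anything changed since the last call (see the early return on serialized_info_cache_ above). A minimal standalone sketch of that serialize-and-compare idiom, with hypothetical names:

#include <sstream>
#include <string>

// TinyConfig is hypothetical; it mirrors the Update()/SerializeInfoCache()
// pattern in this file.
class TinyConfig {
 public:
  void set_threads(int n) {
    threads_ = n;
    Update();
  }

 private:
  void Update() {
    std::string info = SerializeInfoCache();
    if (info == serialized_info_cache_) return;  // nothing changed, skip work
    serialized_info_cache_ = info;
    // ... re-run the expensive reconfiguration here ...
  }
  std::string SerializeInfoCache() {
    std::stringstream ss;
    ss << threads_;
    return ss.str();
  }
  int threads_{1};
  std::string serialized_info_cache_;
};
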
@@ -260,14 +260,14 @@ std::string contrib::AnalysisConfig::SerializeInfoCache() {
   return ss.str();
 }
 
-void contrib::AnalysisConfig::SetCpuMathLibraryNumThreads(
+void AnalysisConfig::SetCpuMathLibraryNumThreads(
     int cpu_math_library_num_threads) {
   cpu_math_library_num_threads_ = cpu_math_library_num_threads;
 
   Update();
 }
 
-float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
+float AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
 #ifdef PADDLE_WITH_CUDA
   // Get the GPU memory details and calculate the fraction of memory for the
   // GPU memory pool.
@@ -282,31 +282,31 @@ float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const {
 #endif
 }
 
-void contrib::AnalysisConfig::EnableMemoryOptim(
-    bool static_optim, bool force_update_static_cache) {
+void AnalysisConfig::EnableMemoryOptim(bool static_optim,
+                                       bool force_update_static_cache) {
   enable_memory_optim_ = true;
   static_memory_optim_ = static_optim;
   static_memory_optim_force_update_ = force_update_static_cache;
 
   Update();
 }
 
-bool contrib::AnalysisConfig::enable_memory_optim() const {
+bool AnalysisConfig::enable_memory_optim() const {
   return enable_memory_optim_;
 }
 
-void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer,
-                                             size_t prog_buffer_size,
-                                             const char *param_buffer,
-                                             size_t param_buffer_size) {
+void AnalysisConfig::SetModelBuffer(const char *prog_buffer,
+                                    size_t prog_buffer_size,
+                                    const char *param_buffer,
+                                    size_t param_buffer_size) {
   prog_file_ = std::string(prog_buffer, prog_buffer + prog_buffer_size);
   params_file_ = std::string(param_buffer, param_buffer + param_buffer_size);
   model_from_memory_ = true;
 
   Update();
 }
 
-NativeConfig contrib::AnalysisConfig::ToNativeConfig() const {
+NativeConfig AnalysisConfig::ToNativeConfig() const {
   NativeConfig config;
   config.model_dir = model_dir_;
   config.prog_file = prog_file_;
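
Reviewer note: to round out the picture, a hedged sketch of the two methods in this hunk: loading a model from in-memory buffers and falling back to a NativeConfig. The buffer variables and the default constructor are assumptions for illustration:

// prog_data / param_data are assumed std::string buffers filled elsewhere,
// e.g. read from an encrypted model store.
paddle::AnalysisConfig config;  // assumes a default constructor is available
config.SetModelBuffer(prog_data.data(), prog_data.size(),
                      param_data.data(), param_data.size());

// Downgrade path: reuse the same settings with the native predictor.
paddle::NativeConfig native = config.ToNativeConfig();
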