
Commit 8140485

[Cherry-Pick] logclean & embedding doc (#32009)
* fix en doc for emb (#31980)
  * fix en doc for emb, test=document_fix;
  Change-Id: I4757e67caacd7189f068493ed45a7445f87ffb40
* LOG CLEAN (#31819)
  * upgrade vlog
  * train from dataset fetch optimize
1 parent e7542a4 commit 8140485

19 files changed, 104 insertions(+), 70 deletions(-)

cmake/external/brpc.cmake

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ ExternalProject_Add(
     ${EXTERNAL_PROJECT_LOG_ARGS}
     # TODO(gongwb): change to de newst repo when they changed.
     GIT_REPOSITORY "https://github.com/wangjiawei04/brpc"
-    GIT_TAG "6d79e0b17f25107c35b705ea58d888083f59ff47"
+    GIT_TAG "e203afb794caf027da0f1e0776443e7d20c0c28e"
     PREFIX ${BRPC_SOURCES_DIR}
     UPDATE_COMMAND ""
     CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}

paddle/fluid/distributed/service/brpc_ps_server.cc

Lines changed: 3 additions & 2 deletions
@@ -57,7 +57,8 @@ uint64_t BrpcPsServer::start(const std::string &ip, uint32_t port) {
   std::unique_lock<std::mutex> lock(mutex_);
 
   std::string ip_port = ip + ":" + std::to_string(port);
-  VLOG(3) << "server of rank " << _rank << " starts at " << ip_port;
+  VLOG(0) << "running server with rank id: " << _rank
+          << ", endpoint: " << ip_port;
   brpc::ServerOptions options;
 
   int num_threads = std::thread::hardware_concurrency();
@@ -535,7 +536,7 @@ int32_t BrpcPsService::stop_server(Table *table,
   auto *p_server = _server;
   std::thread t_stop([p_server]() {
     p_server->stop();
-    LOG(INFO) << "Server Stoped";
+    VLOG(3) << "Server Stoped";
   });
   t_stop.detach();
   return 0;

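The pattern above recurs throughout this commit: glog's VLOG(n) prints only when the runtime verbosity (the --v flag, or the GLOG_v environment variable) is at least n. Promoting the startup line to VLOG(0) keeps it visible at the default level, while demoting shutdown chatter from LOG(INFO) to VLOG(3) hides it unless verbose logging is requested. A minimal standalone sketch of that gating, assuming only that glog is installed:

#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;  // write to stderr instead of log files

  VLOG(0) << "visible at the default verbosity (v=0)";
  VLOG(3) << "hidden unless run with GLOG_v=3 or higher";
  return 0;
}

Run as-is, only the first line prints; run with GLOG_v=3, both print.
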
paddle/fluid/distributed/service/brpc_utils.cc

Lines changed: 1 addition & 1 deletion
@@ -331,7 +331,7 @@ std::string GetIntTypeEndpoint(const std::string& ip, const uint32_t& port) {
 
   while (hp->h_addr_list[i] != NULL) {
     int_ip = inet_ntoa(*(struct in_addr*)hp->h_addr_list[i]);
-    VLOG(0) << "Brpc Get host by name, host:" << ip << " -> ip: " << int_ip;
+    VLOG(3) << "Brpc Get host by name, host:" << ip << " -> ip: " << int_ip;
     break;
   }
 

paddle/fluid/distributed/service/env.h

Lines changed: 4 additions & 6 deletions
@@ -39,7 +39,7 @@ struct PSHost {
 
   // |---ip---|---port---|--rank--|
   // |-32bit--|--20bit---|--12bit-|
-  // for pslib
+
   uint64_t serialize_to_uint64() {
     uint64_t host_label = 0;
     host_label = inet_addr(ip.c_str());
@@ -174,14 +174,12 @@ class PSEnvironment {
     host.ip = ip;
     host.port = port;
     host.rank = rank;
-    if (sign_set.count(rank) > 0) {
-      LOG(WARNING) << "ps-host :" << host.ip << ":" << host.port
-                   << ", rank:" << host.rank
-                   << " already register, ignore register";
-    } else {
+
+    if (sign_set.count(rank) == 0) {
       host_list.push_back(host);
       sign_set.insert(rank);
     }
+
     return 0;
   }

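The layout comment kept in the first hunk describes how one host identity fits in a single uint64: 32 bits of IPv4 address, 20 bits of port, 12 bits of rank. A sketch of packing that follows the comment; PackHost and UnpackRank are hypothetical names, since the body of PSHost::serialize_to_uint64 is only partially visible in this hunk:

#include <arpa/inet.h>
#include <cstdint>
#include <string>

// Hypothetical helpers mirroring the |ip|port|rank| comment:
// high 32 bits = IPv4 (from inet_addr), middle 20 bits = port,
// low 12 bits = rank.
uint64_t PackHost(const std::string& ip, uint32_t port, uint32_t rank) {
  uint64_t label = inet_addr(ip.c_str());     // 32-bit IPv4 address
  label = (label << 20) | (port & 0xFFFFFu);  // 20-bit port (max 1048575)
  label = (label << 12) | (rank & 0xFFFu);    // 12-bit rank (max 4095)
  return label;
}

uint32_t UnpackRank(uint64_t label) { return label & 0xFFFu; }
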
paddle/fluid/distributed/service/ps_client.cc

Lines changed: 1 addition & 2 deletions
@@ -80,8 +80,7 @@ PSClient *PSClientFactory::create(const PSParameter &ps_config) {
   }
 
   TableManager::instance().initialize();
-  LOG(INFO) << "Create PSClient[" << service_param.client_class()
-            << "] success";
+  VLOG(3) << "Create PSClient[" << service_param.client_class() << "] success";
   return client;
 }
 }  // namespace distributed

paddle/fluid/distributed/service/service.cc

Lines changed: 1 addition & 1 deletion
@@ -47,7 +47,7 @@ paddle::distributed::PSParameter load_from_prototxt(
 }
 
 void PSCore::init_gflag(const std::string& gflags) {
-  LOG(INFO) << "Init With Gflags:" << gflags;
+  VLOG(3) << "Init With Gflags:" << gflags;
   std::vector<std::string> flags = paddle::string::split_string(gflags);
   if (flags.size() < 1) {
     flags.push_back("-max_body_size=314217728");

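For context, init_gflag receives a single space-separated flag string (e.g. "-max_body_size=314217728") rather than argc/argv. The usual way to hand such a string to gflags is to rebuild a fake argv and call ParseCommandLineFlags; a sketch of that general pattern, under the assumption that Paddle does something similar after paddle::string::split_string (the rest of the function is not shown in this hunk):

#include <gflags/gflags.h>
#include <string>
#include <vector>

// Hypothetical helper: split a flag string on spaces, prepend a dummy
// argv[0], and let gflags parse the result.
void InitGflagsFromString(const std::string& flag_str) {
  std::vector<std::string> pieces = {"dummy"};  // argv[0] placeholder
  size_t start = 0;
  while (start < flag_str.size()) {
    size_t end = flag_str.find(' ', start);
    if (end == std::string::npos) end = flag_str.size();
    if (end > start) pieces.push_back(flag_str.substr(start, end - start));
    start = end + 1;
  }
  std::vector<char*> argv;
  for (auto& p : pieces) argv.push_back(const_cast<char*>(p.c_str()));
  int argc = static_cast<int>(argv.size());
  char** argv_ptr = argv.data();
  gflags::ParseCommandLineFlags(&argc, &argv_ptr, true);
}
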
paddle/fluid/distributed/table/depends/dense.h

Lines changed: 0 additions & 2 deletions
@@ -89,7 +89,6 @@ class DSGD : public DenseOptimizer {
 
   auto blas = GetBlas<float>();
   float lr = *(global_learning_rate_) * (*learning_rate);
-  VLOG(4) << "DSGD LearningRate: " << lr;
   blas.VCOPY(update_numel, update_values + begin, grads.data());
   blas.SCAL(update_numel, lr, grads.data());
   blas.VSUB(update_numel, param + begin, grads.data(), param + begin);
@@ -157,7 +156,6 @@ class DAdam : public DenseOptimizer {
   beta2_pow[0] = beta2_pow[0] * beta2;
 
   float lr_ = *(global_learning_rate_)*learning_rate[0];
-  VLOG(4) << "DAdam LearningRate: " << lr_;
   lr_ *= sqrt(1 - beta2_pow[0]) / (1 - beta1_pow[0]);
 
   float* tmp_ = tmp.data();

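The context line kept below the deleted DAdam log is Adam's bias-corrected step size; in standard notation (the textbook formula, not part of the diff):

\[
\alpha_t = \alpha \cdot \frac{\sqrt{1 - \beta_2^{t}}}{1 - \beta_1^{t}}
\]

where beta1_pow[0] and beta2_pow[0] accumulate \(\beta_1^{t}\) and \(\beta_2^{t}\) across steps, as the first context line of the second hunk shows.
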
paddle/fluid/distributed/table/depends/sparse.h

Lines changed: 0 additions & 2 deletions
@@ -110,7 +110,6 @@ class SSGD : public SparseOptimizer {
   auto* value = block->Get(id);
 
   float learning_rate = *(global_learning_rate_) * (value + lr_offset)[0];
-  VLOG(4) << "SSGD LearningRate: " << learning_rate;
   float* param = value + param_offset;
 
   std::vector<float> grads;
@@ -166,7 +165,6 @@ class SAdam : public SparseOptimizer {
   if (!block->GetEntry(id)) continue;
   auto* values = block->Get(id);
   float lr_ = *(global_learning_rate_) * (values + lr_offset)[0];
-  VLOG(4) << "SAdam LearningRate: " << lr_;
   float* param = values + param_offset;
   float* moment1 = values + m1_offset;
   float* moment2 = values + m2_offset;

paddle/fluid/framework/details/build_strategy.cc

Lines changed: 12 additions & 16 deletions
@@ -167,9 +167,6 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
     AppendPassWithCheck(strategy_.fuse_bn_add_act_ops_, "fuse_bn_add_act_pass");
 #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) && !defined(__APPLE__)
     AppendPassWithCheck(strategy_.enable_auto_fusion_, "fusion_group_pass");
-#else
-    LOG(WARNING) << "fusion_group is not enabled for Windows/MacOS now, and "
-                    "only effective when running with CUDA GPU.";
 #endif
     AppendPassWithCheck(strategy_.fuse_elewise_add_act_ops_,
                         "fuse_elewise_add_act_pass");
@@ -271,12 +268,11 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
       if (FLAGS_use_mkldnn) {
         AppendPass(pass_name);
       } else if (!strategy_.mkldnn_enabled_op_types_.empty()) {
-        LOG(WARNING)
-            << "mkldnn_enabled_op_types specify the operator type list to "
-               "use MKLDNN acceleration. It is null in default, means "
-               "that all the operators supported by MKLDNN will be "
-               "accelerated. And it should not be set when "
-               "FLAGS_use_mkldnn=false.";
+        VLOG(1) << "mkldnn_enabled_op_types specify the operator type list to "
+                   "use MKLDNN acceleration. It is null in default, means "
+                   "that all the operators supported by MKLDNN will be "
+                   "accelerated. And it should not be set when "
+                   "FLAGS_use_mkldnn=false.";
       }
 #else
       PADDLE_ENFORCE_NE(FLAGS_use_mkldnn, true,
@@ -409,26 +405,26 @@ ir::Graph *BuildStrategy::Apply(ir::Graph *graph,
               << ", num_trainers:" << num_trainers_;
     } else if (pass->Type() == "fuse_relu_depthwise_conv_pass") {
       if (use_device != p::kCUDA) {
-        LOG(WARNING) << "fuse_relu_depthwise_conv_pass is only supported on "
-                        "GPU, skipped.";
+        VLOG(1) << "fuse_relu_depthwise_conv_pass is only supported on "
+                   "GPU, skipped.";
         continue;
       }
     } else if (pass->Type() == "fusion_group_pass") {
       pass->Set<bool>("use_gpu", new bool((use_device == p::kCUDA)));
       if (use_device != p::kCUDA) {
-        LOG(WARNING) << "fusion_group_pass is only supported on GPU, skipped.";
+        VLOG(1) << "fusion_group_pass is only supported on GPU, skipped.";
         continue;
       }
     } else if (pass->Type() == "fuse_bn_act_pass") {
       if (use_device != p::kCUDA) {
-        LOG(WARNING) << "fuse_bn_act_pass is only supported on "
-                        "GPU, skipped.";
+        VLOG(1) << "fuse_bn_act_pass is only supported on "
+                   "GPU, skipped.";
         continue;
       }
     } else if (pass->Type() == "fuse_bn_add_act_pass") {
       if (use_device != p::kCUDA) {
-        LOG(WARNING) << "fuse_bn_add_act_pass is only supported on "
-                        "GPU, skipped.";
+        VLOG(1) << "fuse_bn_add_act_pass is only supported on "
+                   "GPU, skipped.";
         continue;
       }
     } else if (pass->Type() == "mkldnn_placement_pass") {

paddle/fluid/framework/device_worker.h

Lines changed: 1 addition & 1 deletion
@@ -202,7 +202,7 @@ class DeviceWorker {
   Scope* root_scope_ = nullptr;
   Scope* thread_scope_;
   paddle::platform::Place place_;
-  int64_t batch_num_;
+  int64_t batch_num_ = 0;
   FetchConfig fetch_config_;
   bool use_cvm_;
   bool no_cvm_;

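The device_worker.h fix adds a default member initializer: without the `= 0`, a default-constructed DeviceWorker would read an indeterminate batch_num_, which is undefined behavior. A minimal illustration of the idiom (Counter is a made-up name):

#include <cstdint>

struct Counter {
  int64_t batch_num_ = 0;  // in-class initializer: every instance starts at 0
};

int main() {
  Counter c;  // no constructor needed; batch_num_ is already 0
  return static_cast<int>(c.batch_num_);
}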