@@ -130,9 +130,14 @@ void ListenAndServOp::RunSyncLoop(
   int32_t profile_step = 0;
   while (true) {
-    if (FLAGS_listen_and_serv_profile_period > 0 && profile_step == 0) {
-      auto pf_state = paddle::platform::ProfilerState::kCPU;
-      paddle::platform::EnableProfiler(pf_state);
+    PADDLE_ENFORCE_LE(profile_step, FLAGS_listen_and_serv_profile_period,
+                      "profile_step should not be larger then "
+                      "FLAGS_listen_and_serv_profile_period");
+    if (FLAGS_listen_and_serv_profile_period > 0) {
+      if (profile_step == 0) {
+        auto pf_state = paddle::platform::ProfilerState::kCPU;
+        paddle::platform::EnableProfiler(pf_state);
+      }
     }
     // Get from multiple trainers, we don't care about the order in which
     // the gradients arrives, just add suffix 0~n and merge the gradient.
@@ -175,13 +180,14 @@ void ListenAndServOp::RunSyncLoop(
     // reset received sparse vars to avoid reuse it in the next mini-batch
     dynamic_cast<distributed::RequestSendHandler *>(request_send_handler_.get())
         ->ResetSparseVarRecorder();
-    if (FLAGS_listen_and_serv_profile_period > 0 &&
-        profile_step == FLAGS_listen_and_serv_profile_period) {
-      paddle::platform::DisableProfiler(
-          paddle::platform::EventSortingKey::kTotal, "/dev/null");
-      profile_step = 0;
-    } else {
-      profile_step++;
+    if (FLAGS_listen_and_serv_profile_period > 0) {
+      if (profile_step == FLAGS_listen_and_serv_profile_period) {
+        paddle::platform::DisableProfiler(
+            paddle::platform::EventSortingKey::kTotal, "/dev/null");
+        profile_step = 0;
+      } else {
+        profile_step++;
+      }
     }
   }  // while(true)
 }
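The restructured logic keeps a per-mini-batch counter: when FLAGS_listen_and_serv_profile_period is positive, the profiler is enabled whenever the counter is 0 and disabled (with events dumped to /dev/null) once the counter reaches the period, after which the counter resets and the next profiling window begins. Below is a minimal standalone sketch of that counter pattern; enable_profiler()/disable_profiler() are hypothetical stand-ins for paddle::platform::EnableProfiler/DisableProfiler, and a bounded loop replaces the server's while (true).

```cpp
#include <cstdint>
#include <iostream>

// Hypothetical stand-ins for paddle::platform::EnableProfiler / DisableProfiler,
// used only to illustrate the counter logic from the patch.
void enable_profiler() { std::cout << "profiler enabled\n"; }
void disable_profiler() { std::cout << "profiler disabled, events dumped\n"; }

int main() {
  const int32_t profile_period = 3;  // stand-in for FLAGS_listen_and_serv_profile_period
  int32_t profile_step = 0;

  for (int batch = 0; batch < 10; ++batch) {  // stand-in for the server's while (true)
    if (profile_period > 0) {
      if (profile_step == 0) {
        enable_profiler();  // start of a profiling window
      }
    }

    // ... receive gradients, run the optimize block, send parameters back ...

    if (profile_period > 0) {
      if (profile_step == profile_period) {
        disable_profiler();  // end of the window: dump events, reset counter
        profile_step = 0;
      } else {
        profile_step++;
      }
    }
  }
  return 0;
}
```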