Skip to content

Commit aadaadf

Browse files
committed
Replace use_event with use_cuda: the use_event flag actually indicates whether the program is running with CUDA, so the name use_cuda is more intuitive.
1 parent 961fbce commit aadaadf

File tree

7 files changed

+12
-12
lines changed

7 files changed

+12
-12
lines changed

paddle/fluid/framework/details/execution_strategy.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ namespace details {
2020

2121
struct ExecutionStrategy {
2222
size_t num_threads_{0};
23-
bool use_event_{true};
23+
bool use_cuda_{true};
2424
bool allow_op_delay_{false};
2525
size_t num_iteration_per_drop_scope_{100};
2626
};

paddle/fluid/framework/details/op_handle_base.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -39,9 +39,9 @@ OpHandleBase::~OpHandleBase() {
3939
#endif
4040
}
4141

42-
void OpHandleBase::Run(bool use_event) {
42+
void OpHandleBase::Run(bool use_cuda) {
4343
#ifdef PADDLE_WITH_CUDA
44-
if (events_.empty() && use_event) {
44+
if (events_.empty() && use_cuda) {
4545
for (auto &p : dev_ctxes_) {
4646
int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
4747
PADDLE_ENFORCE(cudaSetDevice(dev_id));
@@ -50,7 +50,7 @@ void OpHandleBase::Run(bool use_event) {
5050
}
5151
}
5252
#else
53-
PADDLE_ENFORCE(!use_event);
53+
PADDLE_ENFORCE(!use_cuda);
5454
#endif
5555

5656
RunImpl();

paddle/fluid/framework/details/op_handle_base.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ class OpHandleBase {
3636

3737
virtual std::string Name() const = 0;
3838

39-
void Run(bool use_event);
39+
void Run(bool use_cuda);
4040

4141
virtual void RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx);
4242

paddle/fluid/framework/details/threaded_ssa_graph_executor.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -192,7 +192,7 @@ void ThreadedSSAGraphExecutor::RunOp(
192192
if (VLOG_IS_ON(10)) {
193193
VLOG(10) << op << " " << op->Name() << " : " << op->DebugString();
194194
}
195-
op->Run(strategy_.use_event_);
195+
op->Run(strategy_.use_cuda_);
196196
VLOG(10) << op << " " << op->Name() << " Done ";
197197
running_ops_--;
198198
ready_var_q->Extend(op->Outputs());

paddle/fluid/framework/parallel_executor.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ ParallelExecutor::ParallelExecutor(
6161
size_t num_trainers, size_t trainer_id)
6262
: member_(new ParallelExecutorPrivate(places)) {
6363
member_->global_scope_ = scope;
64-
member_->use_cuda_ = exec_strategy.use_event_;
64+
member_->use_cuda_ = exec_strategy.use_cuda_;
6565

6666
// Step 1. Bcast the params to devs.
6767
// Create local scopes

paddle/fluid/pybind/pybind.cc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -509,10 +509,10 @@ All parameter, weight, gradient are variables in Paddle.
509509
self.num_threads_ = num_threads;
510510
})
511511
.def_property(
512-
"use_event",
513-
[](const ExecutionStrategy &self) { return self.use_event_; },
514-
[](ExecutionStrategy &self, bool use_event) {
515-
self.use_event_ = use_event;
512+
"use_cuda",
513+
[](const ExecutionStrategy &self) { return self.use_cuda_; },
514+
[](ExecutionStrategy &self, bool use_cuda) {
515+
self.use_cuda_ = use_cuda;
516516
})
517517
.def_property(
518518
"allow_op_delay",

python/paddle/fluid/parallel_executor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -113,7 +113,7 @@ def __init__(self,
113113

114114
if exec_strategy is None:
115115
exec_strategy = ExecutionStrategy()
116-
exec_strategy.use_event = use_cuda
116+
exec_strategy.use_cuda = use_cuda
117117

118118
if exec_strategy.num_threads == 0:
119119
if use_cuda:

0 commit comments

Comments (0)